Mirror of https://github.com/cloud-hypervisor/cloud-hypervisor.git (synced 2024-12-22 05:35:20 +00:00)
scripts: fix shell scripts according to shellcheck errors

Unify coding style of shell scripts in accordance with shellcheck errors.

Signed-off-by: Ruslan Mstoi <ruslan.mstoi@intel.com>

parent 318caeb9d8
commit 2b2d00653c
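
The diff below repeats the same few mechanical shellcheck fixes across every script: expansions are double-quoted (SC2086), pushd/popd/cd are guarded with `|| exit` (SC2164), commands are tested directly instead of inspecting `$?` afterwards (SC2181), `grep | wc -l` becomes `grep -c` (SC2126), and `read` gains `-r` (SC2162). The snippet below is a minimal sketch of that target style only; it is not taken from the repository, and its paths, file names and values are placeholders.

#!/usr/bin/env bash
# Illustrative sketch only: placeholder paths and values, not Cloud Hypervisor code.

# SC2086: quote expansions so they are not word-split or glob-expanded.
working_dir=$(mktemp -d)
pushd "$working_dir" || exit # SC2164: pushd/popd/cd can fail, so guard them.

# SC2181: test the command itself instead of checking $? afterwards.
if ! touch example.img; then
    echo "failed to create example.img"
    exit 1
fi

# SC2126: count matches with grep -c rather than piping grep into wc -l.
virtio_count=$(grep -cE "CONFIG_VIRTIO=y|CONFIG_VIRTIO_BLK=y" <<<"CONFIG_VIRTIO=y")

# SC2162: read -r keeps backslashes in the input intact.
read -r device size <<<"/dev/nbd0 16777216"

echo "virtio_count=$virtio_count device=$device size=$size"

popd || exit
rm -rf "$working_dir"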
@@ -16,7 +16,7 @@ where:
|
||||
-w directory to be used for temporary files"
|
||||
|
||||
function check_command {
|
||||
if ! command -v $1 &>/dev/null; then
|
||||
if ! command -v "$1" &>/dev/null; then
|
||||
echo "Command $1 could not be found"
|
||||
exit 1
|
||||
fi
|
||||
@@ -68,7 +68,7 @@ if [[ ! -f ${file_name} ]]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
file_abs_path=$(readlink -m ${file_name})
|
||||
file_abs_path=$(readlink -m "${file_name}")
|
||||
if [[ "${working_dir}" != "" && ! -d "${working_dir}" ]]; then
|
||||
echo "Directory ${working_dir} does not exist"
|
||||
exit 1
|
||||
@@ -76,12 +76,12 @@ elif [[ "${working_dir}" == "" ]]; then
|
||||
working_dir=$(mktemp -d)
|
||||
tmp_created=1
|
||||
else
|
||||
working_dir=$(readlink -m ${working_dir})
|
||||
working_dir=$(readlink -m "${working_dir}")
|
||||
fi
|
||||
|
||||
filename="${file_name%.*}"
|
||||
dest_file=${working_dir}/${filename}.raw
|
||||
image_type=$(qemu-img info ${file_abs_path} | grep 'file format:' | awk '{ print $3 }')
|
||||
image_type=$(qemu-img info "${file_abs_path}" | grep 'file format:' | awk '{ print $3 }')
|
||||
echo "Image type detected as ${image_type}"
|
||||
|
||||
if [[ "${image_type}" == "raw" ]]; then
|
||||
@@ -91,26 +91,26 @@ elif [[ "$image_type" == "qcow2" ]]; then
|
||||
echo "Module nbd is loaded!"
|
||||
else
|
||||
echo "Module nbd is not loaded. Trying to load the module"
|
||||
modprobe nbd max_part=8
|
||||
if [ $? != 0 ]; then
|
||||
|
||||
if ! modprobe nbd max_part=8; then
|
||||
echo "failed to load nbd module. Exiting"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
check_command qemu-img
|
||||
dest_file=/dev/nbd0
|
||||
qemu-nbd --connect=${dest_file} ${file_abs_path} --read-only
|
||||
qemu-nbd --connect=${dest_file} "${file_abs_path}" --read-only
|
||||
fi
|
||||
|
||||
check_command blkid
|
||||
#get part info
|
||||
part_type=$(blkid -o value -s PTTYPE ${dest_file})
|
||||
part_type=$(blkid -o value -s PTTYPE "${dest_file}")
|
||||
|
||||
check_command partx
|
||||
nr_partitions=$(partx -g ${dest_file} | wc -l)
|
||||
nr_partitions=$(partx -g "${dest_file}" | wc -l)
|
||||
|
||||
check_command fdisk
|
||||
out=$(fdisk -l ${dest_file} --bytes | grep -i -A ${nr_partitions} 'Device' | tail -n +2)
|
||||
out=$(fdisk -l "${dest_file}" --bytes | grep -i -A "${nr_partitions}" 'Device' | tail -n +2)
|
||||
|
||||
IFS='
|
||||
'
|
||||
@@ -128,7 +128,7 @@ ROWS=${#lines[@]}
|
||||
|
||||
for line in "${lines[@]}"; do
|
||||
j=0
|
||||
read -a str_arr <<<"$line"
|
||||
read -a -r str_arr <<<"$line"
|
||||
for val in "${str_arr[@]}"; do
|
||||
if [[ "$val" != "*" ]]; then
|
||||
partitions[$i, $j]=$val
|
||||
@@ -163,9 +163,9 @@ MOUNT_DIR=/mnt/clh-img-check/
|
||||
rm -rf ${MOUNT_DIR}
|
||||
mkdir ${MOUNT_DIR}
|
||||
if [[ "${image_type}" == "raw" ]]; then
|
||||
mount -o ro,loop,offset=$offset ${dest_file} ${MOUNT_DIR}
|
||||
mount -o ro,loop,offset=$offset "${dest_file}" ${MOUNT_DIR}
|
||||
elif [[ "${image_type}" == "qcow2" ]]; then
|
||||
mount -o ro ${partitions[${MOUNT_ROW}, ${DEVICE_INDEX}]} ${MOUNT_DIR}
|
||||
mount -o ro "${partitions[${MOUNT_ROW}, ${DEVICE_INDEX}]}" ${MOUNT_DIR}
|
||||
fi
|
||||
|
||||
CONFIG_DIR=${MOUNT_DIR}boot/
|
||||
@@ -175,8 +175,8 @@ fi
|
||||
|
||||
#check VIRTIO
|
||||
HAS_VIRTIO=1
|
||||
for conf_file in ${CONFIG_DIR}config*; do
|
||||
out=$(grep -E "CONFIG_VIRTIO=y|CONFIG_VIRTIO_BLK=y|CONFIG_VIRTIO_BLK=m" ${conf_file} | wc -l)
|
||||
for conf_file in "${CONFIG_DIR}"config*; do
|
||||
out=$(grep -cE "CONFIG_VIRTIO=y|CONFIG_VIRTIO_BLK=y|CONFIG_VIRTIO_BLK=m" "${conf_file}")
|
||||
if [[ "$out" != "2" ]]; then
|
||||
echo "VIRTIO not found"
|
||||
HAS_VIRTIO=0
|
||||
@@ -187,11 +187,11 @@ done
|
||||
umount ${MOUNT_DIR}
|
||||
|
||||
if [[ "${tmp_created}" == "1" ]]; then
|
||||
rm -rf ${working_dir}
|
||||
rm -rf "${working_dir}"
|
||||
fi
|
||||
|
||||
if [[ "${image_type}" == "qcow2" ]]; then
|
||||
qemu-nbd --disconnect ${dest_file} >/dev/null
|
||||
qemu-nbd --disconnect "${dest_file}" >/dev/null
|
||||
fi
|
||||
|
||||
result=""
|
||||
|
@@ -22,25 +22,26 @@ build_edk2() {
|
||||
|
||||
# Prepare source code
|
||||
checkout_repo "$EDK2_DIR" "$EDK2_REPO" master "46b4606ba23498d3d0e66b53e498eb3d5d592586"
|
||||
pushd "$EDK2_DIR"
|
||||
pushd "$EDK2_DIR" || exit
|
||||
git submodule update --init
|
||||
popd
|
||||
popd || exit
|
||||
checkout_repo "$EDK2_PLAT_DIR" "$EDK2_PLAT_REPO" master "8227e9e9f6a8aefbd772b40138f835121ccb2307"
|
||||
checkout_repo "$ACPICA_DIR" "$ACPICA_REPO" master "b9c69f81a05c45611c91ea9cbce8756078d76233"
|
||||
|
||||
if [[ ! -f "$EDK2_DIR/.built" ||
|
||||
! -f "$EDK2_PLAT_DIR/.built" ||
|
||||
! -f "$ACPICA_DIR/.built" ]]; then
|
||||
pushd "$EDK2_BUILD_DIR"
|
||||
pushd "$EDK2_BUILD_DIR" || exit
|
||||
# Build
|
||||
make -C acpica -j $(nproc)
|
||||
make -C acpica -j "$(nproc)"
|
||||
# shellcheck disable=SC1091
|
||||
source edk2/edksetup.sh
|
||||
make -C edk2/BaseTools -j $(nproc)
|
||||
make -C edk2/BaseTools -j "$(nproc)"
|
||||
build -a AARCH64 -t GCC5 -p ArmVirtPkg/ArmVirtCloudHv.dsc -b RELEASE -n 0
|
||||
cp Build/ArmVirtCloudHv-AARCH64/RELEASE_GCC5/FV/CLOUDHV_EFI.fd "$WORKLOADS_DIR"
|
||||
touch "$EDK2_DIR"/.built
|
||||
touch "$EDK2_PLAT_DIR"/.built
|
||||
touch "$ACPICA_DIR"/.built
|
||||
popd
|
||||
popd || exit
|
||||
fi
|
||||
}
|
||||
|
@@ -121,9 +121,7 @@ ensure_latest_ctr() {
|
||||
if [ "$CTR_IMAGE_VERSION" = "local" ]; then
|
||||
build_container
|
||||
else
|
||||
$DOCKER_RUNTIME pull "$CTR_IMAGE"
|
||||
|
||||
if [ $? -ne 0 ]; then
|
||||
if ! $DOCKER_RUNTIME pull "$CTR_IMAGE"; then
|
||||
build_container
|
||||
fi
|
||||
|
||||
@@ -143,7 +141,8 @@ fix_dir_perms() {
|
||||
--workdir "$CTR_CLH_ROOT_DIR" \
|
||||
--rm \
|
||||
--volume /dev:/dev \
|
||||
--volume "$CLH_ROOT_DIR:$CTR_CLH_ROOT_DIR" $exported_volumes \
|
||||
--volume "$CLH_ROOT_DIR:$CTR_CLH_ROOT_DIR" \
|
||||
${exported_volumes:+"$exported_volumes"} \
|
||||
"$CTR_IMAGE" \
|
||||
chown -R "$(id -u):$(id -g)" "$CTR_CLH_ROOT_DIR"
|
||||
|
||||
@@ -158,9 +157,9 @@ process_volumes_args() {
|
||||
return
|
||||
fi
|
||||
exported_volumes=""
|
||||
arr_vols=(${arg_vols//#/ })
|
||||
arr_vols=("${arg_vols//#/ }")
|
||||
for var in "${arr_vols[@]}"; do
|
||||
parts=(${var//:/ })
|
||||
parts=("${var//:/ }")
|
||||
if [[ ! -e "${parts[0]}" ]]; then
|
||||
echo "The volume ${parts[0]} does not exist."
|
||||
exit 1
|
||||
@@ -282,6 +281,7 @@ cmd_build() {
|
||||
[ $build = "release" ] && cargo_args+=("--release")
|
||||
cargo_args+=(--target "$target")
|
||||
|
||||
# shellcheck disable=SC2153
|
||||
rustflags="$RUSTFLAGS"
|
||||
target_cc=""
|
||||
if [ "$(uname -m)" = "aarch64" ] && [ "$libc" = "musl" ]; then
|
||||
@@ -293,11 +293,12 @@ cmd_build() {
|
||||
--workdir "$CTR_CLH_ROOT_DIR" \
|
||||
--rm \
|
||||
--volume $exported_device \
|
||||
--volume "$CLH_ROOT_DIR:$CTR_CLH_ROOT_DIR" $exported_volumes \
|
||||
--volume "$CLH_ROOT_DIR:$CTR_CLH_ROOT_DIR" \
|
||||
${exported_volumes:+"$exported_volumes"} \
|
||||
--env RUSTFLAGS="$rustflags" \
|
||||
--env TARGET_CC="$target_cc" \
|
||||
"$CTR_IMAGE" \
|
||||
cargo build --all $features_build \
|
||||
cargo build --all "$features_build" \
|
||||
--target-dir "$CTR_CLH_CARGO_TARGET" \
|
||||
"${cargo_args[@]}" && say "Binaries placed under $CLH_CARGO_TARGET/$target/$build"
|
||||
}
|
||||
@@ -312,7 +313,8 @@ cmd_clean() {
|
||||
--user "$(id -u):$(id -g)" \
|
||||
--workdir "$CTR_CLH_ROOT_DIR" \
|
||||
--rm \
|
||||
--volume "$CLH_ROOT_DIR:$CTR_CLH_ROOT_DIR" $exported_volumes \
|
||||
--volume "$CLH_ROOT_DIR:$CTR_CLH_ROOT_DIR" \
|
||||
${exported_volumes:+"$exported_volumes"} \
|
||||
"$CTR_IMAGE" \
|
||||
cargo clean \
|
||||
--target-dir "$CTR_CLH_CARGO_TARGET" \
|
||||
@@ -408,7 +410,8 @@ cmd_tests() {
|
||||
--device $exported_device \
|
||||
--device /dev/net/tun \
|
||||
--cap-add net_admin \
|
||||
--volume "$CLH_ROOT_DIR:$CTR_CLH_ROOT_DIR" $exported_volumes \
|
||||
--volume "$CLH_ROOT_DIR:$CTR_CLH_ROOT_DIR" \
|
||||
${exported_volumes:+"$exported_volumes"} \
|
||||
--env BUILD_TARGET="$target" \
|
||||
--env RUSTFLAGS="$rustflags" \
|
||||
--env TARGET_CC="$target_cc" \
|
||||
@@ -427,7 +430,8 @@ cmd_tests() {
|
||||
--net="$CTR_CLH_NET" \
|
||||
--mount type=tmpfs,destination=/tmp \
|
||||
--volume /dev:/dev \
|
||||
--volume "$CLH_ROOT_DIR:$CTR_CLH_ROOT_DIR" $exported_volumes \
|
||||
--volume "$CLH_ROOT_DIR:$CTR_CLH_ROOT_DIR" \
|
||||
${exported_volumes:+"$exported_volumes"} \
|
||||
--volume "$CLH_INTEGRATION_WORKLOADS:$CTR_CLH_INTEGRATION_WORKLOADS" \
|
||||
--env USER="root" \
|
||||
--env BUILD_TARGET="$target" \
|
||||
@@ -449,7 +453,8 @@ cmd_tests() {
|
||||
--net="$CTR_CLH_NET" \
|
||||
--mount type=tmpfs,destination=/tmp \
|
||||
--volume /dev:/dev \
|
||||
--volume "$CLH_ROOT_DIR:$CTR_CLH_ROOT_DIR" $exported_volumes \
|
||||
--volume "$CLH_ROOT_DIR:$CTR_CLH_ROOT_DIR" \
|
||||
${exported_volumes:+"$exported_volumes"} \
|
||||
--volume "$CLH_INTEGRATION_WORKLOADS:$CTR_CLH_INTEGRATION_WORKLOADS" \
|
||||
--env USER="root" \
|
||||
--env BUILD_TARGET="$target" \
|
||||
@@ -471,7 +476,8 @@ cmd_tests() {
|
||||
--net="$CTR_CLH_NET" \
|
||||
--mount type=tmpfs,destination=/tmp \
|
||||
--volume /dev:/dev \
|
||||
--volume "$CLH_ROOT_DIR:$CTR_CLH_ROOT_DIR" $exported_volumes \
|
||||
--volume "$CLH_ROOT_DIR:$CTR_CLH_ROOT_DIR" \
|
||||
${exported_volumes:+"$exported_volumes"} \
|
||||
--volume "$CLH_INTEGRATION_WORKLOADS:$CTR_CLH_INTEGRATION_WORKLOADS" \
|
||||
--env USER="root" \
|
||||
--env BUILD_TARGET="$target" \
|
||||
@@ -493,7 +499,8 @@ cmd_tests() {
|
||||
--net="$CTR_CLH_NET" \
|
||||
--mount type=tmpfs,destination=/tmp \
|
||||
--volume /dev:/dev \
|
||||
--volume "$CLH_ROOT_DIR:$CTR_CLH_ROOT_DIR" $exported_volumes \
|
||||
--volume "$CLH_ROOT_DIR:$CTR_CLH_ROOT_DIR" \
|
||||
${exported_volumes:+"$exported_volumes"} \
|
||||
--volume "$CLH_INTEGRATION_WORKLOADS:$CTR_CLH_INTEGRATION_WORKLOADS" \
|
||||
--env USER="root" \
|
||||
--env BUILD_TARGET="$target" \
|
||||
@@ -515,7 +522,8 @@ cmd_tests() {
|
||||
--net="$CTR_CLH_NET" \
|
||||
--mount type=tmpfs,destination=/tmp \
|
||||
--volume /dev:/dev \
|
||||
--volume "$CLH_ROOT_DIR:$CTR_CLH_ROOT_DIR" $exported_volumes \
|
||||
--volume "$CLH_ROOT_DIR:$CTR_CLH_ROOT_DIR" \
|
||||
${exported_volumes:+"$exported_volumes"} \
|
||||
--volume "$CLH_INTEGRATION_WORKLOADS:$CTR_CLH_INTEGRATION_WORKLOADS" \
|
||||
--env USER="root" \
|
||||
--env BUILD_TARGET="$target" \
|
||||
@@ -537,7 +545,8 @@ cmd_tests() {
|
||||
--net="$CTR_CLH_NET" \
|
||||
--mount type=tmpfs,destination=/tmp \
|
||||
--volume /dev:/dev \
|
||||
--volume "$CLH_ROOT_DIR:$CTR_CLH_ROOT_DIR" $exported_volumes \
|
||||
--volume "$CLH_ROOT_DIR:$CTR_CLH_ROOT_DIR" \
|
||||
${exported_volumes:+"$exported_volumes"} \
|
||||
--volume "$CLH_INTEGRATION_WORKLOADS:$CTR_CLH_INTEGRATION_WORKLOADS" \
|
||||
--env USER="root" \
|
||||
--env BUILD_TARGET="$target" \
|
||||
@@ -559,7 +568,8 @@ cmd_tests() {
|
||||
--net="$CTR_CLH_NET" \
|
||||
--mount type=tmpfs,destination=/tmp \
|
||||
--volume /dev:/dev \
|
||||
--volume "$CLH_ROOT_DIR:$CTR_CLH_ROOT_DIR" $exported_volumes \
|
||||
--volume "$CLH_ROOT_DIR:$CTR_CLH_ROOT_DIR" \
|
||||
${exported_volumes:+"$exported_volumes"} \
|
||||
--volume "$CLH_INTEGRATION_WORKLOADS:$CTR_CLH_INTEGRATION_WORKLOADS" \
|
||||
--env USER="root" \
|
||||
--env BUILD_TARGET="$target" \
|
||||
@@ -587,9 +597,9 @@ build_container() {
|
||||
|
||||
$DOCKER_RUNTIME build \
|
||||
--target dev \
|
||||
-t $CTR_IMAGE \
|
||||
-t "$CTR_IMAGE" \
|
||||
-f $BUILD_DIR/Dockerfile \
|
||||
--build-arg TARGETARCH=$TARGETARCH \
|
||||
--build-arg TARGETARCH="$TARGETARCH" \
|
||||
$BUILD_DIR
|
||||
}
|
||||
|
||||
@@ -649,7 +659,8 @@ cmd_shell() {
|
||||
--net="$CTR_CLH_NET" \
|
||||
--tmpfs /tmp:exec \
|
||||
--volume /dev:/dev \
|
||||
--volume "$CLH_ROOT_DIR:$CTR_CLH_ROOT_DIR" $exported_volumes \
|
||||
--volume "$CLH_ROOT_DIR:$CTR_CLH_ROOT_DIR" \
|
||||
${exported_volumes:+"$exported_volumes"} \
|
||||
--volume "$CLH_INTEGRATION_WORKLOADS:$CTR_CLH_INTEGRATION_WORKLOADS" \
|
||||
--env USER="root" \
|
||||
--entrypoint bash \
|
||||
|
@@ -4,17 +4,17 @@ set -x
|
||||
sudo apt install -y libncurses-dev gawk flex bison openssl libssl-dev dkms libelf-dev libudev-dev libpci-dev libiberty-dev autoconf git make dpkg-dev libmnl-dev pkg-config iproute2
|
||||
sudo sed -i -- 's/# deb-src/deb-src/g' /etc/apt/sources.list
|
||||
sudo apt update
|
||||
apt-get source linux-image-unsigned-$(uname -r)
|
||||
pushd linux-azure*/drivers/vdpa/vdpa_sim/
|
||||
apt-get source linux-image-unsigned-"$(uname -r)"
|
||||
pushd linux-azure*/drivers/vdpa/vdpa_sim/ || exit
|
||||
cat <<'EOF' >Makefile
|
||||
# SPDX-License-Identifier: GPL-2.0
|
||||
obj-m += vdpa_sim.o
|
||||
obj-m += vdpa_sim_net.o
|
||||
obj-m += vdpa_sim_blk.o
|
||||
EOF
|
||||
make -C /lib/modules/$(uname -r)/build M=$PWD
|
||||
sudo make -C /lib/modules/$(uname -r)/build M=$PWD modules_install
|
||||
popd
|
||||
make -C /lib/modules/"$(uname -r)"/build M="$PWD"
|
||||
sudo make -C /lib/modules/"$(uname -r)"/build M="$PWD" modules_install
|
||||
popd || exit
|
||||
sudo depmod -a
|
||||
sudo modprobe vdpa
|
||||
sudo modprobe vhost_vdpa
|
||||
|
@@ -1,9 +1,11 @@
|
||||
#!/usr/bin/env bash
|
||||
# shellcheck disable=SC2048,SC2086
|
||||
set -x
|
||||
|
||||
source $HOME/.cargo/env
|
||||
source $(dirname "$0")/test-util.sh
|
||||
source $(dirname "$0")/common-aarch64.sh
|
||||
# shellcheck source=/dev/null
|
||||
source "$HOME"/.cargo/env
|
||||
source "$(dirname "$0")"/test-util.sh
|
||||
source "$(dirname "$0")"/common-aarch64.sh
|
||||
|
||||
WORKLOADS_LOCK="$WORKLOADS_DIR/integration_test.lock"
|
||||
|
||||
@@ -14,16 +16,16 @@ build_spdk_nvme() {
|
||||
checkout_repo "$SPDK_DIR" "$SPDK_REPO" master "ef8bcce58f3f02b79c0619a297e4f17e81e62b24"
|
||||
|
||||
if [ ! -f "$SPDK_DIR/.built" ]; then
|
||||
pushd $SPDK_DIR
|
||||
pushd "$SPDK_DIR" || exit
|
||||
git submodule update --init
|
||||
apt-get update
|
||||
sed -i "/grpcio/d" scripts/pkgdep/debian.sh
|
||||
./scripts/pkgdep.sh
|
||||
./configure --with-vfio-user
|
||||
chmod +x /usr/local/lib/python3.10/dist-packages/ninja/data/bin/ninja
|
||||
make -j $(nproc) || exit 1
|
||||
make -j "$(nproc)" || exit 1
|
||||
touch .built
|
||||
popd
|
||||
popd || exit
|
||||
fi
|
||||
if [ ! -d "/usr/local/bin/spdk-nvme" ]; then
|
||||
mkdir -p $SPDK_DEPLOY_DIR
|
||||
@@ -41,33 +43,33 @@ build_virtiofsd() {
|
||||
checkout_repo "$VIRTIOFSD_DIR" "$VIRTIOFSD_REPO" v1.8.0 "97ea7908fe7f9bc59916671a771bdcfaf4044b45"
|
||||
|
||||
if [ ! -f "$VIRTIOFSD_DIR/.built" ]; then
|
||||
pushd $VIRTIOFSD_DIR
|
||||
pushd "$VIRTIOFSD_DIR" || exit
|
||||
rm -rf target/
|
||||
time RUSTFLAGS="" TARGET_CC="" cargo build --release
|
||||
cp target/release/virtiofsd "$WORKLOADS_DIR/" || exit 1
|
||||
touch .built
|
||||
popd
|
||||
popd || exit
|
||||
fi
|
||||
}
|
||||
|
||||
update_workloads() {
|
||||
cp scripts/sha1sums-aarch64 $WORKLOADS_DIR
|
||||
cp scripts/sha1sums-aarch64 "$WORKLOADS_DIR"
|
||||
|
||||
BIONIC_OS_IMAGE_DOWNLOAD_NAME="bionic-server-cloudimg-arm64.img"
|
||||
BIONIC_OS_IMAGE_DOWNLOAD_URL="https://cloud-hypervisor.azureedge.net/$BIONIC_OS_IMAGE_DOWNLOAD_NAME"
|
||||
BIONIC_OS_DOWNLOAD_IMAGE="$WORKLOADS_DIR/$BIONIC_OS_IMAGE_DOWNLOAD_NAME"
|
||||
if [ ! -f "$BIONIC_OS_DOWNLOAD_IMAGE" ]; then
|
||||
pushd $WORKLOADS_DIR
|
||||
pushd "$WORKLOADS_DIR" || exit
|
||||
time wget --quiet $BIONIC_OS_IMAGE_DOWNLOAD_URL || exit 1
|
||||
popd
|
||||
popd || exit
|
||||
fi
|
||||
|
||||
BIONIC_OS_RAW_IMAGE_NAME="bionic-server-cloudimg-arm64.raw"
|
||||
BIONIC_OS_RAW_IMAGE="$WORKLOADS_DIR/$BIONIC_OS_RAW_IMAGE_NAME"
|
||||
if [ ! -f "$BIONIC_OS_RAW_IMAGE" ]; then
|
||||
pushd $WORKLOADS_DIR
|
||||
pushd "$WORKLOADS_DIR" || exit
|
||||
time qemu-img convert -p -f qcow2 -O raw $BIONIC_OS_IMAGE_DOWNLOAD_NAME $BIONIC_OS_RAW_IMAGE_NAME || exit 1
|
||||
popd
|
||||
popd || exit
|
||||
fi
|
||||
|
||||
# Convert the raw image to qcow2 image to remove compressed blocks from the disk. Therefore letting the
|
||||
@@ -75,66 +77,66 @@ update_workloads() {
|
||||
BIONIC_OS_QCOW2_IMAGE_UNCOMPRESSED_NAME="bionic-server-cloudimg-arm64.qcow2"
|
||||
BIONIC_OS_QCOW2_UNCOMPRESSED_IMAGE="$WORKLOADS_DIR/$BIONIC_OS_QCOW2_IMAGE_UNCOMPRESSED_NAME"
|
||||
if [ ! -f "$BIONIC_OS_QCOW2_UNCOMPRESSED_IMAGE" ]; then
|
||||
pushd $WORKLOADS_DIR
|
||||
time qemu-img convert -p -f raw -O qcow2 $BIONIC_OS_RAW_IMAGE_NAME $BIONIC_OS_QCOW2_UNCOMPRESSED_IMAGE || exit 1
|
||||
popd
|
||||
pushd "$WORKLOADS_DIR" || exit
|
||||
time qemu-img convert -p -f raw -O qcow2 $BIONIC_OS_RAW_IMAGE_NAME "$BIONIC_OS_QCOW2_UNCOMPRESSED_IMAGE" || exit 1
|
||||
popd || exit
|
||||
fi
|
||||
|
||||
FOCAL_OS_RAW_IMAGE_NAME="focal-server-cloudimg-arm64-custom-20210929-0.raw"
|
||||
FOCAL_OS_RAW_IMAGE_DOWNLOAD_URL="https://cloud-hypervisor.azureedge.net/$FOCAL_OS_RAW_IMAGE_NAME"
|
||||
FOCAL_OS_RAW_IMAGE="$WORKLOADS_DIR/$FOCAL_OS_RAW_IMAGE_NAME"
|
||||
if [ ! -f "$FOCAL_OS_RAW_IMAGE" ]; then
|
||||
pushd $WORKLOADS_DIR
|
||||
pushd "$WORKLOADS_DIR" || exit
|
||||
time wget --quiet $FOCAL_OS_RAW_IMAGE_DOWNLOAD_URL || exit 1
|
||||
popd
|
||||
popd || exit
|
||||
fi
|
||||
|
||||
FOCAL_OS_QCOW2_IMAGE_UNCOMPRESSED_NAME="focal-server-cloudimg-arm64-custom-20210929-0.qcow2"
|
||||
FOCAL_OS_QCOW2_IMAGE_UNCOMPRESSED_DOWNLOAD_URL="https://cloud-hypervisor.azureedge.net/$FOCAL_OS_QCOW2_IMAGE_UNCOMPRESSED_NAME"
|
||||
FOCAL_OS_QCOW2_UNCOMPRESSED_IMAGE="$WORKLOADS_DIR/$FOCAL_OS_QCOW2_IMAGE_UNCOMPRESSED_NAME"
|
||||
if [ ! -f "$FOCAL_OS_QCOW2_UNCOMPRESSED_IMAGE" ]; then
|
||||
pushd $WORKLOADS_DIR
|
||||
pushd "$WORKLOADS_DIR" || exit
|
||||
time wget --quiet $FOCAL_OS_QCOW2_IMAGE_UNCOMPRESSED_DOWNLOAD_URL || exit 1
|
||||
popd
|
||||
popd || exit
|
||||
fi
|
||||
|
||||
FOCAL_OS_QCOW2_IMAGE_BACKING_FILE_NAME="focal-server-cloudimg-arm64-custom-20210929-0-backing.qcow2"
|
||||
FOCAL_OS_QCOW2_BACKING_FILE_IMAGE="$WORKLOADS_DIR/$FOCAL_OS_QCOW2_IMAGE_BACKING_FILE_NAME"
|
||||
if [ ! -f "$FOCAL_OS_QCOW2_BACKING_FILE_IMAGE" ]; then
|
||||
pushd $WORKLOADS_DIR
|
||||
time qemu-img create -f qcow2 -b $FOCAL_OS_QCOW2_UNCOMPRESSED_IMAGE -F qcow2 $FOCAL_OS_QCOW2_IMAGE_BACKING_FILE_NAME
|
||||
popd
|
||||
pushd "$WORKLOADS_DIR" || exit
|
||||
time qemu-img create -f qcow2 -b "$FOCAL_OS_QCOW2_UNCOMPRESSED_IMAGE" -F qcow2 $FOCAL_OS_QCOW2_IMAGE_BACKING_FILE_NAME
|
||||
popd || exit
|
||||
fi
|
||||
|
||||
JAMMY_OS_RAW_IMAGE_NAME="jammy-server-cloudimg-arm64-custom-20220329-0.raw"
|
||||
JAMMY_OS_RAW_IMAGE_DOWNLOAD_URL="https://cloud-hypervisor.azureedge.net/$JAMMY_OS_RAW_IMAGE_NAME"
|
||||
JAMMY_OS_RAW_IMAGE="$WORKLOADS_DIR/$JAMMY_OS_RAW_IMAGE_NAME"
|
||||
if [ ! -f "$JAMMY_OS_RAW_IMAGE" ]; then
|
||||
pushd $WORKLOADS_DIR
|
||||
pushd "$WORKLOADS_DIR" || exit
|
||||
time wget --quiet $JAMMY_OS_RAW_IMAGE_DOWNLOAD_URL || exit 1
|
||||
popd
|
||||
popd || exit
|
||||
fi
|
||||
|
||||
JAMMY_OS_QCOW2_IMAGE_UNCOMPRESSED_NAME="jammy-server-cloudimg-arm64-custom-20220329-0.qcow2"
|
||||
JAMMY_OS_QCOW2_IMAGE_UNCOMPRESSED_DOWNLOAD_URL="https://cloud-hypervisor.azureedge.net/$JAMMY_OS_QCOW2_IMAGE_UNCOMPRESSED_NAME"
|
||||
JAMMY_OS_QCOW2_UNCOMPRESSED_IMAGE="$WORKLOADS_DIR/$JAMMY_OS_QCOW2_IMAGE_UNCOMPRESSED_NAME"
|
||||
if [ ! -f "$JAMMY_OS_QCOW2_UNCOMPRESSED_IMAGE" ]; then
|
||||
pushd $WORKLOADS_DIR
|
||||
pushd "$WORKLOADS_DIR" || exit
|
||||
time wget --quiet $JAMMY_OS_QCOW2_IMAGE_UNCOMPRESSED_DOWNLOAD_URL || exit 1
|
||||
popd
|
||||
popd || exit
|
||||
fi
|
||||
|
||||
ALPINE_MINIROOTFS_URL="http://dl-cdn.alpinelinux.org/alpine/v3.11/releases/aarch64/alpine-minirootfs-3.11.3-aarch64.tar.gz"
|
||||
ALPINE_MINIROOTFS_TARBALL="$WORKLOADS_DIR/alpine-minirootfs-aarch64.tar.gz"
|
||||
if [ ! -f "$ALPINE_MINIROOTFS_TARBALL" ]; then
|
||||
pushd $WORKLOADS_DIR
|
||||
time wget --quiet $ALPINE_MINIROOTFS_URL -O $ALPINE_MINIROOTFS_TARBALL || exit 1
|
||||
popd
|
||||
pushd "$WORKLOADS_DIR" || exit
|
||||
time wget --quiet $ALPINE_MINIROOTFS_URL -O "$ALPINE_MINIROOTFS_TARBALL" || exit 1
|
||||
popd || exit
|
||||
fi
|
||||
|
||||
ALPINE_INITRAMFS_IMAGE="$WORKLOADS_DIR/alpine_initramfs.img"
|
||||
if [ ! -f "$ALPINE_INITRAMFS_IMAGE" ]; then
|
||||
pushd $WORKLOADS_DIR
|
||||
pushd "$WORKLOADS_DIR" || exit
|
||||
mkdir alpine-minirootfs
|
||||
tar xf "$ALPINE_MINIROOTFS_TARBALL" -C alpine-minirootfs
|
||||
cat >alpine-minirootfs/init <<-EOF
|
||||
@@ -144,25 +146,25 @@ update_workloads() {
|
||||
poweroff -f
|
||||
EOF
|
||||
chmod +x alpine-minirootfs/init
|
||||
cd alpine-minirootfs
|
||||
cd alpine-minirootfs || exit
|
||||
find . -print0 |
|
||||
cpio --null --create --verbose --owner root:root --format=newc >"$ALPINE_INITRAMFS_IMAGE"
|
||||
popd
|
||||
popd || exit
|
||||
fi
|
||||
|
||||
pushd $WORKLOADS_DIR
|
||||
sha1sum sha1sums-aarch64 --check
|
||||
if [ $? -ne 0 ]; then
|
||||
pushd "$WORKLOADS_DIR" || exit
|
||||
|
||||
if ! sha1sum sha1sums-aarch64 --check; then
|
||||
echo "sha1sum validation of images failed, remove invalid images to fix the issue."
|
||||
exit 1
|
||||
fi
|
||||
popd
|
||||
popd || exit
|
||||
|
||||
# Download Cloud Hypervisor binary from its last stable release
|
||||
LAST_RELEASE_VERSION="v36.0"
|
||||
CH_RELEASE_URL="https://github.com/cloud-hypervisor/cloud-hypervisor/releases/download/$LAST_RELEASE_VERSION/cloud-hypervisor-static-aarch64"
|
||||
CH_RELEASE_NAME="cloud-hypervisor-static-aarch64"
|
||||
pushd $WORKLOADS_DIR
|
||||
pushd "$WORKLOADS_DIR" || exit
|
||||
# Repeat a few times to workaround a random wget failure
|
||||
WGET_RETRY_MAX=10
|
||||
wget_retry=0
|
||||
@@ -177,7 +179,7 @@ update_workloads() {
|
||||
else
|
||||
chmod +x $CH_RELEASE_NAME
|
||||
fi
|
||||
popd
|
||||
popd || exit
|
||||
|
||||
# Build custom kernel for guest VMs
|
||||
build_custom_linux
|
||||
@@ -198,20 +200,20 @@ update_workloads() {
|
||||
BLK_IMAGE="$WORKLOADS_DIR/blk.img"
|
||||
MNT_DIR="mount_image"
|
||||
if [ ! -f "$BLK_IMAGE" ]; then
|
||||
pushd $WORKLOADS_DIR
|
||||
fallocate -l 16M $BLK_IMAGE
|
||||
mkfs.ext4 -j $BLK_IMAGE
|
||||
pushd "$WORKLOADS_DIR" || exit
|
||||
fallocate -l 16M "$BLK_IMAGE"
|
||||
mkfs.ext4 -j "$BLK_IMAGE"
|
||||
mkdir $MNT_DIR
|
||||
sudo mount -t ext4 $BLK_IMAGE $MNT_DIR
|
||||
sudo mount -t ext4 "$BLK_IMAGE" $MNT_DIR
|
||||
sudo bash -c "echo bar > $MNT_DIR/foo" || exit 1
|
||||
sudo umount $BLK_IMAGE
|
||||
sudo umount "$BLK_IMAGE"
|
||||
rm -r $MNT_DIR
|
||||
popd
|
||||
popd || exit
|
||||
fi
|
||||
|
||||
SHARED_DIR="$WORKLOADS_DIR/shared_dir"
|
||||
if [ ! -d "$SHARED_DIR" ]; then
|
||||
mkdir -p $SHARED_DIR
|
||||
mkdir -p "$SHARED_DIR"
|
||||
echo "foo" >"$SHARED_DIR/file1"
|
||||
echo "bar" >"$SHARED_DIR/file3" || exit 1
|
||||
fi
|
||||
@@ -235,7 +237,7 @@ fi
|
||||
(
|
||||
echo "try to lock $WORKLOADS_DIR folder and update"
|
||||
flock -x 12 && update_workloads
|
||||
) 12>$WORKLOADS_LOCK
|
||||
) 12>"$WORKLOADS_LOCK"
|
||||
|
||||
# Check if there is any error in the execution of `update_workloads`.
|
||||
# If there is any error, then kill the shell. Otherwise the script will continue
|
||||
@@ -247,7 +249,7 @@ fi
|
||||
|
||||
export RUST_BACKTRACE=1
|
||||
|
||||
cargo build --all --release --target $BUILD_TARGET
|
||||
cargo build --all --release --target "$BUILD_TARGET"
|
||||
|
||||
# Enable KSM with some reasonable parameters so that it won't take too long
|
||||
# for the memory to be merged between two processes.
|
||||
@@ -257,18 +259,18 @@ sudo bash -c "echo 1 > /sys/kernel/mm/ksm/run"
|
||||
|
||||
# Both test_vfio and ovs-dpdk rely on hugepages
|
||||
HUGEPAGESIZE=$(grep Hugepagesize /proc/meminfo | awk '{print $2}')
|
||||
PAGE_NUM=$(echo $((12288 * 1024 / $HUGEPAGESIZE)))
|
||||
echo $PAGE_NUM | sudo tee /proc/sys/vm/nr_hugepages
|
||||
PAGE_NUM=$((12288 * 1024 / HUGEPAGESIZE))
|
||||
echo "$PAGE_NUM" | sudo tee /proc/sys/vm/nr_hugepages
|
||||
sudo chmod a+rwX /dev/hugepages
|
||||
|
||||
# Run all direct kernel boot (Device Tree) test cases in mod `parallel`
|
||||
time cargo test "common_parallel::$test_filter" --target $BUILD_TARGET -- ${test_binary_args[*]}
|
||||
time cargo test "common_parallel::$test_filter" --target "$BUILD_TARGET" -- ${test_binary_args[*]}
|
||||
RES=$?
|
||||
|
||||
# Run some tests in sequence since the result could be affected by other tests
|
||||
# running in parallel.
|
||||
if [ $RES -eq 0 ]; then
|
||||
time cargo test "common_sequential::$test_filter" --target $BUILD_TARGET -- --test-threads=1 ${test_binary_args[*]}
|
||||
time cargo test "common_sequential::$test_filter" --target "$BUILD_TARGET" -- --test-threads=1 ${test_binary_args[*]}
|
||||
RES=$?
|
||||
else
|
||||
exit $RES
|
||||
@@ -276,7 +278,7 @@ fi
|
||||
|
||||
# Run all ACPI test cases
|
||||
if [ $RES -eq 0 ]; then
|
||||
time cargo test "aarch64_acpi::$test_filter" --target $BUILD_TARGET -- ${test_binary_args[*]}
|
||||
time cargo test "aarch64_acpi::$test_filter" --target "$BUILD_TARGET" -- ${test_binary_args[*]}
|
||||
RES=$?
|
||||
else
|
||||
exit $RES
|
||||
@@ -284,14 +286,14 @@ fi
|
||||
|
||||
# Run all test cases related to live migration
|
||||
if [ $RES -eq 0 ]; then
|
||||
time cargo test "live_migration_parallel::$test_filter" --target $BUILD_TARGET -- ${test_binary_args[*]}
|
||||
time cargo test "live_migration_parallel::$test_filter" --target "$BUILD_TARGET" -- ${test_binary_args[*]}
|
||||
RES=$?
|
||||
else
|
||||
exit $RES
|
||||
fi
|
||||
|
||||
if [ $RES -eq 0 ]; then
|
||||
time cargo test "live_migration_sequential::$test_filter" --target $BUILD_TARGET -- --test-threads=1 ${test_binary_args[*]}
|
||||
time cargo test "live_migration_sequential::$test_filter" --target "$BUILD_TARGET" -- --test-threads=1 ${test_binary_args[*]}
|
||||
RES=$?
|
||||
else
|
||||
exit $RES
|
||||
@@ -299,9 +301,9 @@ fi
|
||||
|
||||
# Run tests on dbus_api
|
||||
if [ $RES -eq 0 ]; then
|
||||
cargo build --features "dbus_api" --all --release --target $BUILD_TARGET
|
||||
cargo build --features "dbus_api" --all --release --target "$BUILD_TARGET"
|
||||
export RUST_BACKTRACE=1
|
||||
time cargo test "dbus_api::$test_filter" --target $BUILD_TARGET -- ${test_binary_args[*]}
|
||||
time cargo test "dbus_api::$test_filter" --target "$BUILD_TARGET" -- ${test_binary_args[*]}
|
||||
RES=$?
|
||||
fi
|
||||
|
||||
|
@@ -1,8 +1,10 @@
|
||||
#!/usr/bin/env bash
|
||||
# shellcheck disable=SC2048,SC2086
|
||||
set -x
|
||||
|
||||
source $HOME/.cargo/env
|
||||
source $(dirname "$0")/test-util.sh
|
||||
# shellcheck source=/dev/null
|
||||
source "$HOME"/.cargo/env
|
||||
source "$(dirname "$0")"/test-util.sh
|
||||
|
||||
WORKLOADS_DIR="$HOME/workloads"
|
||||
mkdir -p "$WORKLOADS_DIR"
|
||||
@@ -16,41 +18,40 @@ if [ "$hypervisor" = "mshv" ]; then
|
||||
test_features="--features mshv"
|
||||
fi
|
||||
|
||||
cp scripts/sha1sums-x86_64 $WORKLOADS_DIR
|
||||
cp scripts/sha1sums-x86_64 "$WORKLOADS_DIR"
|
||||
|
||||
FOCAL_OS_IMAGE_NAME="focal-server-cloudimg-amd64-custom-20210609-0.qcow2"
|
||||
FOCAL_OS_IMAGE_URL="https://cloud-hypervisor.azureedge.net/$FOCAL_OS_IMAGE_NAME"
|
||||
FOCAL_OS_IMAGE="$WORKLOADS_DIR/$FOCAL_OS_IMAGE_NAME"
|
||||
if [ ! -f "$FOCAL_OS_IMAGE" ]; then
|
||||
pushd $WORKLOADS_DIR
|
||||
pushd "$WORKLOADS_DIR" || exit
|
||||
time wget --quiet $FOCAL_OS_IMAGE_URL || exit 1
|
||||
popd
|
||||
popd || exit
|
||||
fi
|
||||
|
||||
FOCAL_OS_RAW_IMAGE_NAME="focal-server-cloudimg-amd64-custom-20210609-0.raw"
|
||||
FOCAL_OS_RAW_IMAGE="$WORKLOADS_DIR/$FOCAL_OS_RAW_IMAGE_NAME"
|
||||
if [ ! -f "$FOCAL_OS_RAW_IMAGE" ]; then
|
||||
pushd $WORKLOADS_DIR
|
||||
pushd "$WORKLOADS_DIR" || exit
|
||||
time qemu-img convert -p -f qcow2 -O raw $FOCAL_OS_IMAGE_NAME $FOCAL_OS_RAW_IMAGE_NAME || exit 1
|
||||
popd
|
||||
popd || exit
|
||||
fi
|
||||
|
||||
pushd $WORKLOADS_DIR
|
||||
grep focal sha1sums-x86_64 | sha1sum --check
|
||||
if [ $? -ne 0 ]; then
|
||||
pushd "$WORKLOADS_DIR" || exit
|
||||
if ! grep focal sha1sums-x86_64 | sha1sum --check; then
|
||||
echo "sha1sum validation of images failed, remove invalid images to fix the issue."
|
||||
exit 1
|
||||
fi
|
||||
popd
|
||||
popd || exit
|
||||
|
||||
# Download Cloud Hypervisor binary from its last stable release
|
||||
LAST_RELEASE_VERSION="v36.0"
|
||||
CH_RELEASE_URL="https://github.com/cloud-hypervisor/cloud-hypervisor/releases/download/$LAST_RELEASE_VERSION/cloud-hypervisor-static"
|
||||
CH_RELEASE_NAME="cloud-hypervisor-static"
|
||||
pushd $WORKLOADS_DIR
|
||||
pushd "$WORKLOADS_DIR" || exit
|
||||
time wget --quiet $CH_RELEASE_URL -O "$CH_RELEASE_NAME" || exit 1
|
||||
chmod +x $CH_RELEASE_NAME
|
||||
popd
|
||||
popd || exit
|
||||
|
||||
# Build custom kernel based on virtio-pmem and virtio-fs upstream patches
|
||||
VMLINUX_IMAGE="$WORKLOADS_DIR/vmlinux"
|
||||
@@ -60,15 +61,16 @@ fi
|
||||
|
||||
CFLAGS=""
|
||||
if [[ "${BUILD_TARGET}" == "x86_64-unknown-linux-musl" ]]; then
|
||||
# shellcheck disable=SC2034
|
||||
CFLAGS="-I /usr/include/x86_64-linux-musl/ -idirafter /usr/include/"
|
||||
fi
|
||||
|
||||
cargo build --features mshv --all --release --target $BUILD_TARGET
|
||||
cargo build --features mshv --all --release --target "$BUILD_TARGET"
|
||||
|
||||
# Test ovs-dpdk relies on hugepages
|
||||
HUGEPAGESIZE=$(grep Hugepagesize /proc/meminfo | awk '{print $2}')
|
||||
PAGE_NUM=$(echo $((12288 * 1024 / $HUGEPAGESIZE)))
|
||||
echo $PAGE_NUM | sudo tee /proc/sys/vm/nr_hugepages
|
||||
PAGE_NUM=$((12288 * 1024 / HUGEPAGESIZE))
|
||||
echo "$PAGE_NUM" | sudo tee /proc/sys/vm/nr_hugepages
|
||||
sudo chmod a+rwX /dev/hugepages
|
||||
|
||||
export RUST_BACKTRACE=1
|
||||
|
@@ -1,8 +1,10 @@
|
||||
#!/usr/bin/env bash
|
||||
# shellcheck disable=SC2048,SC2086
|
||||
set -x
|
||||
|
||||
source $HOME/.cargo/env
|
||||
source $(dirname "$0")/test-util.sh
|
||||
# shellcheck source=/dev/null
|
||||
source "$HOME"/.cargo/env
|
||||
source "$(dirname "$0")"/test-util.sh
|
||||
|
||||
WORKLOADS_DIR="$HOME/workloads"
|
||||
mkdir -p "$WORKLOADS_DIR"
|
||||
@@ -16,41 +18,41 @@ if [ "$hypervisor" = "mshv" ]; then
|
||||
test_features="--features mshv"
|
||||
fi
|
||||
|
||||
cp scripts/sha1sums-x86_64 $WORKLOADS_DIR
|
||||
cp scripts/sha1sums-x86_64 "$WORKLOADS_DIR"
|
||||
|
||||
FOCAL_OS_IMAGE_NAME="focal-server-cloudimg-amd64-custom-20210609-0.qcow2"
|
||||
FOCAL_OS_IMAGE_URL="https://cloud-hypervisor.azureedge.net/$FOCAL_OS_IMAGE_NAME"
|
||||
FOCAL_OS_IMAGE="$WORKLOADS_DIR/$FOCAL_OS_IMAGE_NAME"
|
||||
if [ ! -f "$FOCAL_OS_IMAGE" ]; then
|
||||
pushd $WORKLOADS_DIR
|
||||
pushd "$WORKLOADS_DIR" || exit
|
||||
time wget --quiet $FOCAL_OS_IMAGE_URL || exit 1
|
||||
popd
|
||||
popd || exit
|
||||
fi
|
||||
|
||||
FOCAL_OS_RAW_IMAGE_NAME="focal-server-cloudimg-amd64-custom-20210609-0.raw"
|
||||
FOCAL_OS_RAW_IMAGE="$WORKLOADS_DIR/$FOCAL_OS_RAW_IMAGE_NAME"
|
||||
if [ ! -f "$FOCAL_OS_RAW_IMAGE" ]; then
|
||||
pushd $WORKLOADS_DIR
|
||||
pushd "$WORKLOADS_DIR" || exit
|
||||
time qemu-img convert -p -f qcow2 -O raw $FOCAL_OS_IMAGE_NAME $FOCAL_OS_RAW_IMAGE_NAME || exit 1
|
||||
popd
|
||||
popd || exit
|
||||
fi
|
||||
|
||||
pushd $WORKLOADS_DIR
|
||||
grep focal sha1sums-x86_64 | sha1sum --check
|
||||
if [ $? -ne 0 ]; then
|
||||
pushd "$WORKLOADS_DIR" || exit
|
||||
if ! grep focal sha1sums-x86_64 | sha1sum --check; then
|
||||
echo "sha1sum validation of images failed, remove invalid images to fix the issue."
|
||||
exit 1
|
||||
fi
|
||||
popd
|
||||
popd || exit
|
||||
|
||||
build_custom_linux
|
||||
|
||||
CFLAGS=""
|
||||
if [[ "${BUILD_TARGET}" == "x86_64-unknown-linux-musl" ]]; then
|
||||
# shellcheck disable=SC2034
|
||||
CFLAGS="-I /usr/include/x86_64-linux-musl/ -idirafter /usr/include/"
|
||||
fi
|
||||
|
||||
cargo build --features mshv --all --release --target $BUILD_TARGET
|
||||
cargo build --features mshv --all --release --target "$BUILD_TARGET"
|
||||
|
||||
export RUST_BACKTRACE=1
|
||||
time cargo test $test_features "rate_limiter::$test_filter" -- --test-threads=1 ${test_binary_args[*]}
|
||||
|
@@ -1,8 +1,10 @@
|
||||
#!/usr/bin/env bash
|
||||
# shellcheck disable=SC2048,SC2086
|
||||
set -x
|
||||
|
||||
source $HOME/.cargo/env
|
||||
source $(dirname "$0")/test-util.sh
|
||||
# shellcheck source=/dev/null
|
||||
source "$HOME"/.cargo/env
|
||||
source "$(dirname "$0")"/test-util.sh
|
||||
|
||||
process_common_args "$@"
|
||||
|
||||
@@ -20,25 +22,26 @@ JAMMY_OS_IMAGE_NAME="jammy-server-cloudimg-amd64-custom-20230119-0.qcow2"
|
||||
JAMMY_OS_IMAGE_URL="https://cloud-hypervisor.azureedge.net/$JAMMY_OS_IMAGE_NAME"
|
||||
JAMMY_OS_IMAGE="$WORKLOADS_DIR/$JAMMY_OS_IMAGE_NAME"
|
||||
if [ ! -f "$JAMMY_OS_IMAGE" ]; then
|
||||
pushd $WORKLOADS_DIR
|
||||
pushd "$WORKLOADS_DIR" || exit
|
||||
time wget --quiet $JAMMY_OS_IMAGE_URL || exit 1
|
||||
popd
|
||||
popd || exit
|
||||
fi
|
||||
|
||||
JAMMY_OS_RAW_IMAGE_NAME="jammy-server-cloudimg-amd64-custom-20230119-0.raw"
|
||||
JAMMY_OS_RAW_IMAGE="$WORKLOADS_DIR/$JAMMY_OS_RAW_IMAGE_NAME"
|
||||
if [ ! -f "$JAMMY_OS_RAW_IMAGE" ]; then
|
||||
pushd $WORKLOADS_DIR
|
||||
pushd "$WORKLOADS_DIR" || exit
|
||||
time qemu-img convert -p -f qcow2 -O raw $JAMMY_OS_IMAGE_NAME $JAMMY_OS_RAW_IMAGE_NAME || exit 1
|
||||
popd
|
||||
popd || exit
|
||||
fi
|
||||
|
||||
CFLAGS=""
|
||||
if [[ "${BUILD_TARGET}" == "x86_64-unknown-linux-musl" ]]; then
|
||||
# shellcheck disable=SC2034
|
||||
CFLAGS="-I /usr/include/x86_64-linux-musl/ -idirafter /usr/include/"
|
||||
fi
|
||||
|
||||
cargo build --features mshv --all --release --target $BUILD_TARGET
|
||||
cargo build --features mshv --all --release --target "$BUILD_TARGET"
|
||||
|
||||
export RUST_BACKTRACE=1
|
||||
|
||||
|
@@ -1,4 +1,5 @@
|
||||
#!/usr/bin/env bash
|
||||
# shellcheck disable=SC2048,SC2086
|
||||
set -x
|
||||
|
||||
# This set of vfio tests require to be ran on a specific machine with
|
||||
@@ -7,8 +8,9 @@ set -x
|
||||
# out of the scope of this script, including the custom guest image with
|
||||
# Nvidia drivers installed, and properly configured Nvidia Tesla T4 card.
|
||||
|
||||
source $HOME/.cargo/env
|
||||
source $(dirname "$0")/test-util.sh
|
||||
# shellcheck source=/dev/null
|
||||
source "$HOME"/.cargo/env
|
||||
source "$(dirname "$0")"/test-util.sh
|
||||
|
||||
process_common_args "$@"
|
||||
|
||||
@@ -18,10 +20,11 @@ download_hypervisor_fw
|
||||
|
||||
CFLAGS=""
|
||||
if [[ "${BUILD_TARGET}" == "x86_64-unknown-linux-musl" ]]; then
|
||||
# shellcheck disable=SC2034
|
||||
CFLAGS="-I /usr/include/x86_64-linux-musl/ -idirafter /usr/include/"
|
||||
fi
|
||||
|
||||
cargo build --features mshv --all --release --target $BUILD_TARGET
|
||||
cargo build --features mshv --all --release --target "$BUILD_TARGET"
|
||||
|
||||
export RUST_BACKTRACE=1
|
||||
time cargo test "vfio::test_nvidia" -- --test-threads=1 ${test_binary_args[*]}
|
||||
|
@@ -1,9 +1,11 @@
|
||||
#!/usr/bin/env bash
|
||||
# shellcheck disable=SC2048,SC2086
|
||||
set -x
|
||||
|
||||
source $HOME/.cargo/env
|
||||
source $(dirname "$0")/test-util.sh
|
||||
source $(dirname "$0")/common-aarch64.sh
|
||||
# shellcheck source=/dev/null
|
||||
source "$HOME"/.cargo/env
|
||||
source "$(dirname "$0")"/test-util.sh
|
||||
source "$(dirname "$0")"/common-aarch64.sh
|
||||
|
||||
process_common_args "$@"
|
||||
|
||||
@@ -27,8 +29,8 @@ if [[ ! -f ${WIN_IMAGE_FILE} || ! -f ${OVMF_FW} ]]; then
|
||||
fi
|
||||
|
||||
# Use device mapper to create a snapshot of the Windows image
|
||||
img_blk_size=$(du -b -B 512 ${WIN_IMAGE_FILE} | awk '{print $1;}')
|
||||
loop_device=$(losetup --find --show --read-only ${WIN_IMAGE_FILE})
|
||||
img_blk_size=$(du -b -B 512 "${WIN_IMAGE_FILE}" | awk '{print $1;}')
|
||||
loop_device=$(losetup --find --show --read-only "${WIN_IMAGE_FILE}")
|
||||
dmsetup create windows-base --table "0 $img_blk_size linear $loop_device 0"
|
||||
dmsetup mknodes
|
||||
dmsetup create windows-snapshot-base --table "0 $img_blk_size snapshot-origin /dev/mapper/windows-base"
|
||||
@@ -36,11 +38,11 @@ dmsetup mknodes
|
||||
|
||||
export RUST_BACKTRACE=1
|
||||
|
||||
cargo build --all --release --target $BUILD_TARGET
|
||||
cargo build --all --release --target "$BUILD_TARGET"
|
||||
|
||||
# Only run with 1 thread to avoid tests interfering with one another because
|
||||
# Windows has a static IP configured
|
||||
time cargo test "windows::$test_filter" --target $BUILD_TARGET -- ${test_binary_args[*]}
|
||||
time cargo test "windows::$test_filter" --target "$BUILD_TARGET" -- ${test_binary_args[*]}
|
||||
RES=$?
|
||||
|
||||
dmsetup remove_all -f
|
||||
|
@@ -1,8 +1,10 @@
|
||||
#!/usr/bin/env bash
|
||||
# shellcheck disable=SC2048,SC2086
|
||||
set -x
|
||||
|
||||
source $HOME/.cargo/env
|
||||
source $(dirname "$0")/test-util.sh
|
||||
# shellcheck source=/dev/null
|
||||
source "$HOME"/.cargo/env
|
||||
source "$(dirname "$0")"/test-util.sh
|
||||
|
||||
process_common_args "$@"
|
||||
# For now these values are default for kvm
|
||||
@@ -19,6 +21,7 @@ download_ovmf
|
||||
|
||||
CFLAGS=""
|
||||
if [[ "${BUILD_TARGET}" == "x86_64-unknown-linux-musl" ]]; then
|
||||
# shellcheck disable=SC2034
|
||||
CFLAGS="-I /usr/include/x86_64-linux-musl/ -idirafter /usr/include/"
|
||||
fi
|
||||
|
||||
@@ -36,7 +39,7 @@ dmsetup mknodes
|
||||
dmsetup create windows-snapshot-base --table "0 $img_blk_size snapshot-origin /dev/mapper/windows-base"
|
||||
dmsetup mknodes
|
||||
|
||||
cargo build --features mshv --all --release --target $BUILD_TARGET
|
||||
cargo build --features mshv --all --release --target "$BUILD_TARGET"
|
||||
|
||||
export RUST_BACKTRACE=1
|
||||
|
||||
|
@@ -1,8 +1,10 @@
|
||||
#!/usr/bin/env bash
|
||||
# shellcheck disable=SC2048,SC2086
|
||||
set -x
|
||||
|
||||
source $HOME/.cargo/env
|
||||
source $(dirname "$0")/test-util.sh
|
||||
# shellcheck source=/dev/null
|
||||
source "$HOME"/.cargo/env
|
||||
source "$(dirname "$0")"/test-util.sh
|
||||
|
||||
WORKLOADS_DIR="$HOME/workloads"
|
||||
mkdir -p "$WORKLOADS_DIR"
|
||||
@@ -16,7 +18,7 @@ if [ "$hypervisor" = "mshv" ]; then
|
||||
test_features="--features mshv"
|
||||
fi
|
||||
|
||||
cp scripts/sha1sums-x86_64 $WORKLOADS_DIR
|
||||
cp scripts/sha1sums-x86_64 "$WORKLOADS_DIR"
|
||||
|
||||
download_hypervisor_fw
|
||||
|
||||
@@ -26,55 +28,55 @@ FOCAL_OS_IMAGE_NAME="focal-server-cloudimg-amd64-custom-20210609-0.qcow2"
|
||||
FOCAL_OS_IMAGE_URL="https://cloud-hypervisor.azureedge.net/$FOCAL_OS_IMAGE_NAME"
|
||||
FOCAL_OS_IMAGE="$WORKLOADS_DIR/$FOCAL_OS_IMAGE_NAME"
|
||||
if [ ! -f "$FOCAL_OS_IMAGE" ]; then
|
||||
pushd $WORKLOADS_DIR
|
||||
pushd "$WORKLOADS_DIR" || exit
|
||||
time wget --quiet $FOCAL_OS_IMAGE_URL || exit 1
|
||||
popd
|
||||
popd || exit
|
||||
fi
|
||||
|
||||
FOCAL_OS_RAW_IMAGE_NAME="focal-server-cloudimg-amd64-custom-20210609-0.raw"
|
||||
FOCAL_OS_RAW_IMAGE="$WORKLOADS_DIR/$FOCAL_OS_RAW_IMAGE_NAME"
|
||||
if [ ! -f "$FOCAL_OS_RAW_IMAGE" ]; then
|
||||
pushd $WORKLOADS_DIR
|
||||
pushd "$WORKLOADS_DIR" || exit
|
||||
time qemu-img convert -p -f qcow2 -O raw $FOCAL_OS_IMAGE_NAME $FOCAL_OS_RAW_IMAGE_NAME || exit 1
|
||||
popd
|
||||
popd || exit
|
||||
fi
|
||||
|
||||
FOCAL_OS_QCOW_BACKING_FILE_IMAGE_NAME="focal-server-cloudimg-amd64-custom-20210609-0-backing.qcow2"
|
||||
FOCAL_OS_QCOW_BACKING_FILE_IMAGE="$WORKLOADS_DIR/$FOCAL_OS_QCOW_BACKING_FILE_IMAGE_NAME"
|
||||
if [ ! -f "$FOCAL_OS_QCOW_BACKING_FILE_IMAGE" ]; then
|
||||
pushd $WORKLOADS_DIR
|
||||
time qemu-img create -f qcow2 -b $FOCAL_OS_IMAGE -F qcow2 $FOCAL_OS_QCOW_BACKING_FILE_IMAGE_NAME
|
||||
popd
|
||||
pushd "$WORKLOADS_DIR" || exit
|
||||
time qemu-img create -f qcow2 -b "$FOCAL_OS_IMAGE" -F qcow2 $FOCAL_OS_QCOW_BACKING_FILE_IMAGE_NAME
|
||||
popd || exit
|
||||
fi
|
||||
|
||||
JAMMY_OS_IMAGE_NAME="jammy-server-cloudimg-amd64-custom-20230119-0.qcow2"
|
||||
JAMMY_OS_IMAGE_URL="https://cloud-hypervisor.azureedge.net/$JAMMY_OS_IMAGE_NAME"
|
||||
JAMMY_OS_IMAGE="$WORKLOADS_DIR/$JAMMY_OS_IMAGE_NAME"
|
||||
if [ ! -f "$JAMMY_OS_IMAGE" ]; then
|
||||
pushd $WORKLOADS_DIR
|
||||
pushd "$WORKLOADS_DIR" || exit
|
||||
time wget --quiet $JAMMY_OS_IMAGE_URL || exit 1
|
||||
popd
|
||||
popd || exit
|
||||
fi
|
||||
|
||||
JAMMY_OS_RAW_IMAGE_NAME="jammy-server-cloudimg-amd64-custom-20230119-0.raw"
|
||||
JAMMY_OS_RAW_IMAGE="$WORKLOADS_DIR/$JAMMY_OS_RAW_IMAGE_NAME"
|
||||
if [ ! -f "$JAMMY_OS_RAW_IMAGE" ]; then
|
||||
pushd $WORKLOADS_DIR
|
||||
pushd "$WORKLOADS_DIR" || exit
|
||||
time qemu-img convert -p -f qcow2 -O raw $JAMMY_OS_IMAGE_NAME $JAMMY_OS_RAW_IMAGE_NAME || exit 1
|
||||
popd
|
||||
popd || exit
|
||||
fi
|
||||
|
||||
ALPINE_MINIROOTFS_URL="http://dl-cdn.alpinelinux.org/alpine/v3.11/releases/x86_64/alpine-minirootfs-3.11.3-x86_64.tar.gz"
|
||||
ALPINE_MINIROOTFS_TARBALL="$WORKLOADS_DIR/alpine-minirootfs-x86_64.tar.gz"
|
||||
if [ ! -f "$ALPINE_MINIROOTFS_TARBALL" ]; then
|
||||
pushd $WORKLOADS_DIR
|
||||
time wget --quiet $ALPINE_MINIROOTFS_URL -O $ALPINE_MINIROOTFS_TARBALL || exit 1
|
||||
popd
|
||||
pushd "$WORKLOADS_DIR" || exit
|
||||
time wget --quiet $ALPINE_MINIROOTFS_URL -O "$ALPINE_MINIROOTFS_TARBALL" || exit 1
|
||||
popd || exit
|
||||
fi
|
||||
|
||||
ALPINE_INITRAMFS_IMAGE="$WORKLOADS_DIR/alpine_initramfs.img"
|
||||
if [ ! -f "$ALPINE_INITRAMFS_IMAGE" ]; then
|
||||
pushd $WORKLOADS_DIR
|
||||
pushd "$WORKLOADS_DIR" || exit
|
||||
mkdir alpine-minirootfs
|
||||
tar xf "$ALPINE_MINIROOTFS_TARBALL" -C alpine-minirootfs
|
||||
cat >alpine-minirootfs/init <<-EOF
|
||||
@@ -84,19 +86,18 @@ if [ ! -f "$ALPINE_INITRAMFS_IMAGE" ]; then
|
||||
poweroff -f
|
||||
EOF
|
||||
chmod +x alpine-minirootfs/init
|
||||
cd alpine-minirootfs
|
||||
cd alpine-minirootfs || exit
|
||||
find . -print0 |
|
||||
cpio --null --create --verbose --owner root:root --format=newc >"$ALPINE_INITRAMFS_IMAGE"
|
||||
popd
|
||||
popd || exit
|
||||
fi
|
||||
|
||||
pushd $WORKLOADS_DIR
|
||||
sha1sum sha1sums-x86_64 --check
|
||||
if [ $? -ne 0 ]; then
|
||||
pushd "$WORKLOADS_DIR" || exit
|
||||
if ! sha1sum sha1sums-x86_64 --check; then
|
||||
echo "sha1sum validation of images failed, remove invalid images to fix the issue."
|
||||
exit 1
|
||||
fi
|
||||
popd
|
||||
popd || exit
|
||||
|
||||
# Build custom kernel based on virtio-pmem and virtio-fs upstream patches
|
||||
VMLINUX_IMAGE="$WORKLOADS_DIR/vmlinux"
|
||||
@@ -107,51 +108,51 @@ fi
|
||||
VIRTIOFSD="$WORKLOADS_DIR/virtiofsd"
|
||||
VIRTIOFSD_DIR="virtiofsd_build"
|
||||
if [ ! -f "$VIRTIOFSD" ]; then
|
||||
pushd $WORKLOADS_DIR
|
||||
pushd "$WORKLOADS_DIR" || exit
|
||||
git clone "https://gitlab.com/virtio-fs/virtiofsd.git" $VIRTIOFSD_DIR
|
||||
pushd $VIRTIOFSD_DIR
|
||||
pushd $VIRTIOFSD_DIR || exit
|
||||
git checkout v1.8.0
|
||||
time cargo build --release
|
||||
cp target/release/virtiofsd $VIRTIOFSD || exit 1
|
||||
popd
|
||||
cp target/release/virtiofsd "$VIRTIOFSD" || exit 1
|
||||
popd || exit
|
||||
rm -rf $VIRTIOFSD_DIR
|
||||
popd
|
||||
popd || exit
|
||||
fi
|
||||
|
||||
BLK_IMAGE="$WORKLOADS_DIR/blk.img"
|
||||
MNT_DIR="mount_image"
|
||||
if [ ! -f "$BLK_IMAGE" ]; then
|
||||
pushd $WORKLOADS_DIR
|
||||
fallocate -l 16M $BLK_IMAGE
|
||||
mkfs.ext4 -j $BLK_IMAGE
|
||||
pushd "$WORKLOADS_DIR" || exit
|
||||
fallocate -l 16M "$BLK_IMAGE"
|
||||
mkfs.ext4 -j "$BLK_IMAGE"
|
||||
mkdir $MNT_DIR
|
||||
sudo mount -t ext4 $BLK_IMAGE $MNT_DIR
|
||||
sudo mount -t ext4 "$BLK_IMAGE" $MNT_DIR
|
||||
sudo bash -c "echo bar > $MNT_DIR/foo" || exit 1
|
||||
sudo umount $BLK_IMAGE
|
||||
sudo umount "$BLK_IMAGE"
|
||||
rm -r $MNT_DIR
|
||||
popd
|
||||
popd || exit
|
||||
fi
|
||||
|
||||
SHARED_DIR="$WORKLOADS_DIR/shared_dir"
|
||||
if [ ! -d "$SHARED_DIR" ]; then
|
||||
mkdir -p $SHARED_DIR
|
||||
mkdir -p "$SHARED_DIR"
|
||||
echo "foo" >"$SHARED_DIR/file1"
|
||||
echo "bar" >"$SHARED_DIR/file3" || exit 1
|
||||
fi
|
||||
|
||||
VFIO_DIR="$WORKLOADS_DIR/vfio"
|
||||
VFIO_DISK_IMAGE="$WORKLOADS_DIR/vfio.img"
|
||||
rm -rf $VFIO_DIR $VFIO_DISK_IMAGE
|
||||
mkdir -p $VFIO_DIR
|
||||
cp $FOCAL_OS_RAW_IMAGE $VFIO_DIR
|
||||
cp $FW $VFIO_DIR
|
||||
cp $VMLINUX_IMAGE $VFIO_DIR || exit 1
|
||||
rm -rf "$VFIO_DIR" "$VFIO_DISK_IMAGE"
|
||||
mkdir -p "$VFIO_DIR"
|
||||
cp "$FOCAL_OS_RAW_IMAGE" "$VFIO_DIR"
|
||||
cp "$FW" "$VFIO_DIR"
|
||||
cp "$VMLINUX_IMAGE" "$VFIO_DIR" || exit 1
|
||||
|
||||
cargo build --features mshv --all --release --target $BUILD_TARGET
|
||||
cargo build --features mshv --all --release --target "$BUILD_TARGET"
|
||||
|
||||
# We always copy a fresh version of our binary for our L2 guest.
|
||||
cp target/$BUILD_TARGET/release/cloud-hypervisor $VFIO_DIR
|
||||
cp target/$BUILD_TARGET/release/ch-remote $VFIO_DIR
|
||||
cp target/"$BUILD_TARGET"/release/cloud-hypervisor "$VFIO_DIR"
|
||||
cp target/"$BUILD_TARGET"/release/ch-remote "$VFIO_DIR"
|
||||
|
||||
# Enable KSM with some reasonable parameters so that it won't take too long
|
||||
# for the memory to be merged between two processes.
|
||||
@@ -161,8 +162,8 @@ sudo bash -c "echo 1 > /sys/kernel/mm/ksm/run"
|
||||
|
||||
# Both test_vfio, ovs-dpdk and vDPA tests rely on hugepages
|
||||
HUGEPAGESIZE=$(grep Hugepagesize /proc/meminfo | awk '{print $2}')
|
||||
PAGE_NUM=$(echo $((12288 * 1024 / $HUGEPAGESIZE)))
|
||||
echo $PAGE_NUM | sudo tee /proc/sys/vm/nr_hugepages
|
||||
PAGE_NUM=$((12288 * 1024 / HUGEPAGESIZE))
|
||||
echo "$PAGE_NUM" | sudo tee /proc/sys/vm/nr_hugepages
|
||||
sudo chmod a+rwX /dev/hugepages
|
||||
|
||||
# Update max locked memory to 'unlimited' to avoid issues with vDPA
|
||||
@@ -185,7 +186,7 @@ fi
|
||||
|
||||
# Run tests on dbus_api
|
||||
if [ $RES -eq 0 ]; then
|
||||
cargo build --features "mshv,dbus_api" --all --release --target $BUILD_TARGET
|
||||
cargo build --features "mshv,dbus_api" --all --release --target "$BUILD_TARGET"
|
||||
export RUST_BACKTRACE=1
|
||||
# integration tests now do not reply on build feature "dbus_api"
|
||||
time cargo test $test_features "dbus_api::$test_filter" -- ${test_binary_args[*]}
|
||||
|
@@ -1,10 +1,12 @@
|
||||
#!/usr/bin/env bash
|
||||
set -x
|
||||
|
||||
source $HOME/.cargo/env
|
||||
source $(dirname "$0")/test-util.sh
|
||||
# shellcheck source=/dev/null
|
||||
source "$HOME"/.cargo/env
|
||||
source "$(dirname "$0")"/test-util.sh
|
||||
|
||||
export TEST_ARCH=$(uname -m)
|
||||
TEST_ARCH=$(uname -m)
|
||||
export TEST_ARCH
|
||||
|
||||
WORKLOADS_DIR="$HOME/workloads"
|
||||
mkdir -p "$WORKLOADS_DIR"
|
||||
@@ -15,20 +17,20 @@ build_fio() {
|
||||
|
||||
checkout_repo "$FIO_DIR" "$FIO_REPO" master "1953e1adb5a28ed21370e85991d7f5c3cdc699f3"
|
||||
if [ ! -f "$FIO_DIR/.built" ]; then
|
||||
pushd $FIO_DIR
|
||||
pushd "$FIO_DIR" || exit
|
||||
./configure
|
||||
make -j $(nproc)
|
||||
make -j "$(nproc)"
|
||||
cp fio "$WORKLOADS_DIR/fio"
|
||||
touch .built
|
||||
popd
|
||||
popd || exit
|
||||
fi
|
||||
}
|
||||
|
||||
process_common_args "$@"
|
||||
|
||||
cp scripts/sha1sums-${TEST_ARCH} $WORKLOADS_DIR
|
||||
cp scripts/sha1sums-"${TEST_ARCH}" "$WORKLOADS_DIR"
|
||||
|
||||
if [ ${TEST_ARCH} == "aarch64" ]; then
|
||||
if [ "${TEST_ARCH}" == "aarch64" ]; then
|
||||
FOCAL_OS_IMAGE_NAME="focal-server-cloudimg-arm64-custom-20210929-0.qcow2"
|
||||
else
|
||||
FOCAL_OS_IMAGE_NAME="focal-server-cloudimg-amd64-custom-20210609-0.qcow2"
|
||||
@@ -37,12 +39,12 @@ fi
|
||||
FOCAL_OS_IMAGE_URL="https://cloud-hypervisor.azureedge.net/$FOCAL_OS_IMAGE_NAME"
|
||||
FOCAL_OS_IMAGE="$WORKLOADS_DIR/$FOCAL_OS_IMAGE_NAME"
|
||||
if [ ! -f "$FOCAL_OS_IMAGE" ]; then
|
||||
pushd $WORKLOADS_DIR
|
||||
pushd "$WORKLOADS_DIR" || exit
|
||||
time wget --quiet $FOCAL_OS_IMAGE_URL || exit 1
|
||||
popd
|
||||
popd || exit
|
||||
fi
|
||||
|
||||
if [ ${TEST_ARCH} == "aarch64" ]; then
|
||||
if [ "${TEST_ARCH}" == "aarch64" ]; then
|
||||
FOCAL_OS_RAW_IMAGE_NAME="focal-server-cloudimg-arm64-custom-20210929-0.raw"
|
||||
else
|
||||
FOCAL_OS_RAW_IMAGE_NAME="focal-server-cloudimg-amd64-custom-20210609-0.raw"
|
||||
@@ -50,20 +52,19 @@ fi
|
||||
|
||||
FOCAL_OS_RAW_IMAGE="$WORKLOADS_DIR/$FOCAL_OS_RAW_IMAGE_NAME"
|
||||
if [ ! -f "$FOCAL_OS_RAW_IMAGE" ]; then
|
||||
pushd $WORKLOADS_DIR
|
||||
pushd "$WORKLOADS_DIR" || exit
|
||||
time qemu-img convert -p -f qcow2 -O raw $FOCAL_OS_IMAGE_NAME $FOCAL_OS_RAW_IMAGE_NAME || exit 1
|
||||
popd
|
||||
popd || exit
|
||||
fi
|
||||
|
||||
pushd $WORKLOADS_DIR
|
||||
grep focal sha1sums-${TEST_ARCH} | sha1sum --check
|
||||
if [ $? -ne 0 ]; then
|
||||
pushd "$WORKLOADS_DIR" || exit
|
||||
if ! grep focal sha1sums-"${TEST_ARCH}" | sha1sum --check; then
|
||||
echo "sha1sum validation of images failed, remove invalid images to fix the issue."
|
||||
exit 1
|
||||
fi
|
||||
popd
|
||||
popd || exit
|
||||
|
||||
if [ ${TEST_ARCH} == "aarch64" ]; then
|
||||
if [ "${TEST_ARCH}" == "aarch64" ]; then
|
||||
build_fio
|
||||
|
||||
# Update the fio in the cloud image to use io_uring on AArch64
|
||||
@@ -84,15 +85,16 @@ build_custom_linux
|
||||
|
||||
CFLAGS=""
|
||||
if [[ "${BUILD_TARGET}" == "${TEST_ARCH}-unknown-linux-musl" ]]; then
|
||||
# shellcheck disable=SC2034
|
||||
CFLAGS="-I /usr/include/${TEST_ARCH}-linux-musl/ -idirafter /usr/include/"
|
||||
fi
|
||||
|
||||
cargo build --features mshv --all --release --target $BUILD_TARGET
|
||||
cargo build --features mshv --all --release --target "$BUILD_TARGET"
|
||||
|
||||
# setup hugepages
|
||||
HUGEPAGESIZE=$(grep Hugepagesize /proc/meminfo | awk '{print $2}')
|
||||
PAGE_NUM=$(echo $((12288 * 1024 / $HUGEPAGESIZE)))
|
||||
echo $PAGE_NUM | sudo tee /proc/sys/vm/nr_hugepages
|
||||
PAGE_NUM=$((12288 * 1024 / HUGEPAGESIZE))
|
||||
echo "$PAGE_NUM" | sudo tee /proc/sys/vm/nr_hugepages
|
||||
sudo chmod a+rwX /dev/hugepages
|
||||
|
||||
if [ -n "$test_filter" ]; then
|
||||
@@ -100,15 +102,16 @@ if [ -n "$test_filter" ]; then
|
||||
fi
|
||||
|
||||
# Ensure that git commands can be run in this directory (for metrics report)
|
||||
git config --global --add safe.directory $PWD
|
||||
git config --global --add safe.directory "$PWD"
|
||||
|
||||
RUST_BACKTRACE_VALUE=$(echo $RUST_BACKTRACE)
|
||||
if [ -z $RUST_BACKTRACE_VALUE ]; then
|
||||
RUST_BACKTRACE_VALUE=$RUST_BACKTRACE
|
||||
if [ -z "$RUST_BACKTRACE_VALUE" ]; then
|
||||
export RUST_BACKTRACE=1
|
||||
else
|
||||
echo "RUST_BACKTRACE is set to: $RUST_BACKTRACE_VALUE"
|
||||
fi
|
||||
time target/$BUILD_TARGET/release/performance-metrics ${test_binary_args[*]}
|
||||
# shellcheck disable=SC2048,SC2086
|
||||
time target/"$BUILD_TARGET"/release/performance-metrics ${test_binary_args[*]}
|
||||
RES=$?
|
||||
|
||||
exit $RES
|
||||
|
@@ -2,4 +2,4 @@
|
||||
set -e
|
||||
set -x
|
||||
|
||||
sudo docker run --rm -v ${PWD}:/local openapitools/openapi-generator-cli validate -i /local/vmm/src/api/openapi/cloud-hypervisor.yaml
|
||||
sudo docker run --rm -v "${PWD}":/local openapitools/openapi-generator-cli validate -i /local/vmm/src/api/openapi/cloud-hypervisor.yaml
|
||||
|
@@ -1,12 +1,15 @@
|
||||
#!/usr/bin/env bash
|
||||
# shellcheck disable=SC2068
|
||||
|
||||
source $HOME/.cargo/env
|
||||
source $(dirname "$0")/test-util.sh
|
||||
# shellcheck source=/dev/null
|
||||
source "$HOME"/.cargo/env
|
||||
source "$(dirname "$0")"/test-util.sh
|
||||
|
||||
process_common_args "$@"
|
||||
|
||||
cargo_args=("")
|
||||
|
||||
# shellcheck disable=SC2154
|
||||
if [[ $hypervisor = "mshv" ]]; then
|
||||
cargo_args+=("--features $hypervisor")
|
||||
elif [[ $(uname -m) = "x86_64" ]]; then
|
||||
@@ -14,5 +17,5 @@ elif [[ $(uname -m) = "x86_64" ]]; then
|
||||
fi
|
||||
|
||||
export RUST_BACKTRACE=1
|
||||
cargo test --lib --bins --target $BUILD_TARGET --workspace ${cargo_args[@]} || exit 1
|
||||
cargo test --doc --target $BUILD_TARGET --workspace ${cargo_args[@]} || exit 1
|
||||
cargo test --lib --bins --target "$BUILD_TARGET" --workspace ${cargo_args[@]} || exit 1
|
||||
cargo test --doc --target "$BUILD_TARGET" --workspace ${cargo_args[@]} || exit 1
|
||||
|
@@ -18,13 +18,13 @@ checkout_repo() {
|
||||
# If commit is not specified, compare local HEAD and remote HEAD.
|
||||
# Remove the folder if there is difference.
|
||||
if [ -d "$SRC_DIR" ]; then
|
||||
pushd $SRC_DIR
|
||||
pushd "$SRC_DIR" || exit
|
||||
git fetch
|
||||
SRC_LOCAL_COMMIT=$(git rev-parse HEAD)
|
||||
if [ -z "$GIT_COMMIT" ]; then
|
||||
GIT_COMMIT=$(git rev-parse remotes/origin/"$GIT_BRANCH")
|
||||
fi
|
||||
popd
|
||||
popd || exit
|
||||
if [ "$SRC_LOCAL_COMMIT" != "$GIT_COMMIT" ]; then
|
||||
rm -rf "$SRC_DIR"
|
||||
fi
|
||||
@@ -34,10 +34,10 @@ checkout_repo() {
|
||||
if [ ! -d "$SRC_DIR" ]; then
|
||||
git clone --depth 1 "$GIT_URL" -b "$GIT_BRANCH" "$SRC_DIR"
|
||||
if [ "$GIT_COMMIT" ]; then
|
||||
pushd "$SRC_DIR"
|
||||
pushd "$SRC_DIR" || exit
|
||||
git fetch --depth 1 origin "$GIT_COMMIT"
|
||||
git reset --hard FETCH_HEAD
|
||||
popd
|
||||
popd || exit
|
||||
fi
|
||||
fi
|
||||
}
|
||||
@@ -51,23 +51,23 @@ build_custom_linux() {
|
||||
|
||||
checkout_repo "$LINUX_CUSTOM_DIR" "$LINUX_CUSTOM_URL" "$LINUX_CUSTOM_BRANCH"
|
||||
|
||||
cp $SRCDIR/resources/linux-config-${ARCH} $LINUX_CUSTOM_DIR/.config
|
||||
cp "$SRCDIR"/resources/linux-config-"${ARCH}" "$LINUX_CUSTOM_DIR"/.config
|
||||
|
||||
pushd $LINUX_CUSTOM_DIR
|
||||
make -j $(nproc)
|
||||
if [ ${ARCH} == "x86_64" ]; then
|
||||
pushd "$LINUX_CUSTOM_DIR" || exit
|
||||
make -j "$(nproc)"
|
||||
if [ "${ARCH}" == "x86_64" ]; then
|
||||
cp vmlinux "$WORKLOADS_DIR/" || exit 1
|
||||
elif [ ${ARCH} == "aarch64" ]; then
|
||||
elif [ "${ARCH}" == "aarch64" ]; then
|
||||
cp arch/arm64/boot/Image "$WORKLOADS_DIR/" || exit 1
|
||||
cp arch/arm64/boot/Image.gz "$WORKLOADS_DIR/" || exit 1
|
||||
fi
|
||||
popd
|
||||
popd || exit
|
||||
}
|
||||
|
||||
cmd_help() {
|
||||
echo ""
|
||||
echo "Cloud Hypervisor $(basename $0)"
|
||||
echo "Usage: $(basename $0) [<args>]"
|
||||
echo "Cloud Hypervisor $(basename "$0")"
|
||||
echo "Usage: $(basename "$0") [<args>]"
|
||||
echo ""
|
||||
echo "Available arguments:"
|
||||
echo ""
|
||||
@@ -91,6 +91,7 @@ process_common_args() {
|
||||
;;
|
||||
"--test-filter")
|
||||
shift
|
||||
# shellcheck disable=SC2034
|
||||
test_filter="$1"
|
||||
;;
|
||||
"--") {
|
||||
@@ -107,8 +108,8 @@ process_common_args() {
|
||||
if [[ ! ("$hypervisor" = "kvm" || "$hypervisor" = "mshv") ]]; then
|
||||
die "Hypervisor value must be kvm or mshv"
|
||||
fi
|
||||
|
||||
test_binary_args=($@)
|
||||
# shellcheck disable=SC2034
|
||||
test_binary_args=("$@")
|
||||
}
|
||||
|
||||
download_hypervisor_fw() {
|
||||
@@ -122,18 +123,18 @@ download_hypervisor_fw() {
|
||||
FW_URL=$(curl --silent https://api.github.com/repos/cloud-hypervisor/rust-hypervisor-firmware/releases/latest | grep "browser_download_url" | grep -o 'https://.*[^ "]')
|
||||
fi
|
||||
FW="$WORKLOADS_DIR/hypervisor-fw"
|
||||
pushd $WORKLOADS_DIR
|
||||
rm -f $FW
|
||||
time wget --quiet $FW_URL || exit 1
|
||||
popd
|
||||
pushd "$WORKLOADS_DIR" || exit
|
||||
rm -f "$FW"
|
||||
time wget --quiet "$FW_URL" || exit 1
|
||||
popd || exit
|
||||
}
|
||||
|
||||
download_ovmf() {
|
||||
OVMF_FW_TAG="ch-6624aa331f"
|
||||
OVMF_FW_URL="https://github.com/cloud-hypervisor/edk2/releases/download/$OVMF_FW_TAG/CLOUDHV.fd"
|
||||
OVMF_FW="$WORKLOADS_DIR/CLOUDHV.fd"
|
||||
pushd $WORKLOADS_DIR
|
||||
rm -f $OVMF_FW
|
||||
pushd "$WORKLOADS_DIR" || exit
|
||||
rm -f "$OVMF_FW"
|
||||
time wget --quiet $OVMF_FW_URL || exit 1
|
||||
popd
|
||||
popd || exit
|
||||
}
|
||||
|