scripts: fix shell scripts format according to shfmt
Unify coding style of shell scripts in accordance with shfmt checks.

Signed-off-by: Ruslan Mstoi <ruslan.mstoi@intel.com>

parent 612a8dfb1b
commit 318caeb9d8
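The change below is mechanical restyling only; no behavioural change is intended. As a rough sketch of how such a cleanup can be checked or reproduced locally (the exact shfmt version and flags used by the project's CI are an assumption here, not something stated in the commit):

# Hypothetical invocation; CI may pin a different shfmt version or flag set.
shfmt -d scripts/*.sh # print a diff for any script that deviates from shfmt style
shfmt -w scripts/*.sh # rewrite the scripts in place once the diff looks right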
@@ -8,8 +8,6 @@
 a message about the compatibility of the image.
 '

-
-
 usage="$(basename "$0") [-h] -f -w -- program to check Cloud Hypervisor compatible image

 where:
@@ -18,12 +16,11 @@ where:
 -w directory to be used for temporary files"

 function check_command {
-if ! command -v $1 &> /dev/null
-then
-echo "Command $1 could not be found"
+if ! command -v $1 &>/dev/null; then
+echo "Command $1 could not be found"
 exit 1
 fi
-};
+}

 function check_if_root {
 if [ "$EUID" -ne 0 ]; then
@@ -31,27 +28,32 @@ function check_if_root {
 exit 1
 fi

-};
+}

 check_if_root
 working_dir=""
 while getopts ':hf:w:' option; do
 case "$option" in
-h) echo "$usage"
-exit
-;;
-f) file_name=$OPTARG
-;;
-w) working_dir=$OPTARG
-;;
-:) printf "missing argument for -%s\n" "$OPTARG" >&2
-echo "$usage" >&2
-exit 1
-;;
-\?) printf "illegal option: -%s\n" "$OPTARG" >&2
-echo "$usage" >&2
-exit 1
-;;
+h)
+echo "$usage"
+exit
+;;
+f)
+file_name=$OPTARG
+;;
+w)
+working_dir=$OPTARG
+;;
+:)
+printf "missing argument for -%s\n" "$OPTARG" >&2
+echo "$usage" >&2
+exit 1
+;;
+\?)
+printf "illegal option: -%s\n" "$OPTARG" >&2
+echo "$usage" >&2
+exit 1
+;;
 esac
 done

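For reference, the layout shfmt settles on for a getopts handler is sketched below. This is an illustrative fragment with made-up options, not a copy of the project script: each case pattern sits on its own line, its commands follow, and the branch closes with ;; on a line of its own.

usage="example [-h] [-f FILE]"
while getopts ':hf:' option; do
    case "$option" in
    h)
        echo "$usage"
        exit
        ;;
    f)
        file_name=$OPTARG
        ;;
    \?)
        printf "illegal option: -%s\n" "$OPTARG" >&2
        exit 1
        ;;
    esac
done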
@@ -59,22 +61,22 @@ shift $((OPTIND - 1))

 if [ -z "${file_name}" ]; then
 echo "You must provide the image file name"
 exit 1
 fi
 if [[ ! -f ${file_name} ]]; then
 echo "File ${file_name} does not exist"
 exit 1
 fi

-file_abs_path=`readlink -m ${file_name}`
+file_abs_path=$(readlink -m ${file_name})
 if [[ "${working_dir}" != "" && ! -d "${working_dir}" ]]; then
 echo "Directory ${working_dir} does not exist"
 exit 1
 elif [[ "${working_dir}" == "" ]]; then
-working_dir=`mktemp -d`
+working_dir=$(mktemp -d)
 tmp_created=1
 else
-working_dir=`readlink -m ${working_dir}`
+working_dir=$(readlink -m ${working_dir})
 fi

 filename="${file_name%.*}"
@@ -82,11 +84,10 @@ dest_file=${working_dir}/${filename}.raw
 image_type=$(qemu-img info ${file_abs_path} | grep 'file format:' | awk '{ print $3 }')
 echo "Image type detected as ${image_type}"

-
 if [[ "${image_type}" == "raw" ]]; then
 dest_file=${file_abs_path}
 elif [[ "$image_type" == "qcow2" ]]; then
-if lsmod | grep "nbd" &> /dev/null ; then
+if lsmod | grep "nbd" &>/dev/null; then
 echo "Module nbd is loaded!"
 else
 echo "Module nbd is not loaded. Trying to load the module"
@@ -106,18 +107,18 @@ check_command blkid
 part_type=$(blkid -o value -s PTTYPE ${dest_file})

 check_command partx
-nr_partitions=`partx -g ${dest_file} | wc -l`
+nr_partitions=$(partx -g ${dest_file} | wc -l)

 check_command fdisk
-out=`fdisk -l ${dest_file} --bytes | grep -i -A ${nr_partitions} 'Device' | tail -n +2`
+out=$(fdisk -l ${dest_file} --bytes | grep -i -A ${nr_partitions} 'Device' | tail -n +2)

 IFS='
 '
 i=0
 declare -A lines
-for x in $out ; do
+for x in $out; do
 lines[$i]=$x
-i=$((i+1))
+i=$((i + 1))
 done

 declare -A partitions
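Most substitutions in this script are the usual backtick-to-$() conversion. Both forms run a command substitution, but $() nests without escaped backticks and reads unambiguously, which is why shfmt-style cleanups prefer it. A tiny standalone illustration (the variable name is made up):

# Backtick form: nesting needs escaped backticks.
pkg=`echo linux-image-\`uname -r\``
# $() form: the same command, nestable as written.
pkg=$(echo linux-image-$(uname -r))
echo "$pkg"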
@@ -125,40 +126,38 @@ IFS=' '
 i=0
 ROWS=${#lines[@]}

-for line in "${lines[@]}";
-do
-j=0
-read -a str_arr <<< "$line"
-for val in "${str_arr[@]}";
-do
-if [[ "$val" != "*" ]]; then
-partitions[$i,$j]=$val
-j=$((j+1))
-fi
-done
-i=$((i+1))
-done
+for line in "${lines[@]}"; do
+j=0
+read -a str_arr <<<"$line"
+for val in "${str_arr[@]}"; do
+if [[ "$val" != "*" ]]; then
+partitions[$i, $j]=$val
+j=$((j + 1))
+fi
+done
+i=$((i + 1))
+done

 COLUMNS=$j
 START_ADDRESS_INDEX=1
-FILE_SYS_INDEX2=$((COLUMNS-1))
-FILE_SYS_INDEX1=$((COLUMNS-2))
+FILE_SYS_INDEX2=$((COLUMNS - 1))
+FILE_SYS_INDEX1=$((COLUMNS - 2))
 DEVICE_INDEX=0
 # Here we have all the partition info now lets mount and analyze the contents
-for ((i=0;i<ROWS;i++)) do
-if [[ "$part_type" == "gpt" && "${partitions[$i,${FILE_SYS_INDEX1}]}" == "Linux" && "${partitions[$i,${FILE_SYS_INDEX2}]}" == "filesystem" ]]; then
+for ((i = 0; i < ROWS; i++)); do
+if [[ "$part_type" == "gpt" && "${partitions[$i, ${FILE_SYS_INDEX1}]}" == "Linux" && "${partitions[$i, ${FILE_SYS_INDEX2}]}" == "filesystem" ]]; then
 echo "The image has GPT partitions"
 MOUNT_ROW=$i
 break
-elif [[ "$part_type" == "dos" && "${partitions[$i,${FILE_SYS_INDEX1}]}" == "Linux" && "${partitions[$i,${FILE_SYS_INDEX2}]}" == "" ]]; then
+elif [[ "$part_type" == "dos" && "${partitions[$i, ${FILE_SYS_INDEX1}]}" == "Linux" && "${partitions[$i, ${FILE_SYS_INDEX2}]}" == "" ]]; then
 echo "The image has DOS partitions"
 MOUNT_ROW=$i
 break
 fi
 done

-start_address=${partitions[${MOUNT_ROW},${START_ADDRESS_INDEX}]}
-offset=$((start_address*512))
+start_address=${partitions[${MOUNT_ROW}, ${START_ADDRESS_INDEX}]}
+offset=$((start_address * 512))

 MOUNT_DIR=/mnt/clh-img-check/
 rm -rf ${MOUNT_DIR}
@@ -166,22 +165,22 @@ mkdir ${MOUNT_DIR}
 if [[ "${image_type}" == "raw" ]]; then
 mount -o ro,loop,offset=$offset ${dest_file} ${MOUNT_DIR}
 elif [[ "${image_type}" == "qcow2" ]]; then
-mount -o ro ${partitions[${MOUNT_ROW},${DEVICE_INDEX}]} ${MOUNT_DIR}
+mount -o ro ${partitions[${MOUNT_ROW}, ${DEVICE_INDEX}]} ${MOUNT_DIR}
 fi

 CONFIG_DIR=${MOUNT_DIR}boot/
 if [[ "$part_type" == "dos" ]]; then
 CONFIG_DIR=${MOUNT_DIR}
 fi

 #check VIRTIO
 HAS_VIRTIO=1
 for conf_file in ${CONFIG_DIR}config*; do
-out=`grep -E "CONFIG_VIRTIO=y|CONFIG_VIRTIO_BLK=y|CONFIG_VIRTIO_BLK=m" ${conf_file} | wc -l`
+out=$(grep -E "CONFIG_VIRTIO=y|CONFIG_VIRTIO_BLK=y|CONFIG_VIRTIO_BLK=m" ${conf_file} | wc -l)
 if [[ "$out" != "2" ]]; then
 echo "VIRTIO not found"
 HAS_VIRTIO=0
 fi
 done

 #clean up
@@ -191,22 +190,22 @@ if [[ "${tmp_created}" == "1" ]]; then
 rm -rf ${working_dir}
 fi

-if [[ "${image_type}" == "qcow2" ]];then
-qemu-nbd --disconnect ${dest_file} > /dev/null
+if [[ "${image_type}" == "qcow2" ]]; then
+qemu-nbd --disconnect ${dest_file} >/dev/null
 fi

 result=""
 if [[ "${part_type}" == "dos" ]]; then
 result="dos mode not supported"
 fi
 if [[ "${HAS_VIRTIO}" == "0" ]]; then
 if [[ "$result" != "" ]]; then
 result="${result},"
 fi
 result="$result VirtIO module not found in the image"
 fi
-if [[ "$result" == "" ]];then
+if [[ "$result" == "" ]]; then
 echo "No incompatibilities found"
 else
 echo "$result"
 fi
@@ -28,14 +28,14 @@ build_edk2() {
 checkout_repo "$EDK2_PLAT_DIR" "$EDK2_PLAT_REPO" master "8227e9e9f6a8aefbd772b40138f835121ccb2307"
 checkout_repo "$ACPICA_DIR" "$ACPICA_REPO" master "b9c69f81a05c45611c91ea9cbce8756078d76233"

-if [[ ! -f "$EDK2_DIR/.built" || \
-! -f "$EDK2_PLAT_DIR/.built" || \
+if [[ ! -f "$EDK2_DIR/.built" ||
+! -f "$EDK2_PLAT_DIR/.built" ||
 ! -f "$ACPICA_DIR/.built" ]]; then
 pushd "$EDK2_BUILD_DIR"
 # Build
-make -C acpica -j `nproc`
+make -C acpica -j $(nproc)
 source edk2/edksetup.sh
-make -C edk2/BaseTools -j `nproc`
+make -C edk2/BaseTools -j $(nproc)
 build -a AARCH64 -t GCC5 -p ArmVirtPkg/ArmVirtCloudHv.dsc -b RELEASE -n 0
 cp Build/ArmVirtCloudHv-AARCH64/RELEASE_GCC5/FV/CLOUDHV_EFI.fd "$WORKLOADS_DIR"
 touch "$EDK2_DIR"/.built
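The dropped trailing backslashes in the multi-line [[ ... ]] test of the EDK2 build hunk rely on shell grammar: inside [[ ]] a newline may follow || (or &&) directly, so the explicit line continuation is redundant and shfmt drops it. A minimal standalone example with made-up paths:

# Equivalent tests; the second spelling is what shfmt produces.
if [[ ! -f /tmp/a.built || \
    ! -f /tmp/b.built ]]; then
    echo "rebuild needed"
fi
if [[ ! -f /tmp/a.built ||
    ! -f /tmp/b.built ]]; then
    echo "rebuild needed"
fi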
@@ -44,4 +44,3 @@ build_edk2() {
 popd
 fi
 }
-
@@ -6,4 +6,3 @@ mkdosfs -n CIDATA -C /tmp/ubuntu-cloudinit.img 8192
 mcopy -oi /tmp/ubuntu-cloudinit.img -s test_data/cloud-init/ubuntu/local/user-data ::
 mcopy -oi /tmp/ubuntu-cloudinit.img -s test_data/cloud-init/ubuntu/local/meta-data ::
 mcopy -oi /tmp/ubuntu-cloudinit.img -s test_data/cloud-init/ubuntu/local/network-config ::
-
@@ -233,10 +233,10 @@ cmd_build() {
 "--debug") { build="debug"; } ;;
 "--release") { build="release"; } ;;
 "--runtime")
 shift
 DOCKER_RUNTIME="$1"
 export DOCKER_RUNTIME
 ;;
 "--libc")
 shift
 [[ "$1" =~ ^(musl|gnu)$ ]] ||
@@ -382,7 +382,7 @@ cmd_tests() {
 exported_device="/dev/mshv"
 fi

-if [ ! -e "${exported_device}" ] ; then
+if [ ! -e "${exported_device}" ]; then
 die "${exported_device} does not exist on the system"
 fi

@@ -4,16 +4,16 @@ set -x
 sudo apt install -y libncurses-dev gawk flex bison openssl libssl-dev dkms libelf-dev libudev-dev libpci-dev libiberty-dev autoconf git make dpkg-dev libmnl-dev pkg-config iproute2
 sudo sed -i -- 's/# deb-src/deb-src/g' /etc/apt/sources.list
 sudo apt update
-apt-get source linux-image-unsigned-`uname -r`
+apt-get source linux-image-unsigned-$(uname -r)
 pushd linux-azure*/drivers/vdpa/vdpa_sim/
-cat <<'EOF' > Makefile
+cat <<'EOF' >Makefile
 # SPDX-License-Identifier: GPL-2.0
 obj-m += vdpa_sim.o
 obj-m += vdpa_sim_net.o
 obj-m += vdpa_sim_blk.o
 EOF
-make -C /lib/modules/`uname -r`/build M=$PWD
-sudo make -C /lib/modules/`uname -r`/build M=$PWD modules_install
+make -C /lib/modules/$(uname -r)/build M=$PWD
+sudo make -C /lib/modules/$(uname -r)/build M=$PWD modules_install
 popd
 sudo depmod -a
 sudo modprobe vdpa
@@ -21,7 +21,7 @@ build_spdk_nvme() {
 ./scripts/pkgdep.sh
 ./configure --with-vfio-user
 chmod +x /usr/local/lib/python3.10/dist-packages/ninja/data/bin/ninja
-make -j `nproc` || exit 1
+make -j $(nproc) || exit 1
 touch .built
 popd
 fi
@@ -137,7 +137,7 @@ update_workloads() {
 pushd $WORKLOADS_DIR
 mkdir alpine-minirootfs
 tar xf "$ALPINE_MINIROOTFS_TARBALL" -C alpine-minirootfs
-cat > alpine-minirootfs/init <<-EOF
+cat >alpine-minirootfs/init <<-EOF
 #! /bin/sh
 mount -t devtmpfs dev /dev
 echo \$TEST_STRING > /dev/console
@@ -146,7 +146,7 @@ update_workloads() {
 chmod +x alpine-minirootfs/init
 cd alpine-minirootfs
 find . -print0 |
-cpio --null --create --verbose --owner root:root --format=newc > "$ALPINE_INITRAMFS_IMAGE"
+cpio --null --create --verbose --owner root:root --format=newc >"$ALPINE_INITRAMFS_IMAGE"
 popd
 fi

@@ -167,10 +167,9 @@ update_workloads() {
 WGET_RETRY_MAX=10
 wget_retry=0

-until [ "$wget_retry" -ge "$WGET_RETRY_MAX" ]
-do
+until [ "$wget_retry" -ge "$WGET_RETRY_MAX" ]; do
 time wget $CH_RELEASE_URL -O "$CH_RELEASE_NAME" && break
-wget_retry=$((wget_retry+1))
+wget_retry=$((wget_retry + 1))
 done

 if [ $wget_retry -ge "$WGET_RETRY_MAX" ]; then
@@ -213,8 +212,8 @@ update_workloads() {
 SHARED_DIR="$WORKLOADS_DIR/shared_dir"
 if [ ! -d "$SHARED_DIR" ]; then
 mkdir -p $SHARED_DIR
-echo "foo" > "$SHARED_DIR/file1"
-echo "bar" > "$SHARED_DIR/file3" || exit 1
+echo "foo" >"$SHARED_DIR/file1"
+echo "bar" >"$SHARED_DIR/file3" || exit 1
 fi

 # Checkout and build SPDK NVMe
@@ -232,7 +231,6 @@ if [[ "$hypervisor" = "mshv" ]]; then
 exit 1
 fi

-
 # lock the workloads folder to avoid parallel updating by different containers
 (
 echo "try to lock $WORKLOADS_DIR folder and update"
@@ -258,8 +256,8 @@ sudo bash -c "echo 10 > /sys/kernel/mm/ksm/sleep_millisecs"
 sudo bash -c "echo 1 > /sys/kernel/mm/ksm/run"

 # Both test_vfio and ovs-dpdk rely on hugepages
-HUGEPAGESIZE=`grep Hugepagesize /proc/meminfo | awk '{print $2}'`
-PAGE_NUM=`echo $((12288 * 1024 / $HUGEPAGESIZE))`
+HUGEPAGESIZE=$(grep Hugepagesize /proc/meminfo | awk '{print $2}')
+PAGE_NUM=$(echo $((12288 * 1024 / $HUGEPAGESIZE)))
 echo $PAGE_NUM | sudo tee /proc/sys/vm/nr_hugepages
 sudo chmod a+rwX /dev/hugepages

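Several hunks in these test scripts only tighten redirection operators: '> file' becomes '>file' and '&> /dev/null' becomes '&>/dev/null'. Both spellings are identical to the shell; shfmt simply standardizes on no space between the operator and its target. A small sketch with placeholder paths:

echo "foo" > /tmp/demo_file  # accepted by bash
echo "foo" >/tmp/demo_file   # same command, the spacing shfmt settles on
command -v qemu-img &>/dev/null || echo "qemu-img not installed"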
@@ -12,7 +12,7 @@ process_common_args "$@"
 # For now these values are default for kvm
 test_features=""

-if [ "$hypervisor" = "mshv" ] ; then
+if [ "$hypervisor" = "mshv" ]; then
 test_features="--features mshv"
 fi

@@ -66,8 +66,8 @@ fi
 cargo build --features mshv --all --release --target $BUILD_TARGET

 # Test ovs-dpdk relies on hugepages
-HUGEPAGESIZE=`grep Hugepagesize /proc/meminfo | awk '{print $2}'`
-PAGE_NUM=`echo $((12288 * 1024 / $HUGEPAGESIZE))`
+HUGEPAGESIZE=$(grep Hugepagesize /proc/meminfo | awk '{print $2}')
+PAGE_NUM=$(echo $((12288 * 1024 / $HUGEPAGESIZE)))
 echo $PAGE_NUM | sudo tee /proc/sys/vm/nr_hugepages
 sudo chmod a+rwX /dev/hugepages

@@ -12,7 +12,7 @@ process_common_args "$@"
 # For now these values are default for kvm
 test_features=""

-if [ "$hypervisor" = "mshv" ] ; then
+if [ "$hypervisor" = "mshv" ]; then
 test_features="--features mshv"
 fi

@@ -8,7 +8,7 @@ process_common_args "$@"
 # For now these values are default for kvm
 test_features=""

-if [ "$hypervisor" = "mshv" ] ; then
+if [ "$hypervisor" = "mshv" ]; then
 test_features="--features mshv"
 fi
 WIN_IMAGE_FILE="/root/workloads/windows-server-2022-amd64-2.raw"
@@ -12,7 +12,7 @@ process_common_args "$@"
 # For now these values are default for kvm
 test_features=""

-if [ "$hypervisor" = "mshv" ] ; then
+if [ "$hypervisor" = "mshv" ]; then
 test_features="--features mshv"
 fi

@@ -77,7 +77,7 @@ if [ ! -f "$ALPINE_INITRAMFS_IMAGE" ]; then
 pushd $WORKLOADS_DIR
 mkdir alpine-minirootfs
 tar xf "$ALPINE_MINIROOTFS_TARBALL" -C alpine-minirootfs
-cat > alpine-minirootfs/init <<-EOF
+cat >alpine-minirootfs/init <<-EOF
 #! /bin/sh
 mount -t devtmpfs dev /dev
 echo \$TEST_STRING > /dev/console
@@ -86,7 +86,7 @@ if [ ! -f "$ALPINE_INITRAMFS_IMAGE" ]; then
 chmod +x alpine-minirootfs/init
 cd alpine-minirootfs
 find . -print0 |
-cpio --null --create --verbose --owner root:root --format=newc > "$ALPINE_INITRAMFS_IMAGE"
+cpio --null --create --verbose --owner root:root --format=newc >"$ALPINE_INITRAMFS_IMAGE"
 popd
 fi

@@ -118,26 +118,25 @@ if [ ! -f "$VIRTIOFSD" ]; then
 popd
 fi

-
 BLK_IMAGE="$WORKLOADS_DIR/blk.img"
 MNT_DIR="mount_image"
 if [ ! -f "$BLK_IMAGE" ]; then
 pushd $WORKLOADS_DIR
 fallocate -l 16M $BLK_IMAGE
 mkfs.ext4 -j $BLK_IMAGE
 mkdir $MNT_DIR
 sudo mount -t ext4 $BLK_IMAGE $MNT_DIR
 sudo bash -c "echo bar > $MNT_DIR/foo" || exit 1
 sudo umount $BLK_IMAGE
 rm -r $MNT_DIR
 popd
 fi

 SHARED_DIR="$WORKLOADS_DIR/shared_dir"
 if [ ! -d "$SHARED_DIR" ]; then
 mkdir -p $SHARED_DIR
-echo "foo" > "$SHARED_DIR/file1"
-echo "bar" > "$SHARED_DIR/file3" || exit 1
+echo "foo" >"$SHARED_DIR/file1"
+echo "bar" >"$SHARED_DIR/file3" || exit 1
 fi

 VFIO_DIR="$WORKLOADS_DIR/vfio"
@@ -148,7 +147,7 @@ cp $FOCAL_OS_RAW_IMAGE $VFIO_DIR
 cp $FW $VFIO_DIR
 cp $VMLINUX_IMAGE $VFIO_DIR || exit 1

 cargo build --features mshv --all --release --target $BUILD_TARGET

 # We always copy a fresh version of our binary for our L2 guest.
 cp target/$BUILD_TARGET/release/cloud-hypervisor $VFIO_DIR
@@ -161,8 +160,8 @@ sudo bash -c "echo 10 > /sys/kernel/mm/ksm/sleep_millisecs"
 sudo bash -c "echo 1 > /sys/kernel/mm/ksm/run"

 # Both test_vfio, ovs-dpdk and vDPA tests rely on hugepages
-HUGEPAGESIZE=`grep Hugepagesize /proc/meminfo | awk '{print $2}'`
-PAGE_NUM=`echo $((12288 * 1024 / $HUGEPAGESIZE))`
+HUGEPAGESIZE=$(grep Hugepagesize /proc/meminfo | awk '{print $2}')
+PAGE_NUM=$(echo $((12288 * 1024 / $HUGEPAGESIZE)))
 echo $PAGE_NUM | sudo tee /proc/sys/vm/nr_hugepages
 sudo chmod a+rwX /dev/hugepages

@@ -186,7 +185,7 @@ fi

 # Run tests on dbus_api
 if [ $RES -eq 0 ]; then
 cargo build --features "mshv,dbus_api" --all --release --target $BUILD_TARGET
 export RUST_BACKTRACE=1
 # integration tests now do not reply on build feature "dbus_api"
 time cargo test $test_features "dbus_api::$test_filter" -- ${test_binary_args[*]}
@@ -17,7 +17,7 @@ build_fio() {
 if [ ! -f "$FIO_DIR/.built" ]; then
 pushd $FIO_DIR
 ./configure
-make -j `nproc`
+make -j $(nproc)
 cp fio "$WORKLOADS_DIR/fio"
 touch .built
 popd
@@ -29,9 +29,9 @@ process_common_args "$@"
 cp scripts/sha1sums-${TEST_ARCH} $WORKLOADS_DIR

 if [ ${TEST_ARCH} == "aarch64" ]; then
 FOCAL_OS_IMAGE_NAME="focal-server-cloudimg-arm64-custom-20210929-0.qcow2"
 else
 FOCAL_OS_IMAGE_NAME="focal-server-cloudimg-amd64-custom-20210609-0.qcow2"
 fi

 FOCAL_OS_IMAGE_URL="https://cloud-hypervisor.azureedge.net/$FOCAL_OS_IMAGE_NAME"
@@ -90,8 +90,8 @@ fi
 cargo build --features mshv --all --release --target $BUILD_TARGET

 # setup hugepages
-HUGEPAGESIZE=`grep Hugepagesize /proc/meminfo | awk '{print $2}'`
-PAGE_NUM=`echo $((12288 * 1024 / $HUGEPAGESIZE))`
+HUGEPAGESIZE=$(grep Hugepagesize /proc/meminfo | awk '{print $2}')
+PAGE_NUM=$(echo $((12288 * 1024 / $HUGEPAGESIZE)))
 echo $PAGE_NUM | sudo tee /proc/sys/vm/nr_hugepages
 sudo chmod a+rwX /dev/hugepages

@@ -102,11 +102,11 @@ fi
 # Ensure that git commands can be run in this directory (for metrics report)
 git config --global --add safe.directory $PWD

-RUST_BACKTRACE_VALUE=`echo $RUST_BACKTRACE`
-if [ -z $RUST_BACKTRACE_VALUE ];then
+RUST_BACKTRACE_VALUE=$(echo $RUST_BACKTRACE)
+if [ -z $RUST_BACKTRACE_VALUE ]; then
 export RUST_BACKTRACE=1
 else
 echo "RUST_BACKTRACE is set to: $RUST_BACKTRACE_VALUE"
 fi
 time target/$BUILD_TARGET/release/performance-metrics ${test_binary_args[*]}
 RES=$?
@@ -54,12 +54,12 @@ build_custom_linux() {
 cp $SRCDIR/resources/linux-config-${ARCH} $LINUX_CUSTOM_DIR/.config

 pushd $LINUX_CUSTOM_DIR
-make -j `nproc`
+make -j $(nproc)
 if [ ${ARCH} == "x86_64" ]; then
 cp vmlinux "$WORKLOADS_DIR/" || exit 1
 elif [ ${ARCH} == "aarch64" ]; then
 cp arch/arm64/boot/Image "$WORKLOADS_DIR/" || exit 1
 cp arch/arm64/boot/Image.gz "$WORKLOADS_DIR/" || exit 1
 fi
 popd
 }
@@ -80,28 +80,31 @@ cmd_help() {

 process_common_args() {
 while [ $# -gt 0 ]; do
 case "$1" in
-"-h"|"--help") { cmd_help; exit 1; } ;;
-"--hypervisor")
-shift
-hypervisor="$1"
-;;
-"--test-filter")
-shift
-test_filter="$1"
-;;
-"--") {
-shift
-break
-} ;;
-*)
-echo "Unknown test scripts argument: $1. Please use '-- --help' for help."
-exit
-;;
+"-h" | "--help") {
+cmd_help
+exit 1
+} ;;
+"--hypervisor")
+shift
+hypervisor="$1"
+;;
+"--test-filter")
+shift
+test_filter="$1"
+;;
+"--") {
+shift
+break
+} ;;
+*)
+echo "Unknown test scripts argument: $1. Please use '-- --help' for help."
+exit
+;;
 esac
 shift
 done
 if [[ ! ("$hypervisor" = "kvm" || "$hypervisor" = "mshv") ]]; then
 die "Hypervisor value must be kvm or mshv"
 fi

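The process_common_args hunk splits the one-line case branches into multi-line form and puts spaces around the | separating alternative patterns; the shell accepts both, so this is layout only. A reduced sketch of the two spellings (the usage text is a placeholder):

# One-line branch, pre-shfmt style:
case "$1" in
"-h"|"--help") { echo "usage: $0 [-h]"; exit 1; } ;;
esac

# Multi-line branch, matching the formatting applied in this commit:
case "$1" in
"-h" | "--help") {
    echo "usage: $0 [-h]"
    exit 1
} ;;
esac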
@@ -112,8 +115,8 @@ download_hypervisor_fw() {
 if [ -n "$AUTH_DOWNLOAD_TOKEN" ]; then
 echo "Using authenticated download from GitHub"
 FW_URL=$(curl --silent https://api.github.com/repos/cloud-hypervisor/rust-hypervisor-firmware/releases/latest \
 --header "Authorization: Token $AUTH_DOWNLOAD_TOKEN" \
 --header "X-GitHub-Api-Version: 2022-11-28" | grep "browser_download_url" | grep -o 'https://.*[^ "]')
 else
 echo "Using anonymous download from GitHub"
 FW_URL=$(curl --silent https://api.github.com/repos/cloud-hypervisor/rust-hypervisor-firmware/releases/latest | grep "browser_download_url" | grep -o 'https://.*[^ "]')