mirror of https://github.com/cloud-hypervisor/cloud-hypervisor.git
synced 2024-12-22 05:35:20 +00:00
scripts: fix shell scripts format according to shfmt
Unify coding style of shell scripts in accordance with shfmt checks.

Signed-off-by: Ruslan Mstoi <ruslan.mstoi@intel.com>
This commit is contained in:
parent 612a8dfb1b
commit 318caeb9d8
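For context, the sketch below shows a typical way to run shfmt against the repository's shell scripts; the exact flags and paths used by this project's checks are an assumption here, not taken from this commit.

# List scripts whose formatting differs from shfmt's canonical output (check mode).
shfmt -l scripts/*.sh

# Print the formatting changes as a diff without modifying any file.
shfmt -d scripts/*.sh

# Rewrite the scripts in place.
shfmt -w scripts/*.sh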
@@ -8,8 +8,6 @@
 a message about the compatibility of the image.
 '

 usage="$(basename "$0") [-h] -f -w -- program to check Cloud Hypervisor compatible image

 where:
@@ -18,12 +16,11 @@ where:
 -w directory to be used for temporary files"

 function check_command {
-if ! command -v $1 &> /dev/null
-then
+if ! command -v $1 &>/dev/null; then
 echo "Command $1 could not be found"
 exit 1
 fi
-};
+}

 function check_if_root {
 if [ "$EUID" -ne 0 ]; then
@@ -31,24 +28,29 @@ function check_if_root {
 exit 1
 fi

-};
+}

 check_if_root
 working_dir=""
 while getopts ':hf:w:' option; do
 case "$option" in
-h) echo "$usage"
+h)
+echo "$usage"
 exit
 ;;
-f) file_name=$OPTARG
+f)
+file_name=$OPTARG
 ;;
-w) working_dir=$OPTARG
+w)
+working_dir=$OPTARG
 ;;
-:) printf "missing argument for -%s\n" "$OPTARG" >&2
+:)
+printf "missing argument for -%s\n" "$OPTARG" >&2
 echo "$usage" >&2
 exit 1
 ;;
-\?) printf "illegal option: -%s\n" "$OPTARG" >&2
+\?)
+printf "illegal option: -%s\n" "$OPTARG" >&2
 echo "$usage" >&2
 exit 1
 ;;
@@ -66,15 +68,15 @@ if [[ ! -f ${file_name} ]]; then
 exit 1
 fi

-file_abs_path=`readlink -m ${file_name}`
+file_abs_path=$(readlink -m ${file_name})
 if [[ "${working_dir}" != "" && ! -d "${working_dir}" ]]; then
 echo "Directory ${working_dir} does not exist"
 exit 1
 elif [[ "${working_dir}" == "" ]]; then
-working_dir=`mktemp -d`
+working_dir=$(mktemp -d)
 tmp_created=1
 else
-working_dir=`readlink -m ${working_dir}`
+working_dir=$(readlink -m ${working_dir})
 fi

 filename="${file_name%.*}"
@@ -82,7 +84,6 @@ dest_file=${working_dir}/${filename}.raw
 image_type=$(qemu-img info ${file_abs_path} | grep 'file format:' | awk '{ print $3 }')
 echo "Image type detected as ${image_type}"

 if [[ "${image_type}" == "raw" ]]; then
 dest_file=${file_abs_path}
 elif [[ "$image_type" == "qcow2" ]]; then
@@ -106,10 +107,10 @@ check_command blkid
 part_type=$(blkid -o value -s PTTYPE ${dest_file})

 check_command partx
-nr_partitions=`partx -g ${dest_file} | wc -l`
+nr_partitions=$(partx -g ${dest_file} | wc -l)

 check_command fdisk
-out=`fdisk -l ${dest_file} --bytes | grep -i -A ${nr_partitions} 'Device' | tail -n +2`
+out=$(fdisk -l ${dest_file} --bytes | grep -i -A ${nr_partitions} 'Device' | tail -n +2)

 IFS='
 '
@@ -125,12 +126,10 @@ IFS=' '
 i=0
 ROWS=${#lines[@]}

-for line in "${lines[@]}";
-do
+for line in "${lines[@]}"; do
 j=0
 read -a str_arr <<<"$line"
-for val in "${str_arr[@]}";
-do
+for val in "${str_arr[@]}"; do
 if [[ "$val" != "*" ]]; then
 partitions[$i, $j]=$val
 j=$((j + 1))
@@ -145,7 +144,7 @@ FILE_SYS_INDEX2=$((COLUMNS-1))
 FILE_SYS_INDEX1=$((COLUMNS - 2))
 DEVICE_INDEX=0
 # Here we have all the partition info now lets mount and analyze the contents
-for ((i=0;i<ROWS;i++)) do
+for ((i = 0; i < ROWS; i++)); do
 if [[ "$part_type" == "gpt" && "${partitions[$i, ${FILE_SYS_INDEX1}]}" == "Linux" && "${partitions[$i, ${FILE_SYS_INDEX2}]}" == "filesystem" ]]; then
 echo "The image has GPT partitions"
 MOUNT_ROW=$i
@@ -177,7 +176,7 @@ fi
 #check VIRTIO
 HAS_VIRTIO=1
 for conf_file in ${CONFIG_DIR}config*; do
-out=`grep -E "CONFIG_VIRTIO=y|CONFIG_VIRTIO_BLK=y|CONFIG_VIRTIO_BLK=m" ${conf_file} | wc -l`
+out=$(grep -E "CONFIG_VIRTIO=y|CONFIG_VIRTIO_BLK=y|CONFIG_VIRTIO_BLK=m" ${conf_file} | wc -l)
 if [[ "$out" != "2" ]]; then
 echo "VIRTIO not found"
 HAS_VIRTIO=0
@@ -28,14 +28,14 @@ build_edk2() {
 checkout_repo "$EDK2_PLAT_DIR" "$EDK2_PLAT_REPO" master "8227e9e9f6a8aefbd772b40138f835121ccb2307"
 checkout_repo "$ACPICA_DIR" "$ACPICA_REPO" master "b9c69f81a05c45611c91ea9cbce8756078d76233"

-if [[ ! -f "$EDK2_DIR/.built" || \
-! -f "$EDK2_PLAT_DIR/.built" || \
+if [[ ! -f "$EDK2_DIR/.built" ||
+! -f "$EDK2_PLAT_DIR/.built" ||
 ! -f "$ACPICA_DIR/.built" ]]; then
 pushd "$EDK2_BUILD_DIR"
 # Build
-make -C acpica -j `nproc`
+make -C acpica -j $(nproc)
 source edk2/edksetup.sh
-make -C edk2/BaseTools -j `nproc`
+make -C edk2/BaseTools -j $(nproc)
 build -a AARCH64 -t GCC5 -p ArmVirtPkg/ArmVirtCloudHv.dsc -b RELEASE -n 0
 cp Build/ArmVirtCloudHv-AARCH64/RELEASE_GCC5/FV/CLOUDHV_EFI.fd "$WORKLOADS_DIR"
 touch "$EDK2_DIR"/.built
@@ -44,4 +44,3 @@ build_edk2() {
 popd
 fi
 }
-
@@ -6,4 +6,3 @@ mkdosfs -n CIDATA -C /tmp/ubuntu-cloudinit.img 8192
 mcopy -oi /tmp/ubuntu-cloudinit.img -s test_data/cloud-init/ubuntu/local/user-data ::
 mcopy -oi /tmp/ubuntu-cloudinit.img -s test_data/cloud-init/ubuntu/local/meta-data ::
 mcopy -oi /tmp/ubuntu-cloudinit.img -s test_data/cloud-init/ubuntu/local/network-config ::
-
@@ -4,7 +4,7 @@ set -x
 sudo apt install -y libncurses-dev gawk flex bison openssl libssl-dev dkms libelf-dev libudev-dev libpci-dev libiberty-dev autoconf git make dpkg-dev libmnl-dev pkg-config iproute2
 sudo sed -i -- 's/# deb-src/deb-src/g' /etc/apt/sources.list
 sudo apt update
-apt-get source linux-image-unsigned-`uname -r`
+apt-get source linux-image-unsigned-$(uname -r)
 pushd linux-azure*/drivers/vdpa/vdpa_sim/
 cat <<'EOF' >Makefile
 # SPDX-License-Identifier: GPL-2.0
@@ -12,8 +12,8 @@ obj-m += vdpa_sim.o
 obj-m += vdpa_sim_net.o
 obj-m += vdpa_sim_blk.o
 EOF
-make -C /lib/modules/`uname -r`/build M=$PWD
-sudo make -C /lib/modules/`uname -r`/build M=$PWD modules_install
+make -C /lib/modules/$(uname -r)/build M=$PWD
+sudo make -C /lib/modules/$(uname -r)/build M=$PWD modules_install
 popd
 sudo depmod -a
 sudo modprobe vdpa
@@ -21,7 +21,7 @@ build_spdk_nvme() {
 ./scripts/pkgdep.sh
 ./configure --with-vfio-user
 chmod +x /usr/local/lib/python3.10/dist-packages/ninja/data/bin/ninja
-make -j `nproc` || exit 1
+make -j $(nproc) || exit 1
 touch .built
 popd
 fi
@@ -167,8 +167,7 @@ update_workloads() {
 WGET_RETRY_MAX=10
 wget_retry=0

-until [ "$wget_retry" -ge "$WGET_RETRY_MAX" ]
-do
+until [ "$wget_retry" -ge "$WGET_RETRY_MAX" ]; do
 time wget $CH_RELEASE_URL -O "$CH_RELEASE_NAME" && break
 wget_retry=$((wget_retry + 1))
 done
@@ -232,7 +231,6 @@ if [[ "$hypervisor" = "mshv" ]]; then
 exit 1
 fi

 # lock the workloads folder to avoid parallel updating by different containers
 (
 echo "try to lock $WORKLOADS_DIR folder and update"
@@ -258,8 +256,8 @@ sudo bash -c "echo 10 > /sys/kernel/mm/ksm/sleep_millisecs"
 sudo bash -c "echo 1 > /sys/kernel/mm/ksm/run"

 # Both test_vfio and ovs-dpdk rely on hugepages
-HUGEPAGESIZE=`grep Hugepagesize /proc/meminfo | awk '{print $2}'`
-PAGE_NUM=`echo $((12288 * 1024 / $HUGEPAGESIZE))`
+HUGEPAGESIZE=$(grep Hugepagesize /proc/meminfo | awk '{print $2}')
+PAGE_NUM=$(echo $((12288 * 1024 / $HUGEPAGESIZE)))
 echo $PAGE_NUM | sudo tee /proc/sys/vm/nr_hugepages
 sudo chmod a+rwX /dev/hugepages
@@ -66,8 +66,8 @@ fi
 cargo build --features mshv --all --release --target $BUILD_TARGET

 # Test ovs-dpdk relies on hugepages
-HUGEPAGESIZE=`grep Hugepagesize /proc/meminfo | awk '{print $2}'`
-PAGE_NUM=`echo $((12288 * 1024 / $HUGEPAGESIZE))`
+HUGEPAGESIZE=$(grep Hugepagesize /proc/meminfo | awk '{print $2}')
+PAGE_NUM=$(echo $((12288 * 1024 / $HUGEPAGESIZE)))
 echo $PAGE_NUM | sudo tee /proc/sys/vm/nr_hugepages
 sudo chmod a+rwX /dev/hugepages
@@ -118,7 +118,6 @@ if [ ! -f "$VIRTIOFSD" ]; then
 popd
 fi

 BLK_IMAGE="$WORKLOADS_DIR/blk.img"
 MNT_DIR="mount_image"
 if [ ! -f "$BLK_IMAGE" ]; then
@@ -161,8 +160,8 @@ sudo bash -c "echo 10 > /sys/kernel/mm/ksm/sleep_millisecs"
 sudo bash -c "echo 1 > /sys/kernel/mm/ksm/run"

 # Both test_vfio, ovs-dpdk and vDPA tests rely on hugepages
-HUGEPAGESIZE=`grep Hugepagesize /proc/meminfo | awk '{print $2}'`
-PAGE_NUM=`echo $((12288 * 1024 / $HUGEPAGESIZE))`
+HUGEPAGESIZE=$(grep Hugepagesize /proc/meminfo | awk '{print $2}')
+PAGE_NUM=$(echo $((12288 * 1024 / $HUGEPAGESIZE)))
 echo $PAGE_NUM | sudo tee /proc/sys/vm/nr_hugepages
 sudo chmod a+rwX /dev/hugepages
@@ -17,7 +17,7 @@ build_fio() {
 if [ ! -f "$FIO_DIR/.built" ]; then
 pushd $FIO_DIR
 ./configure
-make -j `nproc`
+make -j $(nproc)
 cp fio "$WORKLOADS_DIR/fio"
 touch .built
 popd
@@ -90,8 +90,8 @@ fi
 cargo build --features mshv --all --release --target $BUILD_TARGET

 # setup hugepages
-HUGEPAGESIZE=`grep Hugepagesize /proc/meminfo | awk '{print $2}'`
-PAGE_NUM=`echo $((12288 * 1024 / $HUGEPAGESIZE))`
+HUGEPAGESIZE=$(grep Hugepagesize /proc/meminfo | awk '{print $2}')
+PAGE_NUM=$(echo $((12288 * 1024 / $HUGEPAGESIZE)))
 echo $PAGE_NUM | sudo tee /proc/sys/vm/nr_hugepages
 sudo chmod a+rwX /dev/hugepages
@@ -102,7 +102,7 @@ fi
 # Ensure that git commands can be run in this directory (for metrics report)
 git config --global --add safe.directory $PWD

-RUST_BACKTRACE_VALUE=`echo $RUST_BACKTRACE`
+RUST_BACKTRACE_VALUE=$(echo $RUST_BACKTRACE)
 if [ -z $RUST_BACKTRACE_VALUE ]; then
 export RUST_BACKTRACE=1
 else
@@ -54,7 +54,7 @@ build_custom_linux() {
 cp $SRCDIR/resources/linux-config-${ARCH} $LINUX_CUSTOM_DIR/.config

 pushd $LINUX_CUSTOM_DIR
-make -j `nproc`
+make -j $(nproc)
 if [ ${ARCH} == "x86_64" ]; then
 cp vmlinux "$WORKLOADS_DIR/" || exit 1
 elif [ ${ARCH} == "aarch64" ]; then
@@ -81,7 +81,10 @@ cmd_help() {
 process_common_args() {
 while [ $# -gt 0 ]; do
 case "$1" in
-"-h"|"--help") { cmd_help; exit 1; } ;;
+"-h" | "--help") {
+cmd_help
+exit 1
+} ;;
 "--hypervisor")
 shift
 hypervisor="$1"