scripts: fix shell scripts format according to shfmt
Unify coding style of shell scripts in accordance with shfmt checks.

Signed-off-by: Ruslan Mstoi <ruslan.mstoi@intel.com>
commit 318caeb9d8
parent 612a8dfb1b
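The changes below are mechanical shfmt fixes: backtick command substitutions become $(...), "then" and "do" move onto the line of their "if", "for" or "until", redirections lose the space after > and &>, arithmetic expansions gain spaces around operators, and trailing blank lines are dropped. As a minimal sketch of how such a cleanup is usually driven (the glob and exact invocation below are assumptions, not taken from this commit), shfmt can list, preview and rewrite the scripts:

    # List scripts whose formatting differs from shfmt's output (assumed glob).
    shfmt -l scripts/*.sh

    # Preview the changes shfmt would make, as a unified diff.
    shfmt -d scripts/*.sh

    # Rewrite the files in place.
    shfmt -w scripts/*.sh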
@@ -8,8 +8,6 @@
 a message about the compatibility of the image.
 '
-
-

 usage="$(basename "$0") [-h] -f -w -- program to check Cloud Hypervisor compatible image

 where:
@@ -18,12 +16,11 @@ where:
 -w directory to be used for temporary files"

 function check_command {
-if ! command -v $1 &> /dev/null
-then
+if ! command -v $1 &>/dev/null; then
 echo "Command $1 could not be found"
 exit 1
 fi
-};
+}

 function check_if_root {
 if [ "$EUID" -ne 0 ]; then
@@ -31,24 +28,29 @@ function check_if_root {
 exit 1
 fi

-};
+}

 check_if_root
 working_dir=""
 while getopts ':hf:w:' option; do
 case "$option" in
-h) echo "$usage"
+h)
+echo "$usage"
 exit
 ;;
-f) file_name=$OPTARG
+f)
+file_name=$OPTARG
 ;;
-w) working_dir=$OPTARG
+w)
+working_dir=$OPTARG
 ;;
-:) printf "missing argument for -%s\n" "$OPTARG" >&2
+:)
+printf "missing argument for -%s\n" "$OPTARG" >&2
 echo "$usage" >&2
 exit 1
 ;;
-\?) printf "illegal option: -%s\n" "$OPTARG" >&2
+\?)
+printf "illegal option: -%s\n" "$OPTARG" >&2
 echo "$usage" >&2
 exit 1
 ;;
@@ -66,15 +68,15 @@ if [[ ! -f ${file_name} ]]; then
 exit 1
 fi

-file_abs_path=`readlink -m ${file_name}`
+file_abs_path=$(readlink -m ${file_name})
 if [[ "${working_dir}" != "" && ! -d "${working_dir}" ]]; then
 echo "Directory ${working_dir} does not exist"
 exit 1
 elif [[ "${working_dir}" == "" ]]; then
-working_dir=`mktemp -d`
+working_dir=$(mktemp -d)
 tmp_created=1
 else
-working_dir=`readlink -m ${working_dir}`
+working_dir=$(readlink -m ${working_dir})
 fi

 filename="${file_name%.*}"
@@ -82,11 +84,10 @@ dest_file=${working_dir}/${filename}.raw
 image_type=$(qemu-img info ${file_abs_path} | grep 'file format:' | awk '{ print $3 }')
 echo "Image type detected as ${image_type}"
-

 if [[ "${image_type}" == "raw" ]]; then
 dest_file=${file_abs_path}
 elif [[ "$image_type" == "qcow2" ]]; then
-if lsmod | grep "nbd" &> /dev/null ; then
+if lsmod | grep "nbd" &>/dev/null; then
 echo "Module nbd is loaded!"
 else
 echo "Module nbd is not loaded. Trying to load the module"
@@ -106,18 +107,18 @@ check_command blkid
 part_type=$(blkid -o value -s PTTYPE ${dest_file})

 check_command partx
-nr_partitions=`partx -g ${dest_file} | wc -l`
+nr_partitions=$(partx -g ${dest_file} | wc -l)

 check_command fdisk
-out=`fdisk -l ${dest_file} --bytes | grep -i -A ${nr_partitions} 'Device' | tail -n +2`
+out=$(fdisk -l ${dest_file} --bytes | grep -i -A ${nr_partitions} 'Device' | tail -n +2)

 IFS='
 '
 i=0
 declare -A lines
-for x in $out ; do
+for x in $out; do
 lines[$i]=$x
-i=$((i+1))
+i=$((i + 1))
 done

 declare -A partitions
@@ -125,40 +126,38 @@ IFS=' '
 i=0
 ROWS=${#lines[@]}

-for line in "${lines[@]}";
-do
+for line in "${lines[@]}"; do
 j=0
-read -a str_arr <<< "$line"
-for val in "${str_arr[@]}";
-do
+read -a str_arr <<<"$line"
+for val in "${str_arr[@]}"; do
 if [[ "$val" != "*" ]]; then
-partitions[$i,$j]=$val
-j=$((j+1))
+partitions[$i, $j]=$val
+j=$((j + 1))
 fi
 done
-i=$((i+1))
+i=$((i + 1))
 done

 COLUMNS=$j
 START_ADDRESS_INDEX=1
-FILE_SYS_INDEX2=$((COLUMNS-1))
-FILE_SYS_INDEX1=$((COLUMNS-2))
+FILE_SYS_INDEX2=$((COLUMNS - 1))
+FILE_SYS_INDEX1=$((COLUMNS - 2))
 DEVICE_INDEX=0
 # Here we have all the partition info now lets mount and analyze the contents
-for ((i=0;i<ROWS;i++)) do
-if [[ "$part_type" == "gpt" && "${partitions[$i,${FILE_SYS_INDEX1}]}" == "Linux" && "${partitions[$i,${FILE_SYS_INDEX2}]}" == "filesystem" ]]; then
+for ((i = 0; i < ROWS; i++)); do
+if [[ "$part_type" == "gpt" && "${partitions[$i, ${FILE_SYS_INDEX1}]}" == "Linux" && "${partitions[$i, ${FILE_SYS_INDEX2}]}" == "filesystem" ]]; then
 echo "The image has GPT partitions"
 MOUNT_ROW=$i
 break
-elif [[ "$part_type" == "dos" && "${partitions[$i,${FILE_SYS_INDEX1}]}" == "Linux" && "${partitions[$i,${FILE_SYS_INDEX2}]}" == "" ]]; then
+elif [[ "$part_type" == "dos" && "${partitions[$i, ${FILE_SYS_INDEX1}]}" == "Linux" && "${partitions[$i, ${FILE_SYS_INDEX2}]}" == "" ]]; then
 echo "The image has DOS partitions"
 MOUNT_ROW=$i
 break
 fi
 done

-start_address=${partitions[${MOUNT_ROW},${START_ADDRESS_INDEX}]}
-offset=$((start_address*512))
+start_address=${partitions[${MOUNT_ROW}, ${START_ADDRESS_INDEX}]}
+offset=$((start_address * 512))

 MOUNT_DIR=/mnt/clh-img-check/
 rm -rf ${MOUNT_DIR}
@@ -166,7 +165,7 @@ mkdir ${MOUNT_DIR}
 if [[ "${image_type}" == "raw" ]]; then
 mount -o ro,loop,offset=$offset ${dest_file} ${MOUNT_DIR}
 elif [[ "${image_type}" == "qcow2" ]]; then
-mount -o ro ${partitions[${MOUNT_ROW},${DEVICE_INDEX}]} ${MOUNT_DIR}
+mount -o ro ${partitions[${MOUNT_ROW}, ${DEVICE_INDEX}]} ${MOUNT_DIR}
 fi

 CONFIG_DIR=${MOUNT_DIR}boot/
@@ -177,7 +176,7 @@ fi
 #check VIRTIO
 HAS_VIRTIO=1
 for conf_file in ${CONFIG_DIR}config*; do
-out=`grep -E "CONFIG_VIRTIO=y|CONFIG_VIRTIO_BLK=y|CONFIG_VIRTIO_BLK=m" ${conf_file} | wc -l`
+out=$(grep -E "CONFIG_VIRTIO=y|CONFIG_VIRTIO_BLK=y|CONFIG_VIRTIO_BLK=m" ${conf_file} | wc -l)
 if [[ "$out" != "2" ]]; then
 echo "VIRTIO not found"
 HAS_VIRTIO=0
@@ -191,8 +190,8 @@ if [[ "${tmp_created}" == "1" ]]; then
 rm -rf ${working_dir}
 fi

-if [[ "${image_type}" == "qcow2" ]];then
-qemu-nbd --disconnect ${dest_file} > /dev/null
+if [[ "${image_type}" == "qcow2" ]]; then
+qemu-nbd --disconnect ${dest_file} >/dev/null
 fi

 result=""
@@ -205,7 +204,7 @@ if [[ "${HAS_VIRTIO}" == "0" ]]; then
 fi
 result="$result VirtIO module not found in the image"
 fi
-if [[ "$result" == "" ]];then
+if [[ "$result" == "" ]]; then
 echo "No incompatibilities found"
 else
 echo "$result"
@@ -28,14 +28,14 @@ build_edk2() {
 checkout_repo "$EDK2_PLAT_DIR" "$EDK2_PLAT_REPO" master "8227e9e9f6a8aefbd772b40138f835121ccb2307"
 checkout_repo "$ACPICA_DIR" "$ACPICA_REPO" master "b9c69f81a05c45611c91ea9cbce8756078d76233"

-if [[ ! -f "$EDK2_DIR/.built" || \
-! -f "$EDK2_PLAT_DIR/.built" || \
+if [[ ! -f "$EDK2_DIR/.built" ||
+! -f "$EDK2_PLAT_DIR/.built" ||
 ! -f "$ACPICA_DIR/.built" ]]; then
 pushd "$EDK2_BUILD_DIR"
 # Build
-make -C acpica -j `nproc`
+make -C acpica -j $(nproc)
 source edk2/edksetup.sh
-make -C edk2/BaseTools -j `nproc`
+make -C edk2/BaseTools -j $(nproc)
 build -a AARCH64 -t GCC5 -p ArmVirtPkg/ArmVirtCloudHv.dsc -b RELEASE -n 0
 cp Build/ArmVirtCloudHv-AARCH64/RELEASE_GCC5/FV/CLOUDHV_EFI.fd "$WORKLOADS_DIR"
 touch "$EDK2_DIR"/.built
@@ -44,4 +44,3 @@ build_edk2() {
 popd
 fi
 }
-
@@ -6,4 +6,3 @@ mkdosfs -n CIDATA -C /tmp/ubuntu-cloudinit.img 8192
 mcopy -oi /tmp/ubuntu-cloudinit.img -s test_data/cloud-init/ubuntu/local/user-data ::
 mcopy -oi /tmp/ubuntu-cloudinit.img -s test_data/cloud-init/ubuntu/local/meta-data ::
 mcopy -oi /tmp/ubuntu-cloudinit.img -s test_data/cloud-init/ubuntu/local/network-config ::
-
@@ -382,7 +382,7 @@ cmd_tests() {
 exported_device="/dev/mshv"
 fi

-if [ ! -e "${exported_device}" ] ; then
+if [ ! -e "${exported_device}" ]; then
 die "${exported_device} does not exist on the system"
 fi

@@ -4,16 +4,16 @@ set -x
 sudo apt install -y libncurses-dev gawk flex bison openssl libssl-dev dkms libelf-dev libudev-dev libpci-dev libiberty-dev autoconf git make dpkg-dev libmnl-dev pkg-config iproute2
 sudo sed -i -- 's/# deb-src/deb-src/g' /etc/apt/sources.list
 sudo apt update
-apt-get source linux-image-unsigned-`uname -r`
+apt-get source linux-image-unsigned-$(uname -r)
 pushd linux-azure*/drivers/vdpa/vdpa_sim/
-cat <<'EOF' > Makefile
+cat <<'EOF' >Makefile
 # SPDX-License-Identifier: GPL-2.0
 obj-m += vdpa_sim.o
 obj-m += vdpa_sim_net.o
 obj-m += vdpa_sim_blk.o
 EOF
-make -C /lib/modules/`uname -r`/build M=$PWD
-sudo make -C /lib/modules/`uname -r`/build M=$PWD modules_install
+make -C /lib/modules/$(uname -r)/build M=$PWD
+sudo make -C /lib/modules/$(uname -r)/build M=$PWD modules_install
 popd
 sudo depmod -a
 sudo modprobe vdpa
@@ -21,7 +21,7 @@ build_spdk_nvme() {
 ./scripts/pkgdep.sh
 ./configure --with-vfio-user
 chmod +x /usr/local/lib/python3.10/dist-packages/ninja/data/bin/ninja
-make -j `nproc` || exit 1
+make -j $(nproc) || exit 1
 touch .built
 popd
 fi
@@ -137,7 +137,7 @@ update_workloads() {
 pushd $WORKLOADS_DIR
 mkdir alpine-minirootfs
 tar xf "$ALPINE_MINIROOTFS_TARBALL" -C alpine-minirootfs
-cat > alpine-minirootfs/init <<-EOF
+cat >alpine-minirootfs/init <<-EOF
 #! /bin/sh
 mount -t devtmpfs dev /dev
 echo \$TEST_STRING > /dev/console
@@ -146,7 +146,7 @@ update_workloads() {
 chmod +x alpine-minirootfs/init
 cd alpine-minirootfs
 find . -print0 |
-cpio --null --create --verbose --owner root:root --format=newc > "$ALPINE_INITRAMFS_IMAGE"
+cpio --null --create --verbose --owner root:root --format=newc >"$ALPINE_INITRAMFS_IMAGE"
 popd
 fi

@@ -167,10 +167,9 @@ update_workloads() {
 WGET_RETRY_MAX=10
 wget_retry=0

-until [ "$wget_retry" -ge "$WGET_RETRY_MAX" ]
-do
+until [ "$wget_retry" -ge "$WGET_RETRY_MAX" ]; do
 time wget $CH_RELEASE_URL -O "$CH_RELEASE_NAME" && break
-wget_retry=$((wget_retry+1))
+wget_retry=$((wget_retry + 1))
 done

 if [ $wget_retry -ge "$WGET_RETRY_MAX" ]; then
@@ -213,8 +212,8 @@ update_workloads() {
 SHARED_DIR="$WORKLOADS_DIR/shared_dir"
 if [ ! -d "$SHARED_DIR" ]; then
 mkdir -p $SHARED_DIR
-echo "foo" > "$SHARED_DIR/file1"
-echo "bar" > "$SHARED_DIR/file3" || exit 1
+echo "foo" >"$SHARED_DIR/file1"
+echo "bar" >"$SHARED_DIR/file3" || exit 1
 fi

 # Checkout and build SPDK NVMe
@@ -232,7 +231,6 @@ if [[ "$hypervisor" = "mshv" ]]; then
 exit 1
 fi
-

 # lock the workloads folder to avoid parallel updating by different containers
 (
 echo "try to lock $WORKLOADS_DIR folder and update"
@@ -258,8 +256,8 @@ sudo bash -c "echo 10 > /sys/kernel/mm/ksm/sleep_millisecs"
 sudo bash -c "echo 1 > /sys/kernel/mm/ksm/run"

 # Both test_vfio and ovs-dpdk rely on hugepages
-HUGEPAGESIZE=`grep Hugepagesize /proc/meminfo | awk '{print $2}'`
-PAGE_NUM=`echo $((12288 * 1024 / $HUGEPAGESIZE))`
+HUGEPAGESIZE=$(grep Hugepagesize /proc/meminfo | awk '{print $2}')
+PAGE_NUM=$(echo $((12288 * 1024 / $HUGEPAGESIZE)))
 echo $PAGE_NUM | sudo tee /proc/sys/vm/nr_hugepages
 sudo chmod a+rwX /dev/hugepages

@@ -12,7 +12,7 @@ process_common_args "$@"
 # For now these values are default for kvm
 test_features=""

-if [ "$hypervisor" = "mshv" ] ; then
+if [ "$hypervisor" = "mshv" ]; then
 test_features="--features mshv"
 fi

@@ -66,8 +66,8 @@ fi
 cargo build --features mshv --all --release --target $BUILD_TARGET

 # Test ovs-dpdk relies on hugepages
-HUGEPAGESIZE=`grep Hugepagesize /proc/meminfo | awk '{print $2}'`
-PAGE_NUM=`echo $((12288 * 1024 / $HUGEPAGESIZE))`
+HUGEPAGESIZE=$(grep Hugepagesize /proc/meminfo | awk '{print $2}')
+PAGE_NUM=$(echo $((12288 * 1024 / $HUGEPAGESIZE)))
 echo $PAGE_NUM | sudo tee /proc/sys/vm/nr_hugepages
 sudo chmod a+rwX /dev/hugepages

@@ -12,7 +12,7 @@ process_common_args "$@"
 # For now these values are default for kvm
 test_features=""

-if [ "$hypervisor" = "mshv" ] ; then
+if [ "$hypervisor" = "mshv" ]; then
 test_features="--features mshv"
 fi

@@ -8,7 +8,7 @@ process_common_args "$@"
 # For now these values are default for kvm
 test_features=""

-if [ "$hypervisor" = "mshv" ] ; then
+if [ "$hypervisor" = "mshv" ]; then
 test_features="--features mshv"
 fi
 WIN_IMAGE_FILE="/root/workloads/windows-server-2022-amd64-2.raw"
@@ -12,7 +12,7 @@ process_common_args "$@"
 # For now these values are default for kvm
 test_features=""

-if [ "$hypervisor" = "mshv" ] ; then
+if [ "$hypervisor" = "mshv" ]; then
 test_features="--features mshv"
 fi

@@ -77,7 +77,7 @@ if [ ! -f "$ALPINE_INITRAMFS_IMAGE" ]; then
 pushd $WORKLOADS_DIR
 mkdir alpine-minirootfs
 tar xf "$ALPINE_MINIROOTFS_TARBALL" -C alpine-minirootfs
-cat > alpine-minirootfs/init <<-EOF
+cat >alpine-minirootfs/init <<-EOF
 #! /bin/sh
 mount -t devtmpfs dev /dev
 echo \$TEST_STRING > /dev/console
@@ -86,7 +86,7 @@ if [ ! -f "$ALPINE_INITRAMFS_IMAGE" ]; then
 chmod +x alpine-minirootfs/init
 cd alpine-minirootfs
 find . -print0 |
-cpio --null --create --verbose --owner root:root --format=newc > "$ALPINE_INITRAMFS_IMAGE"
+cpio --null --create --verbose --owner root:root --format=newc >"$ALPINE_INITRAMFS_IMAGE"
 popd
 fi

@@ -118,7 +118,6 @@ if [ ! -f "$VIRTIOFSD" ]; then
 popd
 fi
-

 BLK_IMAGE="$WORKLOADS_DIR/blk.img"
 MNT_DIR="mount_image"
 if [ ! -f "$BLK_IMAGE" ]; then
@@ -136,8 +135,8 @@ fi
 SHARED_DIR="$WORKLOADS_DIR/shared_dir"
 if [ ! -d "$SHARED_DIR" ]; then
 mkdir -p $SHARED_DIR
-echo "foo" > "$SHARED_DIR/file1"
-echo "bar" > "$SHARED_DIR/file3" || exit 1
+echo "foo" >"$SHARED_DIR/file1"
+echo "bar" >"$SHARED_DIR/file3" || exit 1
 fi

 VFIO_DIR="$WORKLOADS_DIR/vfio"
@@ -161,8 +160,8 @@ sudo bash -c "echo 10 > /sys/kernel/mm/ksm/sleep_millisecs"
 sudo bash -c "echo 1 > /sys/kernel/mm/ksm/run"

 # Both test_vfio, ovs-dpdk and vDPA tests rely on hugepages
-HUGEPAGESIZE=`grep Hugepagesize /proc/meminfo | awk '{print $2}'`
-PAGE_NUM=`echo $((12288 * 1024 / $HUGEPAGESIZE))`
+HUGEPAGESIZE=$(grep Hugepagesize /proc/meminfo | awk '{print $2}')
+PAGE_NUM=$(echo $((12288 * 1024 / $HUGEPAGESIZE)))
 echo $PAGE_NUM | sudo tee /proc/sys/vm/nr_hugepages
 sudo chmod a+rwX /dev/hugepages

@@ -17,7 +17,7 @@ build_fio() {
 if [ ! -f "$FIO_DIR/.built" ]; then
 pushd $FIO_DIR
 ./configure
-make -j `nproc`
+make -j $(nproc)
 cp fio "$WORKLOADS_DIR/fio"
 touch .built
 popd
@@ -90,8 +90,8 @@ fi
 cargo build --features mshv --all --release --target $BUILD_TARGET

 # setup hugepages
-HUGEPAGESIZE=`grep Hugepagesize /proc/meminfo | awk '{print $2}'`
-PAGE_NUM=`echo $((12288 * 1024 / $HUGEPAGESIZE))`
+HUGEPAGESIZE=$(grep Hugepagesize /proc/meminfo | awk '{print $2}')
+PAGE_NUM=$(echo $((12288 * 1024 / $HUGEPAGESIZE)))
 echo $PAGE_NUM | sudo tee /proc/sys/vm/nr_hugepages
 sudo chmod a+rwX /dev/hugepages

@@ -102,8 +102,8 @@ fi
 # Ensure that git commands can be run in this directory (for metrics report)
 git config --global --add safe.directory $PWD

-RUST_BACKTRACE_VALUE=`echo $RUST_BACKTRACE`
-if [ -z $RUST_BACKTRACE_VALUE ];then
+RUST_BACKTRACE_VALUE=$(echo $RUST_BACKTRACE)
+if [ -z $RUST_BACKTRACE_VALUE ]; then
 export RUST_BACKTRACE=1
 else
 echo "RUST_BACKTRACE is set to: $RUST_BACKTRACE_VALUE"
@@ -54,7 +54,7 @@ build_custom_linux() {
 cp $SRCDIR/resources/linux-config-${ARCH} $LINUX_CUSTOM_DIR/.config

 pushd $LINUX_CUSTOM_DIR
-make -j `nproc`
+make -j $(nproc)
 if [ ${ARCH} == "x86_64" ]; then
 cp vmlinux "$WORKLOADS_DIR/" || exit 1
 elif [ ${ARCH} == "aarch64" ]; then
@@ -81,7 +81,10 @@ cmd_help() {
 process_common_args() {
 while [ $# -gt 0 ]; do
 case "$1" in
-"-h"|"--help") { cmd_help; exit 1; } ;;
+"-h" | "--help") {
+cmd_help
+exit 1
+} ;;
 "--hypervisor")
 shift
 hypervisor="$1"