scripts: Improve AArch64 CI for parallel executors

Lock "work_loads" folder when one job is syncing files. If another job
arrives, wait until the lock is released.

Signed-off-by: Michael Zhao <michael.zhao@arm.com>
Authored by Michael Zhao on 2020-07-28 11:57:01 +08:00, committed by Rob Bradford
parent 12c5b7668a
commit 7e3cbf04de
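
The serialization is built on flock(1): the job opens a dedicated lock file on a spare file descriptor and takes an exclusive lock on it before touching the workloads folder; a second job blocks at the same flock call until the first releases the descriptor. A minimal, self-contained sketch of the same pattern follows (the lock path and the do_sync function are illustrative stand-ins, not the script's real names):

#!/bin/bash
# Sketch of the flock-based serialization this change applies.
# LOCK_FILE and do_sync are placeholders for illustration only.
LOCK_FILE="/tmp/workloads.lock"

do_sync() {
    echo "[$$] syncing workloads..."
    sleep 5                     # stands in for the downloads and builds
    echo "[$$] sync finished"
}

(
    echo "[$$] waiting for exclusive lock on $LOCK_FILE"
    # flock blocks here until no other process holds the lock on fd 12
    flock -x 12 && do_sync
) 12>"$LOCK_FILE"

Starting two copies of this sketch at the same time shows the intended behaviour: the second process blocks at flock until the first finishes do_sync, which is what the CI needs when two executors share one workloads directory on the same host.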

@@ -6,162 +6,175 @@ source $HOME/.cargo/env
export BUILD_TARGET=${BUILD_TARGET-aarch64-unknown-linux-gnu}

WORKLOADS_DIR="$HOME/workloads"
+WORKLOADS_LOCK="$WORKLOADS_DIR/integration_test.lock"
mkdir -p "$WORKLOADS_DIR"

+update_workloads() {
cp scripts/sha1sums-aarch64 $WORKLOADS_DIR

BIONIC_OS_IMAGE_DOWNLOAD_NAME="bionic-server-cloudimg-arm64.img"
BIONIC_OS_IMAGE_DOWNLOAD_URL="https://cloudhypervisorstorage.blob.core.windows.net/images/$BIONIC_OS_IMAGE_DOWNLOAD_NAME"
BIONIC_OS_DOWNLOAD_IMAGE="$WORKLOADS_DIR/$BIONIC_OS_IMAGE_DOWNLOAD_NAME"
if [ ! -f "$BIONIC_OS_DOWNLOAD_IMAGE" ]; then
    pushd $WORKLOADS_DIR
    time wget --quiet $BIONIC_OS_IMAGE_DOWNLOAD_URL || exit 1
    popd
fi

BIONIC_OS_RAW_IMAGE_NAME="bionic-server-cloudimg-arm64.raw"
BIONIC_OS_RAW_IMAGE="$WORKLOADS_DIR/$BIONIC_OS_RAW_IMAGE_NAME"
if [ ! -f "$BIONIC_OS_RAW_IMAGE" ]; then
    pushd $WORKLOADS_DIR
    time qemu-img convert -p -f qcow2 -O raw $BIONIC_OS_IMAGE_DOWNLOAD_NAME $BIONIC_OS_RAW_IMAGE_NAME || exit 1
    popd
fi

# Convert the raw image to qcow2 image to remove compressed blocks from the disk. Therefore the
# qcow2 format image can be directly used in the integration test.
BIONIC_OS_QCOW2_IMAGE_UNCOMPRESSED_NAME="bionic-server-cloudimg-arm64.qcow2"
BIONIC_OS_QCOW2_UNCOMPRESSED_IMAGE="$WORKLOADS_DIR/$BIONIC_OS_QCOW2_IMAGE_UNCOMPRESSED_NAME"
if [ ! -f "$BIONIC_OS_QCOW2_UNCOMPRESSED_IMAGE" ]; then
    pushd $WORKLOADS_DIR
    time qemu-img convert -p -f raw -O qcow2 $BIONIC_OS_RAW_IMAGE_NAME $BIONIC_OS_QCOW2_UNCOMPRESSED_IMAGE || exit 1
    popd
fi

FOCAL_OS_IMAGE_DOWNLOAD_NAME="focal-server-cloudimg-arm64.img"
FOCAL_OS_IMAGE_DOWNLOAD_URL="https://cloudhypervisorstorage.blob.core.windows.net/images/$FOCAL_OS_IMAGE_DOWNLOAD_NAME"
FOCAL_OS_DOWNLOAD_IMAGE="$WORKLOADS_DIR/$FOCAL_OS_IMAGE_DOWNLOAD_NAME"
if [ ! -f "$FOCAL_OS_DOWNLOAD_IMAGE" ]; then
    pushd $WORKLOADS_DIR
    time wget --quiet $FOCAL_OS_IMAGE_DOWNLOAD_URL || exit 1
    popd
fi

FOCAL_OS_RAW_IMAGE_NAME="focal-server-cloudimg-arm64.raw"
FOCAL_OS_RAW_IMAGE="$WORKLOADS_DIR/$FOCAL_OS_RAW_IMAGE_NAME"
if [ ! -f "$FOCAL_OS_RAW_IMAGE" ]; then
    pushd $WORKLOADS_DIR
    time qemu-img convert -p -f qcow2 -O raw $FOCAL_OS_IMAGE_DOWNLOAD_NAME $FOCAL_OS_RAW_IMAGE_NAME || exit 1
    popd
fi

# Convert the raw image to qcow2 image to remove compressed blocks from the disk. Therefore the
# qcow2 format image can be directly used in the integration test.
FOCAL_OS_QCOW2_IMAGE_UNCOMPRESSED_NAME="focal-server-cloudimg-arm64.qcow2"
FOCAL_OS_QCOW2_UNCOMPRESSED_IMAGE="$WORKLOADS_DIR/$FOCAL_OS_QCOW2_IMAGE_UNCOMPRESSED_NAME"
if [ ! -f "$FOCAL_OS_QCOW2_UNCOMPRESSED_IMAGE" ]; then
    pushd $WORKLOADS_DIR
    time qemu-img convert -p -f raw -O qcow2 $FOCAL_OS_RAW_IMAGE_NAME $FOCAL_OS_QCOW2_UNCOMPRESSED_IMAGE || exit 1
    popd
fi

ALPINE_MINIROOTFS_URL="http://dl-cdn.alpinelinux.org/alpine/v3.11/releases/aarch64/alpine-minirootfs-3.11.3-aarch64.tar.gz"
ALPINE_MINIROOTFS_TARBALL="$WORKLOADS_DIR/alpine-minirootfs-aarch64.tar.gz"
if [ ! -f "$ALPINE_MINIROOTFS_TARBALL" ]; then
    pushd $WORKLOADS_DIR
    time wget --quiet $ALPINE_MINIROOTFS_URL -O $ALPINE_MINIROOTFS_TARBALL || exit 1
    popd
fi

ALPINE_INITRAMFS_IMAGE="$WORKLOADS_DIR/alpine_initramfs.img"
if [ ! -f "$ALPINE_INITRAMFS_IMAGE" ]; then
    pushd $WORKLOADS_DIR
    mkdir alpine-minirootfs
    tar xf "$ALPINE_MINIROOTFS_TARBALL" -C alpine-minirootfs
    cat > alpine-minirootfs/init <<-EOF
#! /bin/sh
mount -t devtmpfs dev /dev
echo \$TEST_STRING > /dev/console
poweroff -f
EOF
    chmod +x alpine-minirootfs/init
    cd alpine-minirootfs
    find . -print0 |
        cpio --null --create --verbose --owner root:root --format=newc > "$ALPINE_INITRAMFS_IMAGE"
    popd
fi

pushd $WORKLOADS_DIR
sha1sum sha1sums-aarch64 --check
if [ $? -ne 0 ]; then
    echo "sha1sum validation of images failed, remove invalid images to fix the issue."
    exit 1
fi
popd

# Build custom kernel based on virtio-pmem and virtio-fs upstream patches
PE_IMAGE="$WORKLOADS_DIR/Image"
LINUX_CUSTOM_DIR="$WORKLOADS_DIR/linux-custom"

build_custom_linux_kernel() {
    pushd $LINUX_CUSTOM_DIR
    time make -j `nproc`
    cp arch/arm64/boot/Image $WORKLOADS_DIR/Image || exit 1
    popd
}

if [ ! -d "$LINUX_CUSTOM_DIR" ]; then
    pushd $WORKLOADS_DIR
    time git clone --depth 1 "https://github.com/cloud-hypervisor/linux.git" -b "virtio-fs-virtio-iommu-virtio-mem-5.6-rc4" $LINUX_CUSTOM_DIR
    popd
else
    pushd $LINUX_CUSTOM_DIR
    git fetch
    git checkout -f
    popd
fi

SRCDIR=$PWD
cp $SRCDIR/resources/linux-config-aarch64 $LINUX_CUSTOM_DIR/.config
build_custom_linux_kernel

VIRTIOFSD="$WORKLOADS_DIR/virtiofsd"
QEMU_DIR="qemu_build"
if [ ! -f "$VIRTIOFSD" ]; then
    pushd $WORKLOADS_DIR
    git clone --depth 1 "https://gitlab.com/virtio-fs/qemu.git" -b "virtio-fs-dev" $QEMU_DIR
    pushd $QEMU_DIR
    time ./configure --prefix=$PWD --target-list=aarch64-softmmu
    time make virtiofsd -j `nproc`
    cp virtiofsd $VIRTIOFSD || exit 1
    popd
    rm -rf $QEMU_DIR
    sudo setcap cap_chown,cap_dac_override,cap_dac_read_search,cap_fowner,cap_fsetid,cap_setgid,cap_setuid,cap_mknod,cap_setfcap,cap_sys_admin+epi "virtiofsd" || exit 1
    popd
fi

BLK_IMAGE="$WORKLOADS_DIR/blk.img"
MNT_DIR="mount_image"
if [ ! -f "$BLK_IMAGE" ]; then
    pushd $WORKLOADS_DIR
    fallocate -l 16M $BLK_IMAGE
    mkfs.ext4 -j $BLK_IMAGE
    mkdir $MNT_DIR
    sudo mount -t ext4 $BLK_IMAGE $MNT_DIR
    sudo bash -c "echo bar > $MNT_DIR/foo" || exit 1
    sudo umount $BLK_IMAGE
    rm -r $MNT_DIR
    popd
fi

SHARED_DIR="$WORKLOADS_DIR/shared_dir"
if [ ! -d "$SHARED_DIR" ]; then
    mkdir -p $SHARED_DIR
    echo "foo" > "$SHARED_DIR/file1"
    echo "bar" > "$SHARED_DIR/file3" || exit 1
fi
+}

+# lock the workloads folder to avoid parallel updating by different containers
+(
+    echo "try to lock $WORKLOADS_DIR folder and update"
+    flock -x 12 && update_workloads
+) 12>$WORKLOADS_LOCK

+# Create tap interface without multiple queues support for vhost_user_net test.
+sudo ip tuntap add name vunet-tap0 mode tap
+# Create tap interface with multiple queues support for vhost_user_net test.
+sudo ip tuntap add name vunet-tap1 mode tap multi_queue

BUILD_TARGET="aarch64-unknown-linux-${CH_LIBC}"
CFLAGS=""
@@ -176,7 +189,7 @@ sed -i 's/"with-serde",\ //g' hypervisor/Cargo.toml
cargo_args=("$@")
cargo_args+=("--no-default-features")
cargo_args+=("--features mmio,kvm")
-cargo build --release --target $BUILD_TARGET ${cargo_args[@]}
+cargo build --all --release --target $BUILD_TARGET ${cargo_args[@]}
strip target/$BUILD_TARGET/release/cloud-hypervisor
strip target/$BUILD_TARGET/release/vhost_user_net
strip target/$BUILD_TARGET/release/ch-remote
@@ -221,4 +234,8 @@ EOF
    RES=$?
fi

+# Tear vhost_user_net test network down
+sudo ip link del vunet-tap0
+sudo ip link del vunet-tap1
+
exit $RES