scripts: Improve AArch64 CI for parallel executors

Lock "work_loads" folder when one job is syncing files. If another job
arrives, wait until the lock is released.

Signed-off-by: Michael Zhao <michael.zhao@arm.com>
Michael Zhao 2020-07-28 11:57:01 +08:00 committed by Rob Bradford
parent 12c5b7668a
commit 7e3cbf04de
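
The serialisation described in the commit message is built on flock(1): each job opens the lock file on a spare file descriptor and takes an exclusive lock before touching the workloads. A minimal standalone sketch of that behaviour (hypothetical illustration, not part of the patch; the executor names and sleep are made up):

    #!/bin/bash
    # Two "executors" racing for the same lock file: the second flock call
    # blocks until the first subshell exits and its descriptor is closed.
    WORKLOADS_LOCK="$HOME/workloads/integration_test.lock"
    mkdir -p "$HOME/workloads"

    (
        flock -x 200
        echo "executor A: lock held, syncing workloads..."
        sleep 10                    # stands in for the download/convert work
    ) 200>"$WORKLOADS_LOCK" &

    (
        flock -x 200                # waits here until executor A releases the lock
        echo "executor B: lock acquired, workloads already up to date"
    ) 200>"$WORKLOADS_LOCK"
    wait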

@@ -6,11 +6,14 @@ source $HOME/.cargo/env
export BUILD_TARGET=${BUILD_TARGET-aarch64-unknown-linux-gnu}
WORKLOADS_DIR="$HOME/workloads"
WORKLOADS_LOCK="$WORKLOADS_DIR/integration_test.lock"
mkdir -p "$WORKLOADS_DIR"
update_workloads() {
cp scripts/sha1sums-aarch64 $WORKLOADS_DIR
BIONIC_OS_IMAGE_DOWNLOAD_NAME="bionic-server-cloudimg-arm64.img"
BIONIC_OS_IMAGE_DOWNLOAD_URL="https://cloudhypervisorstorage.blob.core.windows.net/images/$BIONIC_OS_IMAGE_DOWNLOAD_NAME"
BIONIC_OS_DOWNLOAD_IMAGE="$WORKLOADS_DIR/$BIONIC_OS_IMAGE_DOWNLOAD_NAME"
if [ ! -f "$BIONIC_OS_DOWNLOAD_IMAGE" ]; then
@@ -19,7 +22,7 @@ if [ ! -f "$BIONIC_OS_DOWNLOAD_IMAGE" ]; then
popd
fi
BIONIC_OS_RAW_IMAGE_NAME="bionic-server-cloudimg-arm64.raw"
BIONIC_OS_RAW_IMAGE="$WORKLOADS_DIR/$BIONIC_OS_RAW_IMAGE_NAME"
if [ ! -f "$BIONIC_OS_RAW_IMAGE" ]; then
pushd $WORKLOADS_DIR
@@ -29,7 +32,7 @@ fi
# Convert the raw image to qcow2 image to remove compressed blocks from the disk. Therefore letting the
# qcow2 format image can be directly used in the integration test.
BIONIC_OS_QCOW2_IMAGE_UNCOMPRESSED_NAME="bionic-server-cloudimg-arm64.qcow2"
BIONIC_OS_QCOW2_UNCOMPRESSED_IMAGE="$WORKLOADS_DIR/$BIONIC_OS_QCOW2_IMAGE_UNCOMPRESSED_NAME"
if [ ! -f "$BIONIC_OS_QCOW2_UNCOMPRESSED_IMAGE" ]; then
pushd $WORKLOADS_DIR
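
The conversion command hidden by the context lines is a raw-to-qcow2 rewrite with qemu-img; a sketch under that assumption (the same step is repeated for the Focal image further down):

    pushd $WORKLOADS_DIR
    # Rewriting the raw image as qcow2 expands compressed blocks so the
    # integration tests can use the image directly.
    time qemu-img convert -p -f raw -O qcow2 \
        $BIONIC_OS_RAW_IMAGE_NAME $BIONIC_OS_QCOW2_IMAGE_UNCOMPRESSED_NAME || exit 1
    popd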
@@ -37,7 +40,7 @@ if [ ! -f "$BIONIC_OS_QCOW2_UNCOMPRESSED_IMAGE" ]; then
popd
fi
FOCAL_OS_IMAGE_DOWNLOAD_NAME="focal-server-cloudimg-arm64.img"
FOCAL_OS_IMAGE_DOWNLOAD_URL="https://cloudhypervisorstorage.blob.core.windows.net/images/$FOCAL_OS_IMAGE_DOWNLOAD_NAME"
FOCAL_OS_DOWNLOAD_IMAGE="$WORKLOADS_DIR/$FOCAL_OS_IMAGE_DOWNLOAD_NAME"
if [ ! -f "$FOCAL_OS_DOWNLOAD_IMAGE" ]; then
@@ -46,7 +49,7 @@ if [ ! -f "$FOCAL_OS_DOWNLOAD_IMAGE" ]; then
popd
fi
FOCAL_OS_RAW_IMAGE_NAME="focal-server-cloudimg-arm64.raw"
FOCAL_OS_RAW_IMAGE="$WORKLOADS_DIR/$FOCAL_OS_RAW_IMAGE_NAME"
if [ ! -f "$FOCAL_OS_RAW_IMAGE" ]; then
pushd $WORKLOADS_DIR
@@ -56,7 +59,7 @@ fi
# Convert the raw image to qcow2 image to remove compressed blocks from the disk. Therefore letting the
# qcow2 format image can be directly used in the integration test.
FOCAL_OS_QCOW2_IMAGE_UNCOMPRESSED_NAME="focal-server-cloudimg-arm64.qcow2"
FOCAL_OS_QCOW2_UNCOMPRESSED_IMAGE="$WORKLOADS_DIR/$FOCAL_OS_QCOW2_IMAGE_UNCOMPRESSED_NAME"
if [ ! -f "$FOCAL_OS_QCOW2_UNCOMPRESSED_IMAGE" ]; then
pushd $WORKLOADS_DIR
@@ -100,7 +103,6 @@ popd
# Build custom kernel based on virtio-pmem and virtio-fs upstream patches
PE_IMAGE="$WORKLOADS_DIR/Image"
LINUX_CUSTOM_DIR="$WORKLOADS_DIR/linux-custom"
build_custom_linux_kernel() {
@@ -114,7 +116,6 @@ if [ ! -d "$LINUX_CUSTOM_DIR" ]; then
pushd $WORKLOADS_DIR
time git clone --depth 1 "https://github.com/cloud-hypervisor/linux.git" -b "virtio-fs-virtio-iommu-virtio-mem-5.6-rc4" $LINUX_CUSTOM_DIR
popd
else
pushd $LINUX_CUSTOM_DIR
git fetch
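
The else branch keeps an existing shallow checkout in sync with the pinned branch; the remaining commands fall outside the shown context, but one way to express that refresh (hypothetical sketch) is:

    pushd $LINUX_CUSTOM_DIR
    # Refresh the shallow clone and hard-reset onto the pinned branch tip.
    git fetch --depth 1 origin virtio-fs-virtio-iommu-virtio-mem-5.6-rc4
    git reset --hard FETCH_HEAD
    popd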
@@ -162,6 +163,18 @@ if [ ! -d "$SHARED_DIR" ]; then
echo "foo" > "$SHARED_DIR/file1"
echo "bar" > "$SHARED_DIR/file3" || exit 1
fi
}
# lock the workloads folder to avoid parallel updating by different containers
(
echo "try to lock $WORKLOADS_DIR folder and update"
flock -x 12 && update_workloads
) 12>$WORKLOADS_LOCK
# Create tap interface without multiple queues support for vhost_user_net test.
sudo ip tuntap add name vunet-tap0 mode tap
# Create tap interface with multiple queues support for vhost_user_net test.
sudo ip tuntap add name vunet-tap1 mode tap multi_queue
BUILD_TARGET="aarch64-unknown-linux-${CH_LIBC}" BUILD_TARGET="aarch64-unknown-linux-${CH_LIBC}"
CFLAGS="" CFLAGS=""
@@ -176,7 +189,7 @@ sed -i 's/"with-serde",\ //g' hypervisor/Cargo.toml
cargo_args=("$@")
cargo_args+=("--no-default-features")
cargo_args+=("--features mmio,kvm")
cargo build --all --release --target $BUILD_TARGET ${cargo_args[@]}
strip target/$BUILD_TARGET/release/cloud-hypervisor
strip target/$BUILD_TARGET/release/vhost_user_net
strip target/$BUILD_TARGET/release/ch-remote
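
With no extra arguments passed to the script, the cargo_args array expands into a single invocation along these lines (a sketch assuming CH_LIBC=gnu, so BUILD_TARGET resolves to the gnu triple):

    # Equivalent expanded command for BUILD_TARGET=aarch64-unknown-linux-gnu:
    cargo build --all --release \
        --target aarch64-unknown-linux-gnu \
        --no-default-features --features mmio,kvm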
@@ -221,4 +234,8 @@ EOF
RES=$?
fi
# Tear vhost_user_net test network down
sudo ip link del vunet-tap0
sudo ip link del vunet-tap1
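
If a run aborts before the tap devices are created, the unconditional ip link del calls print errors; a more forgiving variant (hypothetical, not in the patch) tolerates devices that were never set up:

    # Ignore failures if a device was never created or is already gone.
    sudo ip link del vunet-tap0 2>/dev/null || true
    sudo ip link del vunet-tap1 2>/dev/null || true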
exit $RES