Commit 03cb6f83 authored by Vignesh Raman

Lava test

parent dfdbf346
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (C) 2024 Collabora, Helen Koike <helen.koike@collabora.com>

set -exo pipefail

source tools/ci/gitlab-ci/ci-scripts/ici-functions.sh

ici_prepare_build

pushd build

# compile the kernel
make CF=-D__CHECK_ENDIAN__ \
    -C "$ICI_KERNEL_DIR" \
    O=$(pwd) \
    -j$(nproc) \
    $KCI_KERNEL_IMAGE_NAME \
    2>&1 | tee output.txt

export INSTALL_PATH="${CI_PROJECT_DIR}/artifacts/"
INSTALL_PATH+="kernel-install-${KCI_KERNEL_ARCH}"
mkdir -p "$INSTALL_PATH"

for image in ${KCI_KERNEL_IMAGE_NAME}; do
    cp arch/${KCI_KERNEL_ARCH}/boot/${image} $INSTALL_PATH/.
done

make modules
# install kernel modules to artifacts/kernel-install
make -C "$ICI_KERNEL_DIR" O=$(pwd) modules_install INSTALL_MOD_PATH="$INSTALL_PATH"

# export config as artifact
cp .config "${CI_PROJECT_DIR}/artifacts/${KCI_KERNEL_ARCH}_config"

# if the compilation has warnings, exit with the warning code
if grep -iq "warning" output.txt; then
    exit 101
fi

popd
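The `exit 101` convention only works if the CI configuration treats that code as a soft failure rather than a hard one. A minimal sketch of the GitLab side (the job name is illustrative; the mechanism is GitLab's `allow_failure:exit_codes`, which the main pipeline file below hints at with its "Code to exit with a warning" comment):

build:x86_64:
  allow_failure:
    exit_codes: 101  # warnings in the build log mark the job as a warning, not a failure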
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (C) 2024 Collabora, Helen Koike <helen.koike@collabora.com>

set -exo pipefail

source tools/ci/gitlab-ci/ci-scripts/ici-functions.sh

# Clean up stale rebases that GitLab might not have removed when reusing a checkout dir
rm -rf .git/rebase-apply .git/rebase-merge

ici_prepare_build

git config --global pull.rebase true

# Try to merge fixes from target repo
if [ "$(git ls-remote --exit-code --heads ${UPSTREAM_REPO} ${TARGET_BRANCH}-external-fixes)" ]; then
    git pull ${UPSTREAM_REPO} ${TARGET_BRANCH}-external-fixes
fi

# Try to merge fixes from local repo if this isn't a merge request,
# otherwise try merging the fixes from the merge target
if [ -z "$CI_MERGE_REQUEST_PROJECT_PATH" ]; then
    if [ "$(git ls-remote --exit-code --heads origin ${TARGET_BRANCH}-external-fixes)" ]; then
        git pull origin ${TARGET_BRANCH}-external-fixes
    fi
else
    if [ "$(git ls-remote --exit-code --heads ${CI_MERGE_REQUEST_PROJECT_URL} ${CI_MERGE_REQUEST_TARGET_BRANCH_NAME}-external-fixes)" ]; then
        git pull ${CI_MERGE_REQUEST_PROJECT_URL} ${CI_MERGE_REQUEST_TARGET_BRANCH_NAME}-external-fixes
    fi
fi

pushd build

export ARCH="${KCI_KERNEL_ARCH}"
export LLVM=1

if [[ "$KCI_KERNEL_ARCH" = "arm64" ]]; then
    DEVICE_TREES="arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dtb"
    DEVICE_TREES+=" arch/arm64/boot/dts/amlogic/meson-gxl-s805x-libretech-ac.dtb"
    DEVICE_TREES+=" arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dtb"
    DEVICE_TREES+=" arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dtb"
    DEVICE_TREES+=" arch/arm64/boot/dts/qcom/apq8016-sbc-usb-host.dtb"
    DEVICE_TREES+=" arch/arm64/boot/dts/qcom/apq8096-db820c.dtb"
    DEVICE_TREES+=" arch/arm64/boot/dts/amlogic/meson-g12b-a311d-khadas-vim3.dtb"
    DEVICE_TREES+=" arch/arm64/boot/dts/mediatek/mt8173-elm-hana.dtb"
    DEVICE_TREES+=" arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-juniper-sku16.dtb"
    DEVICE_TREES+=" arch/arm64/boot/dts/mediatek/mt8192-asurada-spherion-r0.dtb"
    DEVICE_TREES+=" arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen-nots-r5.dtb"
    DEVICE_TREES+=" arch/arm64/boot/dts/qcom/sc7180-trogdor-kingoftown.dtb"
    DEVICE_TREES+=" arch/arm64/boot/dts/qcom/sm8350-hdk.dtb"
elif [[ "$KCI_KERNEL_ARCH" = "arm" ]]; then
    DEVICE_TREES="arch/arm/boot/dts/rockchip/rk3288-veyron-jaq.dtb"
    DEVICE_TREES+=" arch/arm/boot/dts/allwinner/sun8i-h3-libretech-all-h3-cc.dtb"
    DEVICE_TREES+=" arch/arm/boot/dts/nxp/imx/imx6q-cubox-i.dtb"
else
    DEVICE_TREES=""
fi

# Merge config
if [[ -n "${MERGE_FRAGMENT}" ]]; then
    ./scripts/kconfig/merge_config.sh ${KCI_DEFCONFIG} drivers/gpu/drm/ci/${MERGE_FRAGMENT}
else
    make $(basename ${KCI_DEFCONFIG})
fi

# Apply kconfig overrides
for opt in $ENABLE_KCONFIGS; do
    ./scripts/config --enable CONFIG_$opt
done
for opt in $DISABLE_KCONFIGS; do
    ./scripts/config --disable CONFIG_$opt
done

KERNEL_ARTIFACTS="${CI_PROJECT_DIR}/artifacts/kernel"
INSTALL_PATH="${CI_PROJECT_DIR}/artifacts/install"
mkdir -p "${KERNEL_ARTIFACTS}"
mkdir -p "${INSTALL_PATH}"

# compile the kernel
make CF=-D__CHECK_ENDIAN__ \
    -C "$ICI_KERNEL_DIR" \
    O=$(pwd) \
    -j"${FDO_CI_CONCURRENT:-$(nproc)}" \
    $KCI_KERNEL_IMAGE_NAME \
    2>&1 | tee output.txt

for image in ${KCI_KERNEL_IMAGE_NAME}; do
    cp arch/${KCI_KERNEL_ARCH}/boot/${image} ${KERNEL_ARTIFACTS}/.
done

if [[ -n ${DEVICE_TREES} ]]; then
    make -j"${FDO_CI_CONCURRENT:-4}" dtbs
    cp ${DEVICE_TREES} ${KERNEL_ARTIFACTS}/.
fi

make -j"${FDO_CI_CONCURRENT:-4}" LLVM=1 modules
INSTALL_MOD_PATH="$INSTALL_PATH" make modules_install

# export config as artifact
cp .config ${KERNEL_ARTIFACTS}/${KCI_KERNEL_ARCH}_config

xz -7 -c -T${FDO_CI_CONCURRENT:-4} vmlinux > ${KERNEL_ARTIFACTS}/vmlinux.xz

# Bundle the installed modules so they can be overlaid onto the rootfs
mkdir -p "${KERNEL_ARTIFACTS}/kernel-files/install/modules"
mv "${INSTALL_PATH}/lib" "${KERNEL_ARTIFACTS}/kernel-files/install/modules/"
tar --zstd -cf "${KERNEL_ARTIFACTS}/kernel-files.tar.zst" -C "${KERNEL_ARTIFACTS}" kernel-files
rm -rf "${KERNEL_ARTIFACTS}/kernel-files" "${INSTALL_PATH}"

# if the compilation has warnings, exit with the warning code
if grep -iq "warning" output.txt; then
    exit 101
fi

popd
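For reference, a successful run of this script leaves roughly the following layout under artifacts/ (a sketch, assuming KCI_KERNEL_ARCH=arm64 and KCI_KERNEL_IMAGE_NAME=Image):

artifacts/kernel/Image                  # boot image(s) copied from arch/arm64/boot/
artifacts/kernel/*.dtb                  # device trees listed in DEVICE_TREES
artifacts/kernel/arm64_config           # the final .config
artifacts/kernel/vmlinux.xz             # compressed vmlinux, for debugging
artifacts/kernel/kernel-files.tar.zst   # modules, packed as kernel-files/install/modules/lib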
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (C) 2024 Collabora, Helen Koike <helen.koike@collabora.com>

set -exo pipefail

# Get the last commit message
commit_message=$(git log -1 --pretty=%B)

pattern='(KCI_[A-Za-z_]+)=("[^"]*"|[^ ]+)'

while read -r line; do
    if [[ $line =~ $pattern ]]; then
        variable_name="${BASH_REMATCH[1]}"
        variable_value="${BASH_REMATCH[2]}"
        # Remove quotes if present
        variable_value="${variable_value%\"}"
        variable_value="${variable_value#\"}"
        # Export the variable
        export "$variable_name=$variable_value"
        echo "Exported $variable_name=$variable_value"
    fi
done <<< "$commit_message"
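For example, a commit message containing the (hypothetical) line

    KCI_DEFCONFIG="arch/arm64/configs/defconfig" KCI_SCENARIO=lava

matches the pattern on that line and prints:

    Exported KCI_DEFCONFIG=arch/arm64/configs/defconfig

making the variable visible to the rest of the job. Note that only the first KCI_ assignment per line is matched, since the regex is applied once per line read.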
@@ -2,22 +2,12 @@
#
# Copyright (C) 2024 Collabora, Helen Koike <helen.koike@collabora.com>

.debian:
  variables:
    BUILD_OS: debian
    FDO_REPO_SUFFIX: "$BUILD_OS/$KCI_BUILD_ARCH"
    FDO_DISTRIBUTION_TAG: "20250418-test"
    FDO_DISTRIBUTION_EXEC: ./tools/ci/gitlab-ci/ci-scripts/install-smatch.sh
    FDO_DISTRIBUTION_VERSION: trixie-slim
    FDO_DISTRIBUTION_PACKAGES: >-
      bc
@@ -69,6 +59,31 @@ variables:
      texlive-xetex
      udev
      virtme-ng
      clang
      lld
      llvm
      zstd

# Alpine based x86_64 build image
.alpine/x86_64_build-base:
  stage: container
  extends:
    - .fdo.container-build@alpine
  variables:
    FDO_DISTRIBUTION_VERSION: "3.21"
    FDO_BASE_IMAGE: alpine:$FDO_DISTRIBUTION_VERSION
    FDO_DISTRIBUTION_TAG: "20250418-alpine"
    FDO_DISTRIBUTION_PACKAGES: >-
      bash
      curl
      iputils
      openssh-client
    FDO_DISTRIBUTION_EXEC: ""

# Alpine based x86_64 image for LAVA SSH dockerized client
alpine/x86_64_lava_ssh_client:
  extends:
    - .alpine/x86_64_build-base

.x86_64-config:
  variables:
@@ -110,5 +125,24 @@ debian/x86_64_build:
  extends:
    - .debian-x86_64
    - .fdo.suffixed-image@debian
    - .container+build-rules
  needs:
    - job: debian/x86_64_build

debian/arm64_build:
  extends:
    - .debian-arm64
    - .fdo.container-build@debian
  tags:
    - aarch64
  stage: container

.use-debian/arm64_build:
  tags:
    - aarch64
  extends:
    - .debian-arm64
    - .fdo.suffixed-image@debian
    - .container+build-rules
  needs:
    - job: debian/arm64_build
@@ -2,40 +2,49 @@
#
# Copyright (C) 2024 Collabora, Helen Koike <helen.koike@collabora.com>

workflow:
  name: $PIPELINE_NAME
  rules:
    # when triggered as a multi-project pipeline for an MR
    - if: $CI_PIPELINE_SOURCE == 'pipeline' && $PARENT_MERGE_REQUEST_IID != null && $PARENT_MERGE_REQUEST_IID != ""
      variables:
        PIPELINE_NAME: 'Downstream pipeline for $PARENT_PROJECT_PATH!$PARENT_MERGE_REQUEST_IID'
    # when triggered as a multi-project pipeline
    - if: $CI_PIPELINE_SOURCE == 'pipeline'
      variables:
        PIPELINE_NAME: 'Downstream pipeline for $PARENT_PROJECT_PATH'
    # when triggered via a schedule
    - if: $CI_PIPELINE_SOURCE == 'schedule'
      variables:
        PIPELINE_NAME: 'Scheduled pipeline for $ONLY_JOB_NAME'
    # for merge requests
    - if: $CI_MERGE_REQUEST_ID
    # when triggered via the REST API
    - if: $CI_PIPELINE_SOURCE == 'api'
    # for the tip of the default branch
    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
    # when triggered via a trigger token
    - if: $CI_PIPELINE_SOURCE == 'trigger'
    # when triggered from a button press in the web interface
    - if: $CI_PIPELINE_SOURCE == 'web'
    # for branch tips without open MRs, ignoring special branches
    - if: $CI_PIPELINE_SOURCE == 'push' && $CI_OPEN_MERGE_REQUESTS == null
    # when forced via '-o ci.variable="FORCE_CI=true"' during pushing
    - if: $FORCE_CI == 'true'
    # do not duplicate pipelines on merge pipelines
    - if: $CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS && $CI_PIPELINE_SOURCE == "push"
      when: never

# YAML anchors for rule conditions
# --------------------------------
.rules-anchors:
  rules:
    # merge pipeline
    - if: &is-merge-attempt $GITLAB_USER_LOGIN == "marge-bot" && $CI_PIPELINE_SOURCE == "merge_request_event"
    # post-merge pipeline
    - if: &is-post-merge $GITLAB_USER_LOGIN == "marge-bot" && $CI_PIPELINE_SOURCE == "push"
    # pre-merge pipeline
    - if: &is-pre-merge $CI_PIPELINE_SOURCE == "merge_request_event"
    # push to a branch on a fork
    - if: &is-fork-push $CI_PIPELINE_SOURCE == "push"
    # nightly pipeline
    - if: &is-scheduled-pipeline $CI_PIPELINE_SOURCE == "schedule"
    # pipeline for direct pushes that bypassed the CI
    - if: &is-direct-push $CI_PIPELINE_SOURCE == "push" && $GITLAB_USER_LOGIN != "marge-bot"

.container+build-rules:
  rules:
    # Build everything in merge pipelines
    - if: *is-merge-attempt
      when: on_success
    # Same as above, but for pre-merge pipelines
    - if: *is-pre-merge
      when: manual
    # Build everything after someone bypassed the CI
    - if: *is-direct-push
      when: manual
    # Build everything in scheduled pipelines
    - if: *is-scheduled-pipeline
      when: on_success
    # Allow building everything in fork pipelines, but build nothing unless
    # manually triggered
    - when: manual

variables:
  SMATCH_DB_DIR: /smatch/smatch_data
  # the exit code of the bash script in `script` will be the exit code of the job
  FF_USE_NEW_BASH_EVAL_STRATEGY: "true"
  TARGET_BRANCH: drm-next
  KCI_SCENARIO:
    description: Set to any non-empty value to disable scenarios
    value: ""
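A job opts into these rules by listing the `.container+build-rules` template above in its `extends`; a minimal sketch, with an illustrative job name and script path:

example-build-job:
  extends:
    - .container+build-rules
  stage: build
  script:
    - ./tools/ci/gitlab-ci/ci-scripts/example-build.sh  # illustrative path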
@@ -48,7 +57,9 @@ default:
include:
  - remote: 'https://gitlab.freedesktop.org/freedesktop/ci-templates/-/raw/16bc29078de5e0a067ff84a1a199a3760d3b3811/templates/debian.yml'
  - remote: 'https://gitlab.freedesktop.org/freedesktop/ci-templates/-/raw/16bc29078de5e0a067ff84a1a199a3760d3b3811/templates/alpine.yml'
  - tools/ci/gitlab-ci/lava/lava-gitlab-ci.yml
  - tools/ci/gitlab-ci/container.yml
  - tools/ci/gitlab-ci/cache.yml
  - tools/ci/gitlab-ci/build.yml

@@ -56,9 +67,6 @@ include:
  - tools/ci/gitlab-ci/static-checks.yml
  - tools/ci/gitlab-ci/scenarios.yml

before_script:
  - source tools/ci/gitlab-ci/ci-scripts/parse_commit_message.sh

.use-debian/x86_64_build:
  allow_failure:
    # Code to exit with a warning

@@ -70,3 +78,4 @@ stages:
  - build
  - test
  - cache
  - msm
#!/bin/bash
# shellcheck disable=SC1090
# shellcheck disable=SC1091
# shellcheck disable=SC2086 # we want word splitting
# shellcheck disable=SC2155
# Second-stage init, used to set up devices and our job environment before
# running tests.
shopt -s extglob
# Make sure to kill this script and all of its child processes on exit, since
# any console output may interfere with LAVA signal handling, which is based
# on the log console.
cleanup() {
    if [ "$BACKGROUND_PIDS" = "" ]; then
        return 0
    fi

    set +x
    echo "Killing all child processes"
    for pid in $BACKGROUND_PIDS
    do
        kill "$pid" 2>/dev/null || true
    done

    # Sleep just a little to give enough time for subprocesses to be gracefully
    # killed. Then apply a SIGKILL if necessary.
    sleep 5
    for pid in $BACKGROUND_PIDS
    do
        kill -9 "$pid" 2>/dev/null || true
    done

    BACKGROUND_PIDS=
    set -x
}
trap cleanup INT TERM EXIT
# Space-separated list of the PIDs of the processes started in the background
# by this script
BACKGROUND_PIDS=

for path in '/dut-env-vars.sh' '/set-job-env-vars.sh' './set-job-env-vars.sh'; do
    [ -f "$path" ] && source "$path"
done
. "$SCRIPTS_DIR"/setup-test-env.sh
# Flush out anything which might be stuck in a serial buffer
echo
echo
echo
section_switch init_stage2 "Pre-testing hardware setup"
set -ex
# Set up any devices required by the jobs
[ -z "$HWCI_KERNEL_MODULES" ] || {
    echo -n $HWCI_KERNEL_MODULES | xargs -d, -n1 /usr/sbin/modprobe
}

# Set up ZRAM
HWCI_ZRAM_SIZE=2G
if /sbin/zramctl --find --size $HWCI_ZRAM_SIZE -a zstd; then
    mkswap /dev/zram0
    swapon /dev/zram0
    echo "zram: $HWCI_ZRAM_SIZE activated"
else
    echo "zram: skipping, not supported"
fi
#
# Load the KVM module specific to the detected CPU virtualization extensions:
# - vmx for Intel VT
# - svm for AMD-V
#
# Additionally, download the kernel image to boot the VM via HWCI_TEST_SCRIPT.
#
if [ "$HWCI_KVM" = "true" ]; then
    unset KVM_KERNEL_MODULE
    {
        grep -qs '\bvmx\b' /proc/cpuinfo && KVM_KERNEL_MODULE=kvm_intel
    } || {
        grep -qs '\bsvm\b' /proc/cpuinfo && KVM_KERNEL_MODULE=kvm_amd
    }

    {
        [ -z "${KVM_KERNEL_MODULE}" ] && \
            echo "WARNING: Failed to detect CPU virtualization extensions"
    } || \
        modprobe ${KVM_KERNEL_MODULE}

    mkdir -p /kernel
    curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
        -o "/kernel/${KERNEL_IMAGE_NAME}" \
        "${KERNEL_IMAGE_BASE}/amd64/${KERNEL_IMAGE_NAME}"
fi
# Fix prefix confusion: the build installs to $CI_PROJECT_DIR, but we expect
# it in /install
ln -sf $CI_PROJECT_DIR/install /install
export LD_LIBRARY_PATH=/install/lib
export LIBGL_DRIVERS_PATH=/install/lib/dri
# https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/22495#note_1876691
# The navi21 boards seem to have trouble with ld.so.cache, so try explicitly
# telling it to look in /usr/local/lib.
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib
# Store Mesa's disk cache under /tmp, rather than sending it out over NFS.
export XDG_CACHE_HOME=/tmp
# Make sure Python can find all our imports
export PYTHONPATH=$(python3 -c "import sys;print(\":\".join(sys.path))")
# If we need to specify a driver, it means several drivers could pick up this
# GPU; ensure that the other drivers can't accidentally be used
if [ -n "$MESA_LOADER_DRIVER_OVERRIDE" ]; then
    rm /install/lib/dri/!($MESA_LOADER_DRIVER_OVERRIDE)_dri.so
fi
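# For example (hypothetical value): with MESA_LOADER_DRIVER_OVERRIDE=msm, the
# extglob pattern above expands to removing every *_dri.so except msm_dri.so,
# so only the driver under test remains in /install/lib/dri.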
ls -1 /install/lib/dri/*_dri.so || true
if [ "$HWCI_FREQ_MAX" = "true" ]; then
    # Ensure initialization of the DRM device (needed by MSM)
    head -0 /dev/dri/renderD128

    # Disable GPU frequency scaling
    DEVFREQ_GOVERNOR=$(find /sys/devices -name governor | grep gpu || true)
    test -z "$DEVFREQ_GOVERNOR" || echo performance > $DEVFREQ_GOVERNOR || true

    # Disable CPU frequency scaling
    echo performance | tee -a /sys/devices/system/cpu/cpufreq/policy*/scaling_governor || true

    # Disable GPU runtime power management
    GPU_AUTOSUSPEND=$(find /sys/devices -name autosuspend_delay_ms | grep gpu | head -1)
    test -z "$GPU_AUTOSUSPEND" || echo -1 > $GPU_AUTOSUSPEND || true

    # Lock Intel GPU frequency to 70% of the maximum allowed by hardware
    # and enable throttling detection & reporting.
    # Additionally, set the upper limit for CPU scaling frequency to 65% of the
    # maximum permitted, as an additional measure to mitigate thermal throttling.
    /install/common/intel-gpu-freq.sh -s 70% --cpu-set-max 65% -g all -d
fi
# Start a little daemon to capture sysfs records and produce a JSON file
KDL_PATH=/install/common/kdl.sh
if [ -x "$KDL_PATH" ]; then
    echo "launch kdl.sh!"
    $KDL_PATH &
    BACKGROUND_PIDS="$! $BACKGROUND_PIDS"
else
    echo "kdl.sh not found!"
fi

# Increase the freedreno hangcheck timer because it's right at the edge of the
# spilling tests timing out (and some traces, too)
if [ -n "$FREEDRENO_HANGCHECK_MS" ]; then
    echo $FREEDRENO_HANGCHECK_MS | tee -a /sys/kernel/debug/dri/128/hangcheck_period_ms
fi

# Start a little daemon to capture the first devcoredump we encounter. (They
# expire after 5 minutes, so we poll for them.)
CAPTURE_DEVCOREDUMP=/install/common/capture-devcoredump.sh
if [ -x "$CAPTURE_DEVCOREDUMP" ]; then
    $CAPTURE_DEVCOREDUMP &
    BACKGROUND_PIDS="$! $BACKGROUND_PIDS"
fi
ARCH=$(uname -m)
export VK_DRIVER_FILES="/install/share/vulkan/icd.d/${VK_DRIVER}_icd.$ARCH.json"
# If we want Xorg to be running for the test, then we start it up before the
# HWCI_TEST_SCRIPT, because we need to use xinit to start X (otherwise,
# without -displayfd, you can race against Xorg's startup), but xinit will
# eat your client's return code.
if [ -n "$HWCI_START_XORG" ]; then
    echo "touch /xorg-started; sleep 100000" > /xorg-script
    env \
        xinit /bin/sh /xorg-script -- /usr/bin/Xorg -noreset -s 0 -dpms -logfile "$RESULTS_DIR/Xorg.0.log" &
    BACKGROUND_PIDS="$! $BACKGROUND_PIDS"

    # Wait for xorg to be ready for connections.
    for _ in 1 2 3 4 5; do
        if [ -e /xorg-started ]; then
            break
        fi
        sleep 5
    done
    export DISPLAY=:0
fi

if [ -n "$HWCI_START_WESTON" ]; then
    WESTON_X11_SOCK="/tmp/.X11-unix/X0"
    if [ -n "$HWCI_START_XORG" ]; then
        echo "Please consider dropping HWCI_START_XORG and instead using Weston XWayland for testing."
        WESTON_X11_SOCK="/tmp/.X11-unix/X1"
    fi
    export WAYLAND_DISPLAY=wayland-0

    # The display server is Weston XWayland when HWCI_START_XORG is not set,
    # or Xorg when it is
    export DISPLAY=:0
    mkdir -p /tmp/.X11-unix

    env \
        weston -Bheadless-backend.so --use-gl -Swayland-0 --xwayland --idle-time=0 &
    BACKGROUND_PIDS="$! $BACKGROUND_PIDS"

    while [ ! -S "$WESTON_X11_SOCK" ]; do sleep 1; done
fi
set +x
section_end init_stage2
echo "Running ${HWCI_TEST_SCRIPT} ${HWCI_TEST_ARGS} ..."
set +e
$HWCI_TEST_SCRIPT ${HWCI_TEST_ARGS:-}; EXIT_CODE=$?
set -e
section_start post_test_cleanup "Cleaning up after testing, uploading results"
set -x
# Make sure that capture-devcoredump is done before we start trying to tar up
# artifacts -- if it's writing while tar is reading, tar will throw an error and
# kill the job.
cleanup
# upload artifacts (lava jobs)
if [ -n "$S3_RESULTS_UPLOAD" ]; then
    tar --zstd -cf results.tar.zst results/
    s3_upload results.tar.zst https://"$S3_RESULTS_UPLOAD"/
fi
# We still need to echo the "hwci: mesa" message, as some scripts rely on it,
# such as the Python ones inside the bare-metal folder.
[ ${EXIT_CODE} -eq 0 ] && RESULT=pass || RESULT=fail
set +x
section_end post_test_cleanup
# Print the final result; both bare-metal and LAVA look for this string to get
# the result of our run, so try really hard to get it out rather than losing
# the run. The device gets shut down right at this point, and a630 seems to
# enjoy corrupting the last line of serial output before shutdown.
for _ in $(seq 0 3); do echo "hwci: mesa: $RESULT, exit_code: $EXIT_CODE"; sleep 1; echo; done
exit $EXIT_CODE
variables:
  LAVA_SSH_CLIENT_IMAGE: "${CI_REGISTRY_IMAGE}/alpine/x86_64_lava_ssh_client:${ALPINE_X86_64_LAVA_SSH_TAG}--${MESA_TEMPLATES_COMMIT}"

.lava-test:
  # Cancel job if a newer commit is pushed to the same branch
  interruptible: true
  # The jobs themselves shouldn't actually run for an hour, of course.
  # Jobs are picked up greedily by a GitLab CI runner which is deliberately
  # overprovisioned compared to the number of available devices. They are
  # submitted to the LAVA coordinator with a job priority which gives
  # pre-merge priority over everyone else. User-submitted and nightly jobs
  # can thus spend ages just waiting around in a queue to be run at some
  # point, as they get pre-empted by other things.
  # Non-queue time has strict timeouts for each stage, e.g. for downloading
  # the artifacts, booting the device, device setup, running the tests, etc.,
  # which is handled by LAVA itself.
  # So the only reason we should see anyone bouncing off this timeout is due
  # to a lack of available devices to run the jobs.
  timeout: 1h
  variables:
    GIT_STRATEGY: none # testing doesn't build anything from source
    FDO_CI_CONCURRENT: 6 # should be replaced by per-machine definitions
    # the dispatchers use this to cache data locally
    LAVA_HTTP_CACHE_URI: "http://caching-proxy/cache/?uri="
    # base system generated by the container build job, shared between many pipelines
    BASE_SYSTEM_HOST_PREFIX: "${S3_HOST}/${S3_KERNEL_BUCKET}"
    BASE_SYSTEM_MAINLINE_HOST_PATH: "${BASE_SYSTEM_HOST_PREFIX}/${FDO_UPSTREAM_REPO}/${DISTRIBUTION_TAG}/${DEBIAN_ARCH}"
    BASE_SYSTEM_FORK_HOST_PATH: "${BASE_SYSTEM_HOST_PREFIX}/${CI_PROJECT_PATH}/${DISTRIBUTION_TAG}/${DEBIAN_ARCH}"
    # per-job build artifacts
    JOB_ROOTFS_OVERLAY_PATH: "${JOB_ARTIFACTS_BASE}/job-rootfs-overlay.tar.gz"
    JOB_RESULTS_PATH: "${JOB_ARTIFACTS_BASE}/results.tar.zst"
    LAVA_S3_ARTIFACT_NAME: "mesa-${ARCH}-default-debugoptimized"
    S3_ARTIFACT_NAME: "mesa-python-ci-artifacts"
    S3_RESULTS_UPLOAD: "${JOB_ARTIFACTS_BASE}"
    VISIBILITY_GROUP: "Collabora+fdo"
    STORAGE_MAINLINE_HOST_PATH: "${BASE_SYSTEM_HOST_PREFIX}/${FDO_UPSTREAM_REPO}/${DATA_STORAGE_PATH}"
    STORAGE_FORK_HOST_PATH: "${BASE_SYSTEM_HOST_PREFIX}/${CI_PROJECT_PATH}/${DATA_STORAGE_PATH}"
  script:
    - . artifacts/setup-test-env.sh
    - ./artifacts/lava/lava-submit.sh
  artifacts:
    name: "${CI_PROJECT_NAME}_${CI_JOB_NAME}"
    when: always
    paths:
      - results/
    reports:
      junit: results/junit.xml
  tags:
    - $RUNNER_TAG
  after_script:
    - curl -L --retry 4 -f --retry-connrefused --retry-delay 30 -s "https://${JOB_RESULTS_PATH}" | tar --warning=no-timestamp --zstd -x
  needs:
    - job: alpine/x86_64_lava_ssh_client
      artifacts: false
    # - job: debian/x86_64_pyutils
    #   artifacts: false
    # - job: python-artifacts
    #   artifacts: false
.lava-test:arm32:
  variables:
    ARCH: arm32
    DEBIAN_ARCH: armhf
    KERNEL_IMAGE_NAME: zImage
    KERNEL_IMAGE_TYPE: "zimage"
    BOOT_METHOD: u-boot
  extends:
    - .use-debian/arm64_build # for same $MESA_ARTIFACTS_TAG as in kernel+rootfs_arm32
    # - .use-debian/x86_64_pyutils
    - .lava-test
    # - .use-kernel+rootfs-arm
  needs:
    - !reference [.lava-test, needs]
    # - job: kernel+rootfs_arm32
    #   artifacts: false
    - job: debian-arm32
      artifacts: false

.lava-test:arm64:
  variables:
    ARCH: arm64
    DEBIAN_ARCH: arm64
    KERNEL_IMAGE_NAME: Image
    KERNEL_IMAGE_TYPE: "image"
    BOOT_METHOD: u-boot
  extends:
    - .use-debian/arm64_build
    # - .use-debian/x86_64_pyutils
    - .lava-test
    # - .use-kernel+rootfs-arm
  needs:
    - !reference [.lava-test, needs]
    # - job: kernel+rootfs_arm64
    #   artifacts: false
    - job: debian-arm64
      artifacts: false

.lava-test:x86_64:
  variables:
    ARCH: x86_64
    DEBIAN_ARCH: amd64
    KERNEL_IMAGE_NAME: bzImage
    KERNEL_IMAGE_TYPE: "zimage"
    BOOT_METHOD: u-boot
  extends:
    - .use-debian/x86_64_build
    # - .use-debian/x86_64_pyutils
    - .lava-test
    # - .use-kernel+rootfs-x86_64
  needs:
    - !reference [.lava-test, needs]
    # - job: kernel+rootfs_x86_64
    #   artifacts: false
    # - job: debian-testing
    #   artifacts: false
#!/usr/bin/env bash
# SPDX-License-Identifier: MIT
# shellcheck disable=SC2086 # we want word splitting
# shellcheck disable=SC1091 # paths only become valid at runtime

# If we run in a fork (not from mesa or Marge-bot), reuse the mainline kernel
# and rootfs if they exist.
_check_artifact_path() {
    _url="https://${1}/${2}"
    if curl -s -o /dev/null -I -L -f --retry 4 --retry-delay 15 "${_url}"; then
        echo -n "${_url}"
    fi
}

get_path_to_artifact() {
    _mainline_artifact="$(_check_artifact_path ${BASE_SYSTEM_MAINLINE_HOST_PATH} ${1})"
    if [ -n "${_mainline_artifact}" ]; then
        echo -n "${_mainline_artifact}"
        return
    fi
    _fork_artifact="$(_check_artifact_path ${BASE_SYSTEM_FORK_HOST_PATH} ${1})"
    if [ -n "${_fork_artifact}" ]; then
        echo -n "${_fork_artifact}"
        return
    fi
    set +x
    error "Sorry, I couldn't find a viable built path for ${1} in either mainline or a fork." >&2
    echo "" >&2
    echo "If you're working on CI, this probably means that you're missing a dependency:" >&2
    echo "this job ran ahead of the job which was supposed to upload that artifact." >&2
    echo "" >&2
    echo "If you aren't working on CI, please ping @mesa/ci-helpers to see if we can help." >&2
    echo "" >&2
    echo "This job is going to fail, because I can't find the resources I need. Sorry." >&2
    set -x
    exit 1
}
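# Usage sketch (hypothetical hosts): with
#   BASE_SYSTEM_MAINLINE_HOST_PATH=s3.example.org/mesa/mainline
#   BASE_SYSTEM_FORK_HOST_PATH=s3.example.org/me/my-fork
# `get_path_to_artifact lava-rootfs.tar.zst` probes the mainline URL with a
# HEAD request first, falls back to the fork URL, prints the first one that
# answers, and exits 1 (failing the job) if neither artifact exists.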
. "${SCRIPTS_DIR}/setup-test-env.sh"

section_start prepare_rootfs "Preparing root filesystem"

set -ex

section_switch rootfs "Assembling root filesystem"
ROOTFS_URL="$(get_path_to_artifact lava-rootfs.tar.zst)"
[ $? != 1 ] || exit 1

mkdir -p results/job-rootfs-overlay/
#rm -rf results
#artifacts/ci-common/export-gitlab-job-env-for-dut.sh \
#    > results/job-rootfs-overlay/set-job-env-vars.sh
#cp artifacts/ci-common/init-*.sh results/job-rootfs-overlay/
#cp "$SCRIPTS_DIR"/setup-test-env.sh results/job-rootfs-overlay/

tar zcf job-rootfs-overlay.tar.gz -C results/job-rootfs-overlay/ .
#s3_upload job-rootfs-overlay.tar.gz "https://${JOB_ARTIFACTS_BASE}"

# Prepare env vars for upload.
#section_switch variables "Environment variables passed through to device:"
#cat results/job-rootfs-overlay/set-job-env-vars.sh

#section_switch lava_submit "Submitting job for scheduling"

pip3 install --break-system-packages git+https://gitlab.freedesktop.org/vigneshraman/lava-job-submitter@mesa-sync-v1

touch results/lava.log
tail -f results/lava.log &

# Currently disabled options (kept for reference; they must stay outside the
# backslash-continued command below, or they would break the continuation):
#   --jwt-file "${S3_JWT_FILE}"
#   - append-overlay --name=job-overlay --url="https://${JOB_ROOTFS_OVERLAY_PATH}" \
#       --compression=gz --path="/" --format=tar

PYTHONPATH=artifacts/ artifacts/lava/lava_job_submitter.py \
    --farm "${FARM}" \
    --device-type "${DEVICE_TYPE}" \
    --boot-method "${BOOT_METHOD}" \
    --job-timeout-min $((CI_JOB_TIMEOUT/60 - 5)) \
    --dump-yaml \
    --pipeline-info "$CI_JOB_NAME: $CI_PIPELINE_URL on $CI_COMMIT_REF_NAME ${CI_NODE_INDEX}/${CI_NODE_TOTAL}" \
    --rootfs-url "${ROOTFS_URL}" \
    --kernel-url-prefix "https://${PIPELINE_ARTIFACTS_BASE}/${DEBIAN_ARCH}" \
    --kernel-external "${EXTERNAL_KERNEL_TAG}" \
    --first-stage-init artifacts/ci-common/init-stage1.sh \
    --dtb-filename "${DTB}" \
    --kernel-image-name "${KERNEL_IMAGE_NAME}" \
    --kernel-image-type "${KERNEL_IMAGE_TYPE}" \
    --visibility-group "${VISIBILITY_GROUP}" \
    --lava-tags "${LAVA_TAGS}" \
    --mesa-job-name "$CI_JOB_NAME" \
    --structured-log-file "results/lava_job_detail.json" \
    --ssh-client-image "${LAVA_SSH_CLIENT_IMAGE}" \
    --project-name "${CI_PROJECT_NAME}" \
    --starting-section "${CURRENT_SECTION}" \
    --job-submitted-at "${CI_JOB_STARTED_AT}" \
    - append-overlay \
        --name=kernel-build \
        --url="${FDO_HTTP_CACHE_URI:-}https://${PIPELINE_ARTIFACTS_BASE}/${DEBIAN_ARCH}/kernel-files.tar.zst" \
        --compression=zstd \
        --path="${CI_PROJECT_DIR}" \
        --format=tar \
    - submit \
    >> results/lava.log
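The trailing `- append-overlay ... - submit` groups chain subcommands on the submitter's CLI (python-fire style chaining, assuming this fork keeps mesa's lava_job_submitter interface); another overlay could be chained the same way before `- submit`, e.g. (hypothetical name and URL):

    - append-overlay \
        --name=extra-firmware \
        --url="https://example.org/firmware.tar.zst" \
        --compression=zstd \
        --path="/" \
        --format=tar \
    - submit \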
@@ -3,7 +3,9 @@
# Copyright (C) 2025 Collabora, Vignesh Raman <vignesh.raman@collabora.com>

vkms:none:
  extends:
    - build:x86_64
    - .container+build-rules
  stage: test
  timeout: "1h30m"
  variables:

@@ -27,6 +29,73 @@ vkms:none:
    - build:x86_64
    - igt:x86_64

.lava-test:
  timeout: "1h30m"
  variables:
    FARM: collabora
  script:
    # Note: the build dir (and thus install) may be dirty due to GIT_STRATEGY
    - rm -rf install
    - tar -xf artifacts/install.tar
    - mv -n install/* artifacts/.
    # Override it with our lava-submit.sh script
    - ./artifacts/lava-submit.sh

.lava-igt:arm32:
  extends:
    - .lava-test
    - .container+build-rules
  variables:
    HWCI_TEST_SCRIPT: "/install/igt_runner.sh"
    DEBIAN_ARCH: "armhf"
  needs:
    - alpine/x86_64_lava_ssh_client
    - build:arm32
    - igt:arm32

.lava-igt:arm64:
  extends:
    - .lava-test
    - .container+build-rules
  variables:
    HWCI_TEST_SCRIPT: "/install/igt_runner.sh"
    DEBIAN_ARCH: "arm64"
  needs:
    - alpine/x86_64_lava_ssh_client
    - build:arm64
    - igt:arm64

.lava-igt:x86_64:
  extends:
    - .lava-test
    - .container+build-rules
  variables:
    HWCI_TEST_SCRIPT: "/install/igt_runner.sh"
    DEBIAN_ARCH: "amd64"
  needs:
    - alpine/x86_64_lava_ssh_client
    - build:x86_64
    - igt:x86_64

.msm-sc7180:
  extends:
    - .lava-igt:arm64
  stage: msm
  variables:
    DRIVER_NAME: msm
    BOOT_METHOD: depthcharge
    KERNEL_IMAGE_TYPE: ""

msm:sc7180-trogdor-lazor-limozeen:
  extends:
    - .msm-sc7180
  parallel: 1
  variables:
    DEVICE_TYPE: sc7180-trogdor-lazor-limozeen
    DTB: sc7180-trogdor-lazor-limozeen-nots-r5
    GPU_VERSION: ${DEVICE_TYPE}
    RUNNER_TAG: mesa-ci-x86-64-lava-sc7180-trogdor-lazor-limozeen

test-boot:
  rules:
    - when: never