Commit c8bcfd1c authored by Sjoerd Simons's avatar Sjoerd Simons
Browse files

Merge branch 'wip-dockerize-ci' into 'collabora/next'

Dockerization CI and publishing of the lava-server image

See merge request !18
parents 38a37103 f33f043c
Pipeline #26169 passed with stages
in 25 minutes and 9 seconds
......@@ -17,6 +17,8 @@ bin
dist
django.log
doc/html
docker-compose.local.yaml
docker/collabora/lava-env-dispatcher
lava_server/settings/local_settings.py
tags
tmp*/
......
# GitLab CI pipeline: run the dockerized end-to-end tests, then publish
# the lava-server image to the project registry with kaniko.
stages:
  - test
  - publish

# Build all images and run the full CI flow inside docker-in-docker.
test:
  stage: test
  image: docker
  variables:
    DOCKER_HOST: tcp://docker:2375
    DOCKER_TLS_CERTDIR: ""
  tags:
    - aws
  services:
    - docker:dind
  script:
    - apk add --no-cache curl docker-compose
    - docker info
    - docker/collabora/ci-run

# Build and push the server image without a docker daemon, using kaniko.
publish:
  stage: publish
  image:
    name: gcr.io/kaniko-project/executor:debug-v0.16.0
    entrypoint: [""]
  tags:
    - aws
  script:
    # Registry credentials for the push, in docker config.json format.
    - |
      cat << EOF > /kaniko/.docker/config.json
      {
        "auths":{
          "$CI_REGISTRY": {
            "username":"$CI_REGISTRY_USER",
            "password":"$CI_REGISTRY_PASSWORD"
          }
        }
      }
      EOF
    - >
      /kaniko/executor
      --context $CI_PROJECT_DIR
      --dockerfile $CI_PROJECT_DIR/docker/collabora/Dockerfile
      --destination $CI_REGISTRY_IMAGE/$CI_COMMIT_REF_SLUG:$CI_COMMIT_SHA
      --build-arg REGISTRY=$CI_REGISTRY_IMAGE
      --single-snapshot
    - echo Pushed
FROM debian:bullseye-slim AS build
FROM debian:bullseye-slim AS lava-build
ENV DEBIAN_FRONTEND noninteractive
......@@ -13,7 +13,7 @@ RUN apt update && apt install -y --no-install-recommends devscripts dpkg-dev xz-
FROM debian:bullseye-slim AS server
COPY --from=build /lava-server*.deb /lava-common*.deb /
COPY --from=lava-build /lava-server*.deb /lava-common*.deb /
COPY docker/collabora/start-lava.sh /
# LAVA won't like not finding some variables when it imports its configuration.
......
# Build the LAVA Debian packages from the repository checkout.  The
# resulting .debs land in / and are picked up by the other Dockerfiles
# via COPY --from the image tagged collabora-lava-build.
FROM debian:bullseye-slim
ENV DEBIAN_FRONTEND=noninteractive
COPY . /lava
# WORKDIR instead of 'cd' inside RUN (DL3003); debuild writes its output
# to the parent directory, i.e. /.
WORKDIR /lava
# Install the packaging toolchain, create the upstream tarball the Debian
# tooling expects, pull in the build dependencies and build the packages
# unsigned.  apt-get (stable CLI) and list cleanup in the same layer.
RUN apt-get update && apt-get install -y --no-install-recommends devscripts dpkg-dev xz-utils && \
    export LAVA_VERSION=$(dpkg-parsechangelog -S Version | cut -d '-' -f 1) && \
    tar --xform "s,^\.,lava-${LAVA_VERSION}," -Jcpf ../lava_${LAVA_VERSION}.orig.tar.xz . && \
    apt-get -o Debug::pkgProblemResolver=yes --no-install-recommends --yes build-dep . && \
    debuild --no-sign && \
    rm -rf /var/lib/apt/lists/*
# LAVA dispatcher/worker image.  The .debs are built by Dockerfile.build
# (tagged collabora-lava-build by scripts/build-images) and copied in.
FROM debian:bullseye-slim AS dispatcher
COPY --from=collabora-lava-build /lava-dispatcher*.deb /lava-common*.deb /
COPY docker/collabora/start-worker.sh /
# git is installed alongside the local .debs; clean up both the package
# files and the apt lists in the same layer to keep the image small.
RUN apt-get update && \
    apt-get install -y git /*.deb && \
    rm /*.deb && \
    rm -rf /var/lib/apt/lists/*
# Exec (JSON) form so the script runs as PID 1 and receives docker's
# SIGTERM directly; the previous shell form went through /bin/sh -c.
CMD ["/start-worker.sh"]
# Minimal image providing the lavacli command line tool plus the device
# dictionary and job definitions the CI scripts feed to it.
FROM debian:bullseye-slim
ENV DEBIAN_FRONTEND=noninteractive
COPY docker/collabora/qtest.jinja2 /
COPY docker/collabora/test-job.yaml /
COPY docker/collabora/test-health-check.yaml /
# apt-get (stable CLI, not 'apt') and apt-list cleanup in the same layer.
RUN apt-get update && \
    apt-get install -y --no-install-recommends lavacli && \
    rm -rf /var/lib/apt/lists/*
# Dockerized LAVA
The [Dockerfile](Dockerfile) in this directory creates an image based on the
LAVA Debian packaging that exists in the repository for lava-server. You can
use it using regular docker commands and an environment file with at least the
following variables:
```
INSTANCE_NAME=<arbitrary name>
DATABASE_URL=<database>://<user>:<password>@<host>:<port>/<database>
SECRET_KEY=<django's secret key>
ALLOWED_HOSTS=<hosts that can connect to the django server>
```
All of these are regular Django settings that would go into settings.py
or `local_settings.py`, except for `DATABASE_URL`, which is specific to the
django-environ library. The reason we use that instead of having variables
for each piece of the database configuration is django-environ handles
corner cases like using sqlite's memory backend for us.
Find more information on the format and database-specific quirks in
[django-environ's documentation](https://django-environ.readthedocs.io/en/latest/).
In addition to lava-server there are also Dockerfile setups for a simple
packages build [Dockerfile.build](Dockerfile.build), a dispatcher/worker
[Dockerfile.dispatcher](Dockerfile.dispatcher), which depends on the image from
the packages build, and one for providing the `lavacli` command line utility
[Dockerfile.lavacli](Dockerfile.lavacli).
To use the dispatcher/worker image you can specify the following
variables:
```
WORKER_NAME=--name <name>
URL=--url http://<host>:<port>/
WS_URL=--ws-url http://<host>:<ws-port>/ws/
LOGLEVEL=--log-level <level>
TOKEN=--token <token>
```
They are basically the same you will need to set up in the configuration
file if you use the package. Note that lava-server opens two ports, which
by default are 8000 and 8001, the first one is for `URL` and the second
is the web socket port, `WS_URL`.
## Using docker-compose
A [basic docker-compose file](docker-compose.yaml) exists that can serve as a
basis for your own setup. It specifies 2 volumes, `lavaconfig` and `lavadbdata`
to be used for mounting configuration and database directories respectively.
The actual volumes created and used by docker-compose will get a prefix based on
the directory that hosts the compose file.
For instance, if you simply use the files provided in this repository to start
your lava-server instance the volumes will have a `collabora_` prefix:
```
kov@cereja ~/L/lava> docker volume ls
DRIVER VOLUME NAME
local collabora_lavaconfig
local collabora_lavadbdata
kov@cereja ~/L/lava>
```
It is possible to simply use this base compose file along with a minimal local
override, as long as you have the PostgreSQL server set up with the appropriate
user and database created. First create the override file at the root of the
repository to specify the variables:
```
$ cat docker-compose.local.yaml
version: "3"
services:
lava:
environment:
- INSTANCE_NAME=collabora
- DATABASE_URL=postgresql://lavaserver:lavapass@db:5432/lavaserver
- SECRET_KEY=a-ib@8x^c!-n=xf!831@ka$0&x2h8k%ml+is_x!+hn&kcjklt)
- ALLOWED_HOSTS=*
$
```
You can then fire it up from the root of the repository by running
docker-compose like this:
```
$ docker-compose -f docker/collabora/docker-compose.yaml -f docker-compose.local.yaml up
```
### Ports and reverse-proxying
Running the default compose service will open 2 new ports on your localhost:
`8000` is the main LAVA server instance. It is where you should point your main
reverse proxy to. A second port, `8001` is also opened and is for the web
socket used by the dispatcher/worker.
This is what an apache2 mod-proxy configuration would look like:
```
# Send web socket requests to lava-publisher
ProxyPass /ws/ ws://127.0.0.1:8001/ws/
ProxyPassReverse /ws/ ws://127.0.0.1:8001/ws/
# Send request to Gunicorn
ProxyPass / http://127.0.0.1:8000/
ProxyPassReverse / http://127.0.0.1:8000/
ProxyPreserveHost On
```
### Dispatcher configuration and tokens
When setting up the worker, if you are using a gateway like Apache or Nginx you
don't need to worry about the two different ports as they will all be available
through the same one. If you set up a worker talking directly to the
containers, though, don't forget that there are two separate ports.
The key settings are `URL`, which should point to the server URL, and `WS_URL`
that should point to the server URL with `/ws/` appended at the end. This
suffix is crucial for it to work.
The final key setting is `TOKEN`, which you obtain when you create the worker
on the LAVA server. If you use the Django admin web interface, you will see the
token at the bottom of the page. You can just copy it out of there and put it
in your configuration file.
The token is also shown when you use the `lava-server manage` command line
tool to create the worker on the server, or when querying worker details:
```
# lava-server manage workers details my.worker
hostname : my.worker
state : Offline
health : Active
description:
token : kfiq2vPsNqH7Pj22rMCQYN0lLDeV7nMe
devices : 1
#
```
A worker configuration for the worker above may look something like this:
```
$ cat /etc/lava-dispatcher/lava-worker
# Configuration for lava-worker daemon
# worker name
# Should be set for host that have random hostname (containers, ...)
# The name can be any unique string.
WORKER_NAME="--name my.worker"
# Logging level should be uppercase (DEBUG, INFO, WARN, ERROR)
# LOGLEVEL="DEBUG"
# Server connection
URL="http://lava.internal/"
TOKEN="--token kfiq2vPsNqH7Pj22rMCQYN0lLDeV7nMe"
WS_URL="--ws-url http://lava.internal/ws/"
# HTTP_TIMEOUT="--http-timeout 600"
$
```
Note that `WORKER_NAME` may be very important here as well. If your worker does
not have an FQDN or if you set a name other than the server's FQDN when
creating it on server, then you need to set this to match.
### Dockerized database
If you prefer to not have to deal with the database yourself, you can use the
sample [docker-compose.server-db.yaml](docker-compose.server-db.yaml) override.
You can use it where it is, and it will use the sample database configuration.
Or, if you prefer setting your own username, password, secret key and so on,
you should copy the file over to the root of the repository renaming it to
`docker-compose.local.yaml` so you can use the same command as above, and edit
it to your heart's content.
If you do prefer to use the provided sample configuration, you can simply run
it like this:
```
$ docker-compose -f docker/collabora/docker-compose.yaml -f docker/collabora/docker-compose.server-db.yaml up
```
### Dockerized dispatcher/worker
We recommend running the worker directly on a host using the Debian package.
The main reason for that is that giving access to all of the resources needed to
properly control the boards may be tricky, while at the same time the worker is
small and doesn't have a lot of configuration or state.
If you do prefer to run the worker on docker you can use the
[Dockerfile.dispatcher](Dockerfile.dispatcher) file after building the base
build image from [Dockerfile.build](Dockerfile.build).
### Dockerize all the things!
In the repository you can find
[docker-compose.ci.yaml](docker-compose.ci.yaml), used for our CI test run but
which can be used as a docker-compose override to run everything as a container
on the same host. If you intend to use that you will benefit from reading
through the scripts used by the CI, since you first need to set up a worker on
the server before the dispatcher service will work, for instance.
A good place to start is the [ci-run](ci-run) script, which calls out to the
other scripts that build, set up and start the various services in order.
#!/bin/sh
# Entry point for the CI run: build the images, start the services, run
# the test jobs and clean everything up again.
#
# Recognized arguments (any order):
#   skip-build    do not rebuild the docker images
#   skip-cleanup  leave containers/volumes/images around for inspection
set -e

SCRIPT_DIR=$(dirname "$0")

SKIP_BUILD=0
SKIP_CLEANUP=0
# Quote "$1": the unquoted form relied on accidental test(1) behavior
# and breaks on arguments containing whitespace.
while [ -n "$1" ]; do
    case "$1" in
        "skip-build")
            SKIP_BUILD=1
            ;;
        "skip-cleanup")
            SKIP_CLEANUP=1
            ;;
        *)
            # Unknown arguments are silently ignored.
            ;;
    esac
    shift
done

if [ "$SKIP_BUILD" = 0 ]; then
    . "$SCRIPT_DIR/scripts/build-images"
fi

. "$SCRIPT_DIR/scripts/start-lava"
. "$SCRIPT_DIR/scripts/test-lava"

if [ "$SKIP_CLEANUP" = 0 ]; then
    . "$SCRIPT_DIR/scripts/cleanup"
fi
# docker-compose override used by the CI: run the database and the
# dispatcher as containers alongside the lava server.
version: "3"
services:
  lava:
    depends_on:
      - db
    env_file: lava-env-dockerdb
    ports:
      # 8000 is the main web/gunicorn port, 8001 the worker web socket.
      - "8000:8000"
      - "8001:8001"
    volumes:
      - "lavaconfig:/etc/lava-server"
  db:
    image: postgres:latest
    restart: always
    environment:
      - POSTGRES_USER=lavaserver
      - POSTGRES_PASSWORD=lavapass
    volumes:
      - "lavadbdata:/var/lib/postgresql/data"
  lava-dispatcher:
    image: collabora-lava-dispatcher:latest
    build:
      context: ../..
      dockerfile: docker/collabora/Dockerfile.dispatcher
    depends_on:
      - lava
    # Generated from lava-env-dispatcher.in by scripts/start-lava.
    env_file: lava-env-dispatcher
# docker-compose override adding a dockerized PostgreSQL server with the
# sample credentials, and pointing the lava service at it.
version: "3"
services:
  lava:
    depends_on:
      - db
    env_file: lava-env-dockerdb
  db:
    image: postgres:latest
    restart: always
    environment:
      - POSTGRES_USER=lavaserver
      - POSTGRES_PASSWORD=lavapass
    volumes:
      - "lavadbdata:/var/lib/postgresql/data"
# Base docker-compose file for the lava-server image.  Combine with one
# of the overrides (docker-compose.server-db.yaml, docker-compose.ci.yaml
# or a local docker-compose.local.yaml) to supply database settings.
version: "3"
services:
  lava:
    image: collabora-lava:latest
    build:
      context: ../..
      dockerfile: docker/collabora/Dockerfile
    ports:
      # 8000 is the main web/gunicorn port, 8001 the worker web socket.
      - "8000:8000"
      - "8001:8001"
    volumes:
      - "lavaconfig:/etc/lava-server"
volumes:
  lavaconfig:
  lavadbdata:
# Environment template for the dispatcher/worker container used by the CI.
# @@TOKEN@@ is replaced with the real worker token by scripts/start-lava.
# Each variable carries the complete lava-worker command line option.
WORKER_NAME=--name ci.worker
URL=--url http://lava:8000/
WS_URL=--ws-url http://lava:8001/ws/
LOGLEVEL=--log-level DEBUG
TOKEN=--token @@TOKEN@@
# Environment for the lava server when used with the dockerized PostgreSQL
# (docker-compose.server-db.yaml / docker-compose.ci.yaml overrides).
# Sample values only -- change SECRET_KEY and the password for real use.
INSTANCE_NAME=collabora
DATABASE_URL=postgresql://lavaserver:lavapass@db:5432/lavaserver
SECRET_KEY=a-ib@8x^c!-n=xf!831@ka$0&x2h8k%ml+is_x!+hn&kcjklt)
ALLOWED_HOSTS=*
{# Device dictionary for the CI's qemu test device: a stock qemu device
   with a fixed MAC address and 1024M of memory. #}
{% extends 'qemu.jinja2' %}
{% set mac_addr = '52:54:00:12:34:59' %}
{% set memory = '1024' %}
#!/bin/sh
# Build all docker images used by the CI run.  Must be run (or sourced)
# from the root of the repository so the build context is correct.
set -e

if [ ! -f docker/collabora/Dockerfile.build ]; then
    echo 'You need to run this script from the root of the repository.'
    exit 1
fi

# Image builds: $1 is the image tag, $2 the Dockerfile under
# docker/collabora; all builds use the repository root as context.
build_image() {
    docker build -t "$1:latest" -f "docker/collabora/$2" .
}

build_image collabora-lava-build      Dockerfile.build
build_image collabora-lava            Dockerfile
build_image collabora-lava-dispatcher Dockerfile.dispatcher
build_image collabora-lavacli         Dockerfile.lavacli
#!/bin/sh
# Tear down the CI compose services and remove all images and volumes so
# the next run starts from a clean slate.
set -e

if [ ! -f docker/collabora/Dockerfile.build ]; then
    echo 'You need to run this script from the root of the repository.'
    exit 1
fi

docker volume rm -f lavacliconfig || true
docker-compose -f docker/collabora/docker-compose.yaml -f docker/collabora/docker-compose.ci.yaml down
# Compose volumes carry the project prefix.  The rest of the CI scripts
# use the "collabora_" prefix (collabora_lava_1, collabora_default,
# collabora_lavadbdata); the old "lava_" names here never matched.
docker volume rm -f collabora_lavaconfig collabora_lavadbdata || true
docker rmi collabora-lava-build:latest
docker rmi collabora-lava:latest
docker rmi collabora-lava-dispatcher:latest
docker rmi collabora-lavacli:latest
docker rmi debian:bullseye-slim
docker rmi postgres:latest
docker rmi curlimages/curl
docker image prune -f
#!/bin/sh
# Bring up a running LAVA instance for the CI: start the server, wait for
# it to answer HTTP, then pre-create the user, device type, worker and
# device the test run needs.
set -e

if [ ! -f docker/collabora/Dockerfile.build ]; then
    echo 'You need to run this script from the root of the repository.'
    exit 1
fi

COMPOSE="docker-compose -f docker/collabora/docker-compose.yaml -f docker/collabora/docker-compose.ci.yaml"

# docker-compose expects to find this file, even for the 'down' action, make sure we
# have something there.
test -f docker/collabora/lava-env-dispatcher || \
    cp docker/collabora/lava-env-dispatcher.in \
       docker/collabora/lava-env-dispatcher

# Make sure our services are down and empty
$COMPOSE down
docker volume rm -f collabora_lavadbdata || true

# Start the LAVA container and wait for it to reply something that makes sense, meaning
# it actually started gunicorn.
$COMPOSE up --detach lava

# Keep track of LAVA's startup
docker logs -f collabora_lava_1 &

# We use docker to check if LAVA has finished starting since we can
# plug directly into the collabora_default network, and avoid any
# issues with docker-in-docker not exposing the port on localhost.
docker pull curlimages/curl
CURL="docker run --rm --network collabora_default curlimages/curl"

echo 'Waiting on LAVA to start...'
TIMEOUT=30
# Discard stdout AND stderr.  Note the redirection order: the previous
# '2>&1 > /dev/null' sent stderr to the terminal, not to /dev/null.
while ! $CURL -s http://lava:8000/ > /dev/null 2>&1; do
    if [ "$TIMEOUT" = 0 ]; then
        docker ps
        exit 1
    fi
    TIMEOUT=$((TIMEOUT - 1))
    sleep 1
done

DOCKER="docker exec collabora_lava_1"

# Pre-add user, device type, device, and device dictionary
$DOCKER lava-server manage users add lava --passwd lava
$DOCKER lava-server manage authorize_superuser --username lava
$DOCKER lava-server manage device-types add qemu

# Add the worker and get the token, so we can then start the dispatcher service
TOKEN=$($DOCKER lava-server manage workers add ci.worker | cut -d ' ' -f 2)
sed "s/@@TOKEN@@/$TOKEN/" docker/collabora/lava-env-dispatcher.in > docker/collabora/lava-env-dispatcher
$COMPOSE up --detach lava-dispatcher

# Add the device
$DOCKER lava-server manage devices add --device-type qemu --worker ci.worker qtest
#!/bin/sh
# Run the CI test jobs: configure lavacli, submit a regular job plus a
# health check, wait for both and report (and propagate) their results.
set -e

if [ ! -f docker/collabora/Dockerfile.build ]; then
    echo 'You need to run this script from the root of the repository.'
    exit 1
fi

# Mount the lavacliconfig volume created below; it used to be misspelled
# 'lavacliconfi', so the created volume was never actually the one mounted.
DOCKER="docker run --rm --network collabora_default -v lavacliconfig:/root/.config collabora-lavacli:latest"

# Set up lavacli to add the health check, device dictionary and submit the job
TOKEN=$(docker exec collabora_lava_1 lava-server manage tokens add --user lava)
docker volume create lavacliconfig
$DOCKER lavacli identities add --token "${TOKEN}" --uri http://lava:8000/RPC2 --username lava default
$DOCKER lavacli devices dict set qtest /qtest.jinja2

# Now run a regular job
JOB_ID=$($DOCKER lavacli jobs submit /test-job.yaml)
echo "Job $JOB_ID started..."

# Set and run our health check
$DOCKER lavacli device-types health-check set qemu /test-health-check.yaml
docker exec collabora_lava_1 lava-server manage devices check qtest
echo "Health check submitted..."

# Track whether any job failed; previously failures were only echoed, so
# the CI passed even when the jobs did not.
RESULT=0

check_job_result() {
    JOB_ID=$1
    $DOCKER lavacli jobs wait $JOB_ID
    $DOCKER lavacli jobs show $JOB_ID
    JOB_RESULT=$($DOCKER lavacli jobs show $JOB_ID | grep '^Health' | awk '{ print $3 }')
    if [ "$JOB_RESULT" = "Complete" ]; then
        echo "Job $JOB_ID succeeded."
    else
        echo "Job $JOB_ID failed!"
        RESULT=1
    fi
}

check_job_result 1
check_job_result 2

# Fail as the last command (works both executed and sourced under set -e).
test "$RESULT" -eq 0
#!/bin/sh -e
#!/bin/sh
set -e
DATABASE=`python3 -c 'import os; from urllib.parse import urlparse; url = urlparse(os.environ["DATABASE_URL"]); print("{}:{}".format(url.hostname, url.port), end="")'`
......
#!/bin/sh
# Start the LAVA worker with options taken from the environment (see
# lava-env-dispatcher.in and the README); each variable carries the full
# option, e.g. WORKER_NAME="--name foo", so they are left unquoted here.
# exec so lava-worker replaces the shell and receives docker's signals.
# Fix: the env files define LOGLEVEL, not LOG_LEVEL, so the log level
# option was silently dropped before.
exec /usr/bin/lava-worker $WORKER_NAME $URL $WS_URL $TOKEN $LOGLEVEL $HTTP_TIMEOUT
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment