MBS-Koji integration tests

This change introduces a set of Jenkins pipelines for building MBS
images and running integration tests against Koji using those images.
These pipelines are directly based on the WaiverDB pipeline work:

https://pagure.io/waiverdb/blob/master/f/openshift

The results of those tests are used to provide feedback to Pagure PRs
and to promote images through a series of environments, which may be
used to implement a continuous deployment process.

The current test cases, written in Groovy, are:
 - module-build-init: initiate a module build and check that tags
   and targets in Koji are created correctly
 - module-build-cgimport: build an empty module and ensure that
   results are imported correctly into Koji, using the CGImport
   interface
This commit is contained in:
Mike Bonnet
2019-04-26 12:34:02 -07:00
parent 1e423826c6
commit 9c3d9bb441
59 changed files with 3057 additions and 52 deletions

0
docker/test-py3.sh Normal file → Executable file
View File

View File

@@ -6,22 +6,14 @@ Deploy MBS to OpenShift
```bash
$ docker build openshift/backend \
--tag mbs-backend:latest \
--build-arg mbs_rpm=<MBS_RPM> \
--build-arg mbs_messaging_umb_rpm=<MBS_MESSAGING_UMB_RPM> \
--build-arg umb_ca_crt=<UMB_CA_CRT>
--build-arg EXTRA_RPMS=<MBS_MESSAGING_UMB_RPM>
```
where:
* MBS_RPM is a path or URL to the Module Build Service RPM. If not specified,
MBS [provided by
Fedora](https://apps.fedoraproject.org/packages/module-build-service) will be
installed in the image.
* MBS_MESSAGING_UMB_RPM is a path or URL to the [UMB Messaging
Plugin](https://github.com/release-engineering/mbs-messaging-umb) RPM. If not
provided, only `fedmsg` and `in_memory` will be available for messaging in the
image.
* UMB_CA_CRT is a path or URL to the CA certificate of the message bus to be
used by MBS.
## Build the container image for MBS frontend

View File

@@ -1,28 +1,57 @@
FROM fedora:28
FROM fedora:29 AS builder
ARG EXTRA_RPMS=""
ARG GIT_REPO=""
ARG GIT_REF=""
ARG VERSION=""
ARG CREATED=""
ARG DNF_CMD="dnf -y --setopt=deltarpm=0 --setopt=install_weak_deps=false --setopt=tsflags=nodocs"
COPY . /src
WORKDIR /src
RUN ${DNF_CMD} install \
'dnf-command(builddep)' rpm-build rpmdevtools rpmlint \
python3-tox python3-pytest python3-mock python3-flake8 bandit && \
${DNF_CMD} builddep *.spec && \
${DNF_CMD} clean all
RUN rpmdev-setuptree && \
python3 setup.py sdist && \
rpmbuild --define "_sourcedir $PWD/dist" -ba *.spec && \
mv $HOME/rpmbuild/RPMS /srv
RUN flake8 && \
bandit -r -ll -s B102,B303,B411,B602 module_build_service && \
tox -v -e py3
FROM fedora:29
LABEL \
name="Backend for the Module Build Service (MBS)" \
vendor="The Factory 2.0 Team" \
license="MIT" \
description="The MBS coordinates module builds. This image is to serve as the MBS backend." \
usage="https://pagure.io/fm-orchestrator" \
build-date=""
org.opencontainers.image.title="Backend for the Module Build Service (MBS)" \
org.opencontainers.image.description="The MBS coordinates module builds. This image is to serve as the MBS backend." \
org.opencontainers.image.vendor="The Factory 2.0 Team" \
org.opencontainers.image.authors="The Factory 2.0 Team <pnt-factory2-devel@redhat.com>" \
org.opencontainers.image.licenses="MIT" \
org.opencontainers.image.source="$GIT_REPO" \
org.opencontainers.image.revision="$GIT_REF" \
org.opencontainers.image.version="$VERSION" \
org.opencontainers.image.created="$CREATED" \
org.opencontainers.image.url="https://pagure.io/fm-orchestrator" \
org.opencontainers.image.documentation="https://pagure.io/fm-orchestrator" \
distribution-scope="public"
# The caller can choose to provide an already built module-build-service RPM.
ARG mbs_rpm=module-build-service
ARG mbs_messaging_umb_rpm
ARG umb_ca_crt
COPY --from=builder /srv/RPMS /srv/RPMS
COPY repos/ /etc/yum.repos.d/
RUN dnf -y install \
python2-pungi \
python2-psycopg2 \
https://dl.fedoraproject.org/pub/epel/7Server/x86_64/Packages/s/stomppy-3.1.6-3.el7.noarch.rpm \
$mbs_rpm \
$mbs_messaging_umb_rpm \
&& dnf -y clean all
RUN $DNF_CMD install \
python3-psycopg2 \
python3-docopt \
python3-service-identity \
/srv/*/*/*.rpm \
$EXTRA_RPMS && \
$DNF_CMD clean all && \
rm -rf /srv/RPMS
ADD $umb_ca_crt /etc/pki/ca-trust/source/anchors/umb_serverca.crt
# Do this as a workaround instead of `update-ca-trust`
RUN cat /etc/pki/ca-trust/source/anchors/umb_serverca.crt >> /etc/pki/tls/certs/ca-bundle.crt
USER 1001
VOLUME ["/etc/module-build-service", "/etc/fedmsg.d", "/etc/mbs-certs"]
ENTRYPOINT fedmsg-hub
ENTRYPOINT ["fedmsg-hub-3"]

View File

@@ -0,0 +1,91 @@
# Template to produce a new BuildConfig and ImageStream for MBS backend image builds.
---
apiVersion: v1
kind: Template
metadata:
name: mbs-backend-build-template
labels:
template: mbs-backend-build-template
parameters:
- name: NAME
displayName: Short unique identifier for the templated instances.
required: true
value: mbs-backend
- name: MBS_GIT_REPO
displayName: MBS Git repo URL
description: Default MBS Git repo URL in which to run dev tests against
required: true
value: https://pagure.io/fm-orchestrator.git
- name: MBS_GIT_REF
displayName: MBS Git repo ref
description: Default MBS Git repo ref in which to run dev tests against
required: true
value: master
- name: MBS_BACKEND_IMAGESTREAM_NAME
displayName: ImageStream name of the resulting image
required: true
value: mbs-backend
- name: MBS_BACKEND_IMAGESTREAM_NAMESPACE
displayName: Namespace of ImageStream for MBS images
required: false
- name: MBS_IMAGE_TAG
displayName: Tag of resulting image
required: true
value: latest
- name: EXTRA_RPMS
displayName: Names of extra rpms to install
required: false
value: ""
- name: CREATED
displayName: Creation date
description: The date and time the image was built, in RFC 3339 format
required: false
value: ""
objects:
- apiVersion: v1
kind: ImageStream
metadata:
name: "${MBS_BACKEND_IMAGESTREAM_NAME}"
labels:
app: "${NAME}"
- kind: "BuildConfig"
apiVersion: "v1"
metadata:
name: "${NAME}"
labels:
app: "${NAME}"
spec:
runPolicy: "Parallel"
completionDeadlineSeconds: 1800
strategy:
dockerStrategy:
forcePull: true
dockerfilePath: openshift/backend/Dockerfile
buildArgs:
- name: EXTRA_RPMS
value: "${EXTRA_RPMS}"
- name: GIT_REPO
value: "${MBS_GIT_REPO}"
- name: GIT_REF
value: "${MBS_GIT_REF}"
- name: VERSION
value: "${MBS_IMAGE_TAG}"
- name: CREATED
value: "${CREATED}"
resources:
requests:
memory: "768Mi"
cpu: "500m"
limits:
memory: "1Gi"
cpu: "800m"
source:
git:
uri: "${MBS_GIT_REPO}"
ref: "${MBS_GIT_REF}"
output:
to:
kind: "ImageStreamTag"
name: "${MBS_BACKEND_IMAGESTREAM_NAME}:${MBS_IMAGE_TAG}"
namespace: "${MBS_BACKEND_IMAGESTREAM_NAMESPACE}"

View File

@@ -1,27 +1,54 @@
# See `../backend/` for building `mbs-backend:latest`
FROM mbs-backend:latest
ARG GIT_REPO=""
ARG GIT_REF=""
ARG VERSION=""
ARG CREATED=""
ARG DNF_CMD="dnf -y --setopt=deltarpm=0 --setopt=install_weak_deps=false --setopt=tsflags=nodocs"
LABEL \
name="Frontend for the Module Build Service (MBS)" \
vendor="The Factory 2.0 Team" \
license="MIT" \
description="The MBS coordinates module builds. This image is to serve as the MBS frontend." \
usage="https://pagure.io/fm-orchestrator" \
build-date=""
org.opencontainers.image.title="Frontend for the Module Build Service (MBS)" \
org.opencontainers.image.description="The MBS coordinates module builds. This image is to serve as the MBS frontend." \
org.opencontainers.image.vendor="The Factory 2.0 Team" \
org.opencontainers.image.authors="The Factory 2.0 Team <pnt-factory2-devel@redhat.com>" \
org.opencontainers.image.licenses="MIT" \
org.opencontainers.image.source="$GIT_REPO" \
org.opencontainers.image.revision="$GIT_REF" \
org.opencontainers.image.version="$VERSION" \
org.opencontainers.image.created="$CREATED" \
org.opencontainers.image.url="https://pagure.io/fm-orchestrator" \
org.opencontainers.image.documentation="https://pagure.io/fm-orchestrator" \
io.openshift.expose-services="8080:http,8443:https" \
distribution-scope="public"
RUN dnf -y install \
httpd \
mod_wsgi \
&& dnf -y clean all
USER root
RUN $DNF_CMD install \
nss_wrapper httpd mod_ssl python3-mod_wsgi && \
$DNF_CMD clean all
RUN chmod a+rwx /run/httpd && \
sed -i -r -e 's!Listen 80!Listen 8080!' \
-e 's!^User apache!User default!' \
-e 's!^Group apache!Group root!' \
-e 's!^(\s*CustomLog)\s+\S+!\1 /proc/self/fd/1!' \
-e 's!^(\s*ErrorLog)\s+\S+!\1 /proc/self/fd/2!' \
/etc/httpd/conf/httpd.conf && \
sed -i -r -e 's!Listen 443!Listen 8443!' \
-e 's!_default_:443!_default_:8443!' \
-e 's!^(\s*CustomLog)\s+\S+!\1 /proc/self/fd/1!' \
-e 's!^(\s*TransferLog)\s+\S+!\1 /proc/self/fd/1!' \
-e 's!^(\s*ErrorLog)\s+\S+!\1 /proc/self/fd/2!' \
-e 's!^(SSLCertificateFile)\s+\S+!\1 /etc/mbs-certs/frontend.crt!' \
-e 's!^(SSLCertificateKeyFile)\s+\S+!\1 /etc/mbs-certs/frontend.key!' \
-e 's!^#(SSLCertificateChainFile)\s+\S+!\1 /etc/mbs-certs/frontendca.crt!' \
/etc/httpd/conf.d/ssl.conf
COPY openshift/frontend/run-httpd /usr/bin
USER 1001
EXPOSE 8080/tcp 8443/tcp
VOLUME ["/etc/module-build-service", "/etc/fedmsg.d", "/etc/mbs-certs", "/etc/httpd/conf.d"]
ENTRYPOINT ["mod_wsgi-express", "start-server", "/usr/share/mbs/mbs.wsgi"]
CMD [\
"--user", "fedmsg", "--group", "fedmsg", \
"--port", "8080", "--threads", "1", \
"--include-file", "/etc/httpd/conf.d/mbs.conf", \
"--log-level", "info", \
"--log-to-terminal", \
"--access-log", \
"--startup-log" \
]
EXPOSE 8080/tcp 8443/tcp
ENTRYPOINT ["/usr/bin/run-httpd"]

View File

@@ -0,0 +1,96 @@
# Template to produce a new BuildConfig and ImageStream for MBS frontend image builds.
---
apiVersion: v1
kind: Template
metadata:
name: mbs-frontend-build-template
labels:
template: mbs-frontend-build-template
parameters:
- name: NAME
displayName: Short unique identifier for the templated instances.
required: true
value: mbs-frontend
- name: MBS_GIT_REPO
displayName: MBS Git repo URL
description: Default MBS Git repo URL in which to run dev tests against
required: true
value: https://pagure.io/fm-orchestrator.git
- name: MBS_GIT_REF
displayName: MBS Git repo ref
description: Default MBS Git repo ref in which to run dev tests against
required: true
value: master
- name: MBS_FRONTEND_IMAGESTREAM_NAME
displayName: ImageStream name of the resulting image
required: true
value: mbs-frontend
- name: MBS_FRONTEND_IMAGESTREAM_NAMESPACE
displayName: Namespace of ImageStream for MBS images
required: false
- name: MBS_IMAGE_TAG
displayName: Tag of resulting image
required: true
value: latest
- name: MBS_BACKEND_IMAGESTREAM_NAME
displayName: ImageStream name of the MBS backend image
required: true
value: mbs-frontend
- name: MBS_BACKEND_IMAGESTREAM_NAMESPACE
displayName: Namespace of ImageStream for MBS backend image
required: false
- name: CREATED
displayName: Creation date
description: The date and time the image was built, in RFC 3339 format
required: false
value: ""
objects:
- apiVersion: v1
kind: ImageStream
metadata:
name: "${MBS_FRONTEND_IMAGESTREAM_NAME}"
labels:
app: "${NAME}"
- kind: "BuildConfig"
apiVersion: "v1"
metadata:
name: "${NAME}"
labels:
app: "${NAME}"
spec:
runPolicy: "Parallel"
completionDeadlineSeconds: 1800
strategy:
dockerStrategy:
forcePull: true
dockerfilePath: openshift/frontend/Dockerfile
buildArgs:
- name: GIT_REPO
value: "${MBS_GIT_REPO}"
- name: GIT_REF
value: "${MBS_GIT_REF}"
- name: VERSION
value: "${MBS_IMAGE_TAG}"
- name: CREATED
value: "${CREATED}"
from:
kind: ImageStreamTag
name: "${MBS_BACKEND_IMAGESTREAM_NAME}:${MBS_IMAGE_TAG}"
namespace: "${MBS_BACKEND_IMAGESTREAM_NAMESPACE}"
resources:
requests:
memory: "768Mi"
cpu: "500m"
limits:
memory: "1Gi"
cpu: "800m"
source:
git:
uri: "${MBS_GIT_REPO}"
ref: "${MBS_GIT_REF}"
output:
to:
kind: "ImageStreamTag"
name: "${MBS_FRONTEND_IMAGESTREAM_NAME}:${MBS_IMAGE_TAG}"
namespace: "${MBS_FRONTEND_IMAGESTREAM_NAMESPACE}"

17
openshift/frontend/run-httpd Executable file
View File

@@ -0,0 +1,17 @@
#!/bin/bash
set -eu
export USER_ID=$(id -u)
export GROUP_ID=$(id -g)
cp /etc/passwd /tmp/passwd
cat >> /tmp/passwd <<EOF
default:x:${USER_ID}:${GROUP_ID}:Default Application User:${HOME}:/sbin/nologin
EOF
export LD_PRELOAD=libnss_wrapper.so
export NSS_WRAPPER_PASSWD=/tmp/passwd
export NSS_WRAPPER_GROUP=/etc/group
exec httpd -D FOREGROUND $@

View File

@@ -0,0 +1,69 @@
MBS-Koji integration tests
==========================
### Background
This directory contains a set of Jenkins pipelines for building MBS container images and running integration tests between MBS and Koji. These are based on the [WaiverDB](https://pagure.io/waiverdb) [pipeline](https://pagure.io/waiverdb/blob/master/f/openshift) structure. Please see the extensive documentation there for information on the pipeline layout and workflows.
### Getting started
#### Deploying a Jenkins master
Before you can run these pipelines you need to have a Jenkins master configured to communicate with an OpenShift project. The simplest way to do this is to run your Jenkins master in your OpenShift project. The first time you create a `BuildConfig` with `strategy.type: JenkinsPipeline`, OpenShift will deploy a Jenkins master using the default settings. However, these pipelines will not run using the default settings. It is recommended that before creating any `BuildConfigs`, you set up your Jenkins master using:
```bash
make -C openshift/integration/koji/pipelines install-jenkins
```
This will deploy and configure your Jenkins master with the required set of plugins. **Note:** The Jenkins master will be configured to disable script security. Be very careful when running untrusted code, as scripts will have full access to your Jenkins environment. If you don't wish to disable script security, you may edit `openshift/integration/koji/pipelines/Makefile` to change that setting. You will need to allow scripts to access a number of Groovy/Java APIs before the pipelines will run successfully.
#### Configuring the pipelines
To load the pipelines into OpenShift (and Jenkins) run:
```bash
make -C openshift/integration/koji/pipelines install
```
This will create all the objects required for running the pipelines.
#### Configuring secrets for pushing images
If you're going to be pushing your images anywhere other than the OpenShift internal registry, you'll need to configure secrets which give you permission to push to that registry.
- Go to your registry dashboard and create a robot account.
- Backup your docker-config-json file (`$HOME/.docker/config.json`) if present.
- Run `docker login` with the robot account you just created to produce a new docker-config-json file.
- Create a new [OpenShift secret for a private registry] named `factory2-pipeline-registry-credentials` from your docker-config-json file:
```bash
oc create secret generic factory2-pipeline-registry-credentials \
--from-file=.dockerconfigjson="$HOME/.docker/config.json" \
--type=kubernetes.io/dockerconfigjson
```
#### Configuring a Pagure API key
If you would like the pipelines to provide feedback on PRs and commits, you need to configure a Pagure API key.
- Go to your Pagure repository settings, and navigate to the 'API Keys' section.
- Click on the `Create new key` button to add a new API key with the `Flag a pull-request`, `Comment on a pull-request`, and `Flag a commit` permissions.
- Add your newly-created API key to OpenShift:
```bash
make -C openshift/integration/koji/pipelines update-pagure-api-key KEY=<value from Pagure>
```
#### Building a Jenkins slave image
Before you can run the pipelines, you need an image to use as the Jenkins slave. This step should be repeated any time the `Dockerfile` (`openshift/integration/koji/containers/jenkins-slave/Dockerfile`) for the Jenkins slaves is updated.
```bash
oc start-build mbs-premerge-jenkins-slave
```
**Note:** The `mbs-premerge-jenkins-slave` and `mbs-postmerge-jenkins-slave` jobs produce the same output. Either may be used.
#### Setting up Jenkins jobs
##### Polling
If you want the `premerge` and `postmerge` jobs to be triggered automatically based on SCM changes, you need to run the following jobs once manually, so Jenkins initiates polling:
- mbs-polling-for-prs
- mbs-polling-for-master
##### Message bus
**Note:** This requires the `Red Hat CI Plugin` to be installed.
If you're using message bus integration to enable automatic triggering of test jobs and promotion, you need to run the trigger jobs once manually so Jenkins can set up the required message consumers. The following jobs should be triggered manually:
- mbs-trigger-on-latest-tag
- mbs-trigger-on-stage-tag
- mbs-backend-greenwave-promote-to-stage
- mbs-backend-greenwave-promote-to-prod
- mbs-frontend-greenwave-promote-to-stage
- mbs-frontend-greenwave-promote-to-prod
[OpenShift secret for a private registry]: https://docs.openshift.com/container-platform/3.11/dev_guide/builds/build_inputs.html#using-docker-credentials-for-private-registries

View File

@@ -0,0 +1,78 @@
# Based on the rad-jenkins image, which is in turn based on:
# https://github.com/jenkinsci/docker-jnlp-slave/blob/master/Dockerfile
# https://github.com/jenkinsci/docker-slave/blob/master/Dockerfile
FROM fedora:29
LABEL \
org.opencontainers.image.title="Jenkins slave for Module Build Service (MBS) pipelines" \
org.opencontainers.image.description="The MBS coordinates module builds. This image is to serve as the slave for executing build and test pipelines." \
org.opencontainers.image.vendor="The Factory 2.0 Team" \
org.opencontainers.image.authors="The Factory 2.0 Team <pnt-factory2-devel@redhat.com>" \
org.opencontainers.image.licenses="GPLv2+" \
org.opencontainers.image.url="https://pagure.io/fm-orchestrator" \
org.opencontainers.image.documentation="https://pagure.io/fm-orchestrator" \
distribution-scope="private"
ARG USER=jenkins
ARG UID=10000
ARG HOME_DIR=/var/lib/jenkins
ARG SLAVE_VERSION=3.29
ARG TINI_VERSION=0.18.0
ARG DNF_CMD="dnf -y --setopt=deltarpm=0 --setopt=install_weak_deps=false --setopt=tsflags=nodocs"
ARG CA_URLS=""
# Provide a default HOME location and set some default git user details
# Set LANG to UTF-8 to support it in stdout/stderr
ENV HOME=${HOME_DIR} \
GIT_COMMITTER_NAME="The Factory 2.0 Team" \
GIT_COMMITTER_EMAIL=pnt-factory2-devel@redhat.com \
LANG=en_US.UTF-8
USER root
RUN ${DNF_CMD} install -y \
java-1.8.0-openjdk nss_wrapper gettext git \
tar gzip skopeo wget make bind-utils \
origin-clients \
# Jenkins pipeline 'sh' steps seem to require ps
procps-ng \
# Tools to interface with our test instances
koji && \
${DNF_CMD} clean all
# CA Certs
WORKDIR /etc/pki/ca-trust/source/anchors/
RUN for ca_url in ${CA_URLS}; do curl -skO ${ca_url}; done && \
update-ca-trust
# Setup the user for non-arbitrary UIDs with OpenShift
# https://docs.openshift.org/latest/creating_images/guidelines.html#openshift-origin-specific-guidelines
RUN useradd -d ${HOME_DIR} -u ${UID} -g 0 -m -s /bin/bash ${USER} && \
chmod -R g+rwx ${HOME_DIR}
# Make /etc/passwd writable for root group
# so we can add dynamic user to the system in entrypoint script
RUN chmod g+rw /etc/passwd
# Retrieve jenkins slave client
RUN curl --create-dirs -sSLo /usr/share/jenkins/slave.jar \
https://repo.jenkins-ci.org/public/org/jenkins-ci/main/remoting/${SLAVE_VERSION}/remoting-${SLAVE_VERSION}.jar && \
chmod 755 /usr/share/jenkins && \
chmod 644 /usr/share/jenkins/slave.jar
# Entry point script to run jenkins slave client
COPY jenkins-slave /usr/local/bin/jenkins-slave
RUN chmod 755 /usr/local/bin/jenkins-slave
# install tini, a tiny but valid init for containers
# install wait-for-it.sh, to allow containers to wait for other services to come up
RUN curl -L -o /usr/local/bin/tini "https://github.com/krallin/tini/releases/download/v${TINI_VERSION}/tini" \
&& chmod +rx /usr/local/bin/tini \
&& curl -L -o /usr/local/bin/wait-for-it "https://raw.githubusercontent.com/vishnubob/wait-for-it/master/wait-for-it.sh" \
&& chmod +rx /usr/local/bin/tini /usr/local/bin/wait-for-it \
&& ${DNF_CMD} clean all
# For OpenShift we MUST use the UID of the user and not the name.
USER ${UID}
WORKDIR ${HOME_DIR}
ENTRYPOINT ["/usr/local/bin/tini", "--", "jenkins-slave"]

View File

@@ -0,0 +1,116 @@
#!/usr/bin/env bash
# The MIT License
#
# Copyright (c) 2015, CloudBees, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Usage jenkins-slave.sh [options] -url http://jenkins [SECRET] [AGENT_NAME]
# Optional environment variables :
# * JENKINS_TUNNEL : HOST:PORT for a tunnel to route TCP traffic to jenkins host, when jenkins can't be directly accessed over network
# * JENKINS_URL : alternate jenkins URL
# * JENKINS_SECRET : agent secret, if not set as an argument
# * JENKINS_AGENT_NAME : agent name, if not set as an argument
# * JENKINS_JAR_CACHE : directory for cached jar files
#
# Credentials are also supported for authentication to jenkins. If desired,
# create the directory /etc/jenkins/credentials with "username" and "password"
# files within.
#
# This script was originally adopted from:
# https://github.com/jenkinsci/docker-jnlp-slave/blob/master/jenkins-slave
# Dynamically create a passwd file for non-arbitrary UIDs.
# Taken from: https://docs.openshift.org/latest/creating_images/guidelines.html#openshift-origin-specific-guidelines
# Adjusted using: https://github.com/openshift/jenkins/commit/20a511b8ccf71a8ebd80519440403e530ccb6337
export USER_ID=$(id -u)
export GROUP_ID=$(id -g)
# Skip for root user
if [ x"$USER_ID" != x"0" ]; then
export NSS_WRAPPER_PASSWD=/tmp/passwd
export NSS_WRAPPER_GROUP=/etc/group
export LD_PRELOAD=/usr/lib64/libnss_wrapper.so
cp /etc/passwd $NSS_WRAPPER_PASSWD
echo "jenkins:x:${USER_ID}:${GROUP_ID}:jenkins:${HOME}:/bin/bash" >> $NSS_WRAPPER_PASSWD
fi
if [ $# -eq 1 ]; then
# if `docker run` only has one arguments, we assume user is running alternate command like `bash` to inspect the image
exec "$@"
else
# if -tunnel is not provided try env vars
if [[ "$@" != *"-tunnel "* ]]; then
if [ ! -z "$JENKINS_TUNNEL" ]; then
TUNNEL="-tunnel $JENKINS_TUNNEL"
fi
fi
if [ -n "$JENKINS_URL" ]; then
URL="-url $JENKINS_URL"
fi
if [ -n "$JENKINS_NAME" ]; then
JENKINS_AGENT_NAME="$JENKINS_NAME"
fi
if [ -n "$JENKINS_JAR_CACHE" ]; then
JAR_CACHE="-jar-cache $JENKINS_JAR_CACHE"
fi
if [ -d "/etc/jenkins/credentials" ]; then
USERNAME="$(cat /etc/jenkins/credentials/username)"
PASSWORD="$(cat /etc/jenkins/credentials/password)"
CREDENTIALS="-credentials ${USERNAME}:${PASSWORD}"
fi
if [ -z "$JNLP_PROTOCOL_OPTS" ]; then
echo "Warning: JnlpProtocol3 is disabled by default, use JNLP_PROTOCOL_OPTS to alter the behavior"
JNLP_PROTOCOL_OPTS="-Dorg.jenkinsci.remoting.engine.JnlpProtocol3.disabled=true"
fi
# If both required options are defined, do not pass the parameters
OPT_JENKINS_SECRET=""
if [ -n "$JENKINS_SECRET" ]; then
if [[ "$@" != *"${JENKINS_SECRET}"* ]]; then
OPT_JENKINS_SECRET="${JENKINS_SECRET}"
else
echo "Warning: SECRET is defined twice in command-line arguments and the environment variable"
fi
fi
OPT_JENKINS_AGENT_NAME=""
if [ -n "$JENKINS_AGENT_NAME" ]; then
if [[ "$@" != *"${JENKINS_AGENT_NAME}"* ]]; then
OPT_JENKINS_AGENT_NAME="${JENKINS_AGENT_NAME}"
else
echo "Warning: AGENT_NAME is defined twice in command-line arguments and the environment variable"
fi
fi
#TODO: Handle the case when the command-line and Environment variable contain different values.
#It is fine it blows up for now since it should lead to an error anyway.
exec java $JAVA_OPTS $JNLP_PROTOCOL_OPTS -cp /usr/share/jenkins/slave.jar hudson.remoting.jnlp.Main -headless $CREDENTIALS $JAR_CACHE $TUNNEL $URL $OPT_JENKINS_SECRET $OPT_JENKINS_AGENT_NAME "$@"
fi

View File

@@ -0,0 +1,55 @@
OC:=oc
OCFLAGS:=
JOBS_DIR:=jobs
TEMPLATES_DIR:=templates
JOB_PARAM_FILES:=$(wildcard $(JOBS_DIR)/*.env)
JOBS:=$(patsubst $(JOBS_DIR)/%.env,%,$(JOB_PARAM_FILES))
OC_CMD=$(OC) $(OCFLAGS)
help:
@echo TARGETS
@echo -e "\tinstall\t\tInstall or update pipelines to OpenShift"
@echo -e "\tuninstall\tDelete installed pipelines from OpenShift"
@echo
@echo VARIABLES
@echo -e "\tJOBS\t\tSpace seperated list of pipeline jobs to install"
@echo -e "\tJOBS_DIR\tLooking for pipeline job definitions in an alternate directory."
@echo -e "\tTEMPLATES_DIR\tLooking for pipeline job templates in an alternate directory."
@echo -e "\tOC\t\tUse this oc command"
@echo -e "\tOCFLAGS\t\tOptions to append to the oc command arguments"
install:
@$(OC_CMD) project
@for job in $(JOBS); do \
echo "[PIPELINE] Updating pipeline job \"$${job}\"..." ; \
template_file=$$(cat ./$(JOBS_DIR)/$${job}.tmpl); \
$(OC_CMD) process --local -f ./$(TEMPLATES_DIR)/$${template_file} \
--param-file ./$(JOBS_DIR)/$${job}.env | $(OC_CMD) apply -f -; \
echo "[PIPELINE] Pipeline job \"$${job}\" updated" ; \
done
uninstall:
@$(OC_CMD) project
@for job in $(JOBS); do \
template_file=$$(cat ./$(JOBS_DIR)/$${job}.tmpl); \
template_name=$${template_file%.y?ml}; \
template_name=$${template_name%-template}; \
echo "[PIPELINE] Deleting pipeline job \"$${job}\"..." ; \
$(OC_CMD) delete all -l template="$$template_name" -l app="$$job" ;\
echo "[PIPELINE] Pipeline job \"$${job}\" deleted" ; \
done
create-jenkins-is:
$(OC_CMD) import-image jenkins:2 --confirm --scheduled=true \
--from=registry.access.redhat.com/openshift3/jenkins-2-rhel7:v3.11
install-jenkins: create-jenkins-is
$(OC_CMD) new-app --template=jenkins-persistent \
-p MEMORY_LIMIT=2Gi \
-p VOLUME_CAPACITY=10Gi \
-p NAMESPACE=$(shell $(OC_CMD) project -q) \
-e INSTALL_PLUGINS=script-security:1.46,permissive-script-security:0.3,timestamper:1.9,http_request:1.8.22,ownership:0.12.1,antisamy-markup-formatter:1.5,update-sites-manager:2.0.0 \
-e JENKINS_JAVA_OVERRIDES="-Dpermissive-script-security.enabled=no_security"
update-pagure-api-key:
[ -n "$(KEY)" ] # You must specify KEY=<key value>
$(OC_CMD) delete secret pagure-api-key --ignore-not-found=true
$(OC_CMD) create secret generic pagure-api-key --from-literal=secrettext=$(KEY)
$(OC_CMD) label secret pagure-api-key credential.sync.jenkins.openshift.io=true
.PHONY: help install uninstall create-jenkins-is install-jenkins update-api-key

View File

@@ -0,0 +1,7 @@
NAME=mbs-backend-greenwave-promote-to-prod
DECISION_CONTEXT_REGEX=c3i_promote_stage_to_prod
SUBJECT_IDENTIFIER_REGEX=^factory2/mbs-backend@sha256:
SOURCE_CONTAINER_REPO=quay.io/factory2/mbs-backend
TARGET_TAG=prod
MESSAGING_TOPIC=Consumer.rh-jenkins-ci-plugin.c3i-mbs-backend-greenwave-promote-to-prod.VirtualTopic.eng.greenwave.decision.update
IMAGE_PROMOTION_JOB=mbs-backend-promoting-to-prod

View File

@@ -0,0 +1 @@
mbs-greenwave-trigger.yaml

View File

@@ -0,0 +1,7 @@
NAME=mbs-backend-greenwave-promote-to-stage
DECISION_CONTEXT_REGEX=c3i_promote_dev_to_stage
SUBJECT_IDENTIFIER_REGEX=^factory2/mbs-backend@sha256:
SOURCE_CONTAINER_REPO=quay.io/factory2/mbs-backend
TARGET_TAG=stage
MESSAGING_TOPIC=Consumer.rh-jenkins-ci-plugin.c3i-mbs-backend-greenwave-promote-to-stage.VirtualTopic.eng.greenwave.decision.update
IMAGE_PROMOTION_JOB=mbs-backend-promoting-to-stage

View File

@@ -0,0 +1 @@
mbs-greenwave-trigger.yaml

View File

@@ -0,0 +1,5 @@
NAME=mbs-backend-promoting-to-prod
PROMOTING_DESTINATIONS=quay.io/factory2/mbs-backend
DEST_TAG=prod
TAG_INTO_IMAGESTREAM=true
DEST_IMAGESTREAM_NAME=mbs-backend

View File

@@ -0,0 +1 @@
mbs-image-promotion-template.yaml

View File

@@ -0,0 +1,5 @@
NAME=mbs-backend-promoting-to-stage
PROMOTING_DESTINATIONS=quay.io/factory2/mbs-backend
DEST_TAG=stage
TAG_INTO_IMAGESTREAM=true
DEST_IMAGESTREAM_NAME=mbs-backend

View File

@@ -0,0 +1 @@
mbs-image-promotion-template.yaml

View File

@@ -0,0 +1 @@
NAME=mbs-dev-integration-test

View File

@@ -0,0 +1 @@
mbs-integration-test-template.yaml

View File

@@ -0,0 +1,7 @@
NAME=mbs-frontend-greenwave-promote-to-prod
DECISION_CONTEXT_REGEX=c3i_promote_stage_to_prod
SUBJECT_IDENTIFIER_REGEX=^factory2/mbs-frontend@sha256:
SOURCE_CONTAINER_REPO=quay.io/factory2/mbs-frontend
TARGET_TAG=prod
MESSAGING_TOPIC=Consumer.rh-jenkins-ci-plugin.c3i-mbs-frontend-greenwave-promote-to-prod.VirtualTopic.eng.greenwave.decision.update
IMAGE_PROMOTION_JOB=mbs-frontend-promoting-to-prod

View File

@@ -0,0 +1 @@
mbs-greenwave-trigger.yaml

View File

@@ -0,0 +1,7 @@
NAME=mbs-frontend-greenwave-promote-to-stage
DECISION_CONTEXT_REGEX=c3i_promote_dev_to_stage
SUBJECT_IDENTIFIER_REGEX=^factory2/mbs-frontend@sha256:
SOURCE_CONTAINER_REPO=quay.io/factory2/mbs-frontend
TARGET_TAG=stage
MESSAGING_TOPIC=Consumer.rh-jenkins-ci-plugin.c3i-mbs-frontend-greenwave-promote-to-stage.VirtualTopic.eng.greenwave.decision.update
IMAGE_PROMOTION_JOB=mbs-frontend-promoting-to-stage

View File

@@ -0,0 +1 @@
mbs-greenwave-trigger.yaml

View File

@@ -0,0 +1,5 @@
NAME=mbs-frontend-promoting-to-prod
PROMOTING_DESTINATIONS=quay.io/factory2/mbs-frontend
DEST_TAG=prod
TAG_INTO_IMAGESTREAM=true
DEST_IMAGESTREAM_NAME=mbs-frontend

View File

@@ -0,0 +1 @@
mbs-image-promotion-template.yaml

View File

@@ -0,0 +1,5 @@
NAME=mbs-frontend-promoting-to-stage
PROMOTING_DESTINATIONS=quay.io/factory2/mbs-frontend
DEST_TAG=stage
TAG_INTO_IMAGESTREAM=true
DEST_IMAGESTREAM_NAME=mbs-frontend

View File

@@ -0,0 +1 @@
mbs-image-promotion-template.yaml

View File

@@ -0,0 +1 @@
NAME=mbs-polling-for-master

View File

@@ -0,0 +1 @@
mbs-polling-pagure.yaml

View File

@@ -0,0 +1,2 @@
NAME=mbs-polling-for-prs
PAGURE_POLLING_FOR_PR=true

View File

@@ -0,0 +1 @@
mbs-polling-pagure.yaml

View File

@@ -0,0 +1,4 @@
NAME=mbs-postmerge
EXTRA_REPOS=https://copr.fedorainfracloud.org/coprs/mikeb/mbs-messaging-umb/repo/fedora-29/mikeb-mbs-messaging-umb-fedora-29.repo
EXTRA_RPMS=mbs-messaging-umb
MAIL_ADDRESS=pnt-factory2-alerts@redhat.com

View File

@@ -0,0 +1 @@
mbs-build-template.yaml

View File

@@ -0,0 +1,4 @@
NAME=mbs-premerge
EXTRA_REPOS=https://copr.fedorainfracloud.org/coprs/mikeb/mbs-messaging-umb/repo/fedora-29/mikeb-mbs-messaging-umb-fedora-29.repo
EXTRA_RPMS=mbs-messaging-umb
MAIL_ADDRESS=pnt-factory2-alerts@redhat.com

View File

@@ -0,0 +1 @@
mbs-build-template.yaml

View File

@@ -0,0 +1,4 @@
NAME=mbs-prod-integration-test
KOJI_IMAGE=quay.io/factory2/koji:prod
UMB_IMAGE=docker-registry.engineering.redhat.com/factory2/umb:prod
ENVIRONMENT=prod

View File

@@ -0,0 +1 @@
mbs-integration-test-template.yaml

View File

@@ -0,0 +1,4 @@
NAME=mbs-stage-integration-test
KOJI_IMAGE=quay.io/factory2/koji:stage
UMB_IMAGE=docker-registry.engineering.redhat.com/factory2/umb:stage
ENVIRONMENT=stage

View File

@@ -0,0 +1 @@
mbs-integration-test-template.yaml

View File

@@ -0,0 +1,4 @@
NAME=mbs-trigger-on-latest-tag
MESSAGING_TOPIC=Consumer.rh-jenkins-ci-plugin.c3i-mbs-trigger-on-latest-tag.VirtualTopic.eng.repotracker.container.tag.>
TEST_JOB_NAME=mbs-stage-integration-test
TRACKED_TAG=latest

View File

@@ -0,0 +1 @@
mbs-repotracker-trigger.yaml

View File

@@ -0,0 +1,4 @@
NAME=mbs-trigger-on-stage-tag
MESSAGING_TOPIC=Consumer.rh-jenkins-ci-plugin.c3i-mbs-trigger-on-stage-tag.VirtualTopic.eng.repotracker.container.tag.>
TEST_JOB_NAME=mbs-prod-integration-test
TRACKED_TAG=stage

View File

@@ -0,0 +1 @@
mbs-repotracker-trigger.yaml

View File

@@ -0,0 +1,272 @@
# Template to produce a new MBS build job in OpenShift.
#
# MBS build job is a part of the MBS C3I pipeline, covering the following steps:
#
# - Run Flake8 and Bandit checks
# - Run unit tests
# - Build SRPM
# - Build RPM
# - Invoke Rpmlint
# - Build container
# - Run integration tests against the latest Koji images
# - Push container
#
# Required Jenkins Plugins:
# - Openshift Sync plugin
# - Openshift Client plugin
# - Kubernetes plugin
# - SSH Agent plugin
# - Timestamper plugin
# - HTTP Request plugin
# - Red Hat CI Plugin
#
---
apiVersion: v1
kind: Template
metadata:
name: mbs-build-pipeline
parameters:
- name: NAME
displayName: Short unique identifier for the templated instances
description: This field is used to deploy multiple pipelines to one OpenShift project from this template.
required: true
value: mbs-build
- name: MBS_GIT_REPO
displayName: MBS Git repo URL
description: Default MBS Git repo URL in which to run dev tests against
required: true
value: https://pagure.io/fm-orchestrator.git
- name: MBS_GIT_REF
displayName: MBS Git repo ref
description: Default MBS Git repo ref in which to run dev tests against
required: true
value: master
- name: MBS_MAIN_BRANCH
displayName: Name of the main branch.
description: If MBS_MAIN_BRANCH equals MBS_GIT_REF, this is a post-merge build, otherwise it's a pre-merge build.
value: master
required: true
- name: JENKINS_AGENT_CLOUD_NAME
displayName: Name of OpenShift cloud in Jenkins master configuration
required: true
value: openshift
- name: JENKINS_AGENT_IMAGE
displayName: Container image for Jenkins slave pods
required: true
value: quay.io/factory2/mbs-jenkins-slave:latest
- name: JENKINS_AGENT_CA_URLS
displayName: Space-separated list of URLs to CA certificates to install in the agent image
required: false
value: ""
- name: MBS_BACKEND_DEV_IMAGE_DESTINATIONS
  displayName: Comma-separated list of container repositories (without tag) to which the built MBS backend dev image will be pushed
description: OpenShift registries must be prefixed with 'atomic:'
required: false
value: "quay.io/factory2/mbs-backend"
- name: MBS_FRONTEND_DEV_IMAGE_DESTINATIONS
  displayName: Comma-separated list of container repositories (without tag) to which the built MBS frontend dev image will be pushed
description: OpenShift registries must be prefixed with 'atomic:'
required: false
value: "quay.io/factory2/mbs-frontend"
- name: CONTAINER_REGISTRY_CREDENTIALS
displayName: Secret name of container registries used for pulling and pushing images
value: factory2-pipeline-registry-credentials
required: false
- name: MBS_DEV_IMAGE_TAG
displayName: Tag name of the resulting container image for development environment
value: "latest"
required: true
- name: MBS_BACKEND_IMAGESTREAM_NAME
displayName: Name of ImageStream for MBS backend images
required: true
value: mbs-backend
- name: MBS_BACKEND_IMAGESTREAM_NAMESPACE
displayName: Namespace of ImageStream for MBS backend images
required: false
- name: MBS_FRONTEND_IMAGESTREAM_NAME
displayName: Name of ImageStream for MBS frontend images
required: true
value: mbs-frontend
- name: MBS_FRONTEND_IMAGESTREAM_NAMESPACE
displayName: Namespace of ImageStream for MBS frontend images
required: false
- name: MBS_INTEGRATION_TEST_BUILD_CONFIG_NAME
displayName: Name of BuildConfig for running integration tests
required: true
value: mbs-dev-integration-test
- name: MBS_INTEGRATION_TEST_BUILD_CONFIG_NAMESPACE
displayName: Namespace of BuildConfig for running integration tests
required: false
- name: FORCE_PUBLISH_IMAGE
displayName: Whether to push the resulting image regardless of the Git branch
value: "false"
required: true
- name: TAG_INTO_IMAGESTREAM
displayName: Whether to tag the pushed image as dev
value: "true"
required: true
- name: PAGURE_URL
displayName: Pagure URL
value: "https://pagure.io"
- name: PAGURE_REPO_NAME
value: fm-orchestrator
- name: PAGURE_REPO_IS_FORK
value: "false"
- name: PAGURE_API_KEY_SECRET_NAME
displayName: Name of Pagure API key secret for updating Pagure pull-request statuses
value: "pagure-api-key"
- name: MAIL_ADDRESS
  displayName: If set, send build failure messages to this mail address.
- name: MBS_SPEC_FILE
  displayName: URL of the RPM specfile for the module-build-service
required: true
value: "https://src.fedoraproject.org/rpms/module-build-service/raw/master/f/module-build-service.spec"
- name: EXTRA_REPOS
displayName: Space-separated list of URLs to .repo files to install in the images
required: false
value: ""
- name: EXTRA_RPMS
displayName: Space-separated list of rpm names to install in the images
required: false
value: ""
- name: TESTCASES
displayName: >-
Space-separated list of testcases to run as part of the pipeline. An empty string (the default)
causes all available testcases to run. The value "skip" causes no testcases to be run.
required: false
value: ""
- name: CLEANUP
displayName: Cleanup objects after the pipeline is complete
required: true
value: "true"
labels:
template: mbs-build
objects:
- kind: "BuildConfig"
apiVersion: "v1"
metadata:
name: "${NAME}-jenkins-slave"
labels:
app: "${NAME}"
spec:
runPolicy: "Serial"
completionDeadlineSeconds: 1800
strategy:
dockerStrategy:
forcePull: true
dockerfilePath: Dockerfile
buildArgs:
- name: CA_URLS
value: "${JENKINS_AGENT_CA_URLS}"
resources:
requests:
memory: 512Mi
cpu: 300m
limits:
memory: 768Mi
cpu: 500m
source:
contextDir: openshift/integration/koji/containers/jenkins-slave
git:
uri: "${MBS_GIT_REPO}"
ref: "${MBS_GIT_REF}"
output:
to:
kind: DockerImage
name: "${JENKINS_AGENT_IMAGE}"
pushSecret:
name: "${CONTAINER_REGISTRY_CREDENTIALS}"
- kind: ServiceAccount
apiVersion: v1
metadata:
name: "${NAME}-jenkins-slave"
labels:
app: "${NAME}"
- kind: RoleBinding
apiVersion: v1
metadata:
name: "${NAME}-jenkins-slave_edit"
labels:
app: "${NAME}"
subjects:
- kind: ServiceAccount
name: "${NAME}-jenkins-slave"
roleRef:
name: edit
- kind: "BuildConfig"
apiVersion: "v1"
metadata:
name: "${NAME}"
labels:
app: "${NAME}"
spec:
runPolicy: "Serial"
completionDeadlineSeconds: 1800
source:
git:
uri: "${MBS_GIT_REPO}"
ref: "${MBS_GIT_REF}"
strategy:
type: JenkinsPipeline
jenkinsPipelineStrategy:
env:
- name: MBS_GIT_REPO
value: "${MBS_GIT_REPO}"
- name: MBS_GIT_REF
value: "${MBS_GIT_REF}"
- name: JENKINS_AGENT_CLOUD_NAME
value: "${JENKINS_AGENT_CLOUD_NAME}"
- name: JENKINS_AGENT_IMAGE
value: "${JENKINS_AGENT_IMAGE}"
- name: JENKINS_AGENT_SERVICE_ACCOUNT
value: "${NAME}-jenkins-slave"
- name: MBS_BACKEND_DEV_IMAGE_DESTINATIONS
value: "${MBS_BACKEND_DEV_IMAGE_DESTINATIONS}"
- name: MBS_FRONTEND_DEV_IMAGE_DESTINATIONS
value: "${MBS_FRONTEND_DEV_IMAGE_DESTINATIONS}"
- name: CONTAINER_REGISTRY_CREDENTIALS
value: "${CONTAINER_REGISTRY_CREDENTIALS}"
- name: FORCE_PUBLISH_IMAGE
value: "${FORCE_PUBLISH_IMAGE}"
- name: TAG_INTO_IMAGESTREAM
value: "${TAG_INTO_IMAGESTREAM}"
- name: MBS_DEV_IMAGE_TAG
value: "${MBS_DEV_IMAGE_TAG}"
- name: MBS_BACKEND_IMAGESTREAM_NAME
value: "${MBS_BACKEND_IMAGESTREAM_NAME}"
- name: MBS_BACKEND_IMAGESTREAM_NAMESPACE
value: "${MBS_BACKEND_IMAGESTREAM_NAMESPACE}"
- name: MBS_FRONTEND_IMAGESTREAM_NAME
value: "${MBS_FRONTEND_IMAGESTREAM_NAME}"
- name: MBS_FRONTEND_IMAGESTREAM_NAMESPACE
value: "${MBS_FRONTEND_IMAGESTREAM_NAMESPACE}"
- name: MBS_MAIN_BRANCH
value: "${MBS_MAIN_BRANCH}"
- name: MBS_INTEGRATION_TEST_BUILD_CONFIG_NAME
value: "${MBS_INTEGRATION_TEST_BUILD_CONFIG_NAME}"
- name: MBS_INTEGRATION_TEST_BUILD_CONFIG_NAMESPACE
value: "${MBS_INTEGRATION_TEST_BUILD_CONFIG_NAMESPACE}"
- name: PAGURE_REPO_NAME
value: "${PAGURE_REPO_NAME}"
- name: PAGURE_REPO_IS_FORK
value: "${PAGURE_REPO_IS_FORK}"
- name: PAGURE_URL
value: "${PAGURE_URL}"
- name: PAGURE_API_KEY_SECRET_NAME
value: "${PAGURE_API_KEY_SECRET_NAME}"
- name: MAIL_ADDRESS
value: "${MAIL_ADDRESS}"
- name: MBS_SPEC_FILE
value: "${MBS_SPEC_FILE}"
- name: EXTRA_REPOS
value: "${EXTRA_REPOS}"
- name: EXTRA_RPMS
value: "${EXTRA_RPMS}"
- name: TESTCASES
value: "${TESTCASES}"
- name: CLEANUP
value: "${CLEANUP}"
jenkinsfilePath: openshift/integration/koji/pipelines/templates/mbs-build.Jenkinsfile

View File

@@ -0,0 +1,526 @@
library identifier: 'c3i@master', changelog: false,
retriever: modernSCM([$class: 'GitSCMSource', remote: 'https://pagure.io/c3i-library.git'])
import static org.apache.commons.lang.StringEscapeUtils.escapeHtml;
pipeline {
agent {
kubernetes {
cloud params.JENKINS_AGENT_CLOUD_NAME
label "jenkins-slave-${UUID.randomUUID().toString()}"
serviceAccount params.JENKINS_AGENT_SERVICE_ACCOUNT
defaultContainer 'jnlp'
yaml """
apiVersion: v1
kind: Pod
metadata:
labels:
app: "jenkins-${env.JOB_BASE_NAME.take(50)}"
factory2-pipeline-kind: "mbs-build-pipeline"
factory2-pipeline-build-number: "${env.BUILD_NUMBER}"
spec:
containers:
- name: jnlp
image: "${params.JENKINS_AGENT_IMAGE}"
imagePullPolicy: Always
tty: true
env:
- name: REGISTRY_CREDENTIALS
valueFrom:
secretKeyRef:
name: "${params.CONTAINER_REGISTRY_CREDENTIALS}"
key: ".dockerconfigjson"
resources:
requests:
memory: 512Mi
cpu: 300m
limits:
memory: 768Mi
cpu: 500m
"""
}
}
options {
timestamps()
timeout(time: 120, unit: 'MINUTES')
buildDiscarder(logRotator(numToKeepStr: '10'))
}
environment {
PIPELINE_NAMESPACE = readFile("/run/secrets/kubernetes.io/serviceaccount/namespace").trim()
PAGURE_API = "${params.PAGURE_URL}/api/0"
PAGURE_REPO_IS_FORK = "${params.PAGURE_REPO_IS_FORK}"
PAGURE_REPO_HOME = "${env.PAGURE_URL}${env.PAGURE_REPO_IS_FORK == 'true' ? '/fork' : ''}/${params.PAGURE_REPO_NAME}"
}
stages {
stage('Prepare') {
steps {
script {
// check out specified branch/commit
/*def scmVars =*/ checkout([$class: 'GitSCM',
branches: [[name: params.MBS_GIT_REF]],
userRemoteConfigs: [[url: params.MBS_GIT_REPO, refspec: '+refs/heads/*:refs/remotes/origin/* +refs/pull/*/head:refs/remotes/origin/pull/*/head']],
])
// get current commit ID
          // FIXME: Due to a bug described in https://issues.jenkins-ci.org/browse/JENKINS-45489,
// the return value of checkout() is unreliable.
// Not working: env.MBS_GIT_COMMIT = scmVars.GIT_COMMIT
env.MBS_GIT_COMMIT = sh(returnStdout: true, script: 'git rev-parse HEAD').trim()
echo "Build ${params.MBS_GIT_REF}, commit=${env.MBS_GIT_COMMIT}"
// Is the current branch a pull-request? If no, env.PR_NO will be empty.
env.PR_NO = getPrNo(params.MBS_GIT_REF)
// Generate a version-release number for the target Git commit
env.MBS_VERSION = sh(script: """grep -m 1 -P -o "(?<=version=')[^']+" setup.py""", returnStdout: true).trim()
env.BUILD_SUFFIX = ".jenkins${currentBuild.id}.git${env.MBS_GIT_COMMIT.take(7)}"
env.TEMP_TAG = "${env.MBS_VERSION}${env.BUILD_SUFFIX}"
def resp = httpRequest params.MBS_SPEC_FILE
env.SPEC_FILE_NAME = params.MBS_SPEC_FILE.split("/").last()
writeFile file: env.SPEC_FILE_NAME, text: resp.content
sh """
sed -i \
-e 's/Version:.*/Version: ${env.MBS_VERSION}/' \
-e 's/%{?dist}/${env.BUILD_SUFFIX}%{?dist}/' \
${env.SPEC_FILE_NAME}
"""
sh 'mkdir repos'
params.EXTRA_REPOS.split().each {
resp = httpRequest it
writeFile file: "repos/${it.split("/").last()}", text: resp.content
}
sh """
sed -i \
-e '/enum34/d' \
-e '/funcsigs/d' \
-e '/futures/d' \
-e '/koji/d' \
requirements.txt
"""
sh """
sed -i \
-e 's/py.test/py.test-3/g' \
-e '/basepython/d' \
-e '/sitepackages/a setenv = PYTHONPATH={toxinidir}' \
tox.ini
"""
}
}
}
stage('Update Build Info') {
when {
expression {
return params.PAGURE_URL && params.PAGURE_REPO_NAME
}
}
steps {
script {
// Set friendly display name and description
if (env.PR_NO) { // is pull-request
env.PR_URL = "${env.PAGURE_REPO_HOME}/pull-request/${env.PR_NO}"
echo "Building PR #${env.PR_NO}: ${env.PR_URL}"
// NOTE: Old versions of OpenShift Client Jenkins plugin are buggy to handle arguments
// with special bash characters (like whitespaces, #, etc).
// https://bugzilla.redhat.com/show_bug.cgi?id=1625518
currentBuild.displayName = "PR#${env.PR_NO}"
// To enable HTML syntax in build description, go to `Jenkins/Global Security/Markup Formatter` and select 'Safe HTML'.
def pagureLink = """<a href="${env.PR_URL}">${currentBuild.displayName}</a>"""
try {
def prInfo = withPagure {
it.getPR(env.PR_NO)
}
pagureLink = """<a href="${env.PR_URL}">PR#${env.PR_NO}: ${escapeHtml(prInfo.title)}</a>"""
// set PR status to Pending
if (params.PAGURE_API_KEY_SECRET_NAME)
setBuildStatusOnPagurePR(null, 'Building...')
} catch (Exception e) {
echo "Error using pagure API: ${e}"
}
currentBuild.description = pagureLink
} else { // is a branch
currentBuild.displayName = "${env.MBS_GIT_REF}: ${env.MBS_GIT_COMMIT.take(7)}"
currentBuild.description = """<a href="${env.PAGURE_REPO_HOME}/c/${env.MBS_GIT_COMMIT}">${currentBuild.displayName}</a>"""
if (params.PAGURE_API_KEY_SECRET_NAME) {
try {
flagCommit('pending', null, 'Building...')
echo "Updated commit ${env.MBS_GIT_COMMIT} status to PENDING."
} catch (e) {
echo "Error updating commit ${env.MBS_GIT_COMMIT} status to PENDING: ${e}"
}
}
}
}
}
}
stage('Build backend image') {
environment {
BACKEND_BUILDCONFIG_ID = "mbs-backend-build-${currentBuild.id}-${UUID.randomUUID().toString().take(7)}"
}
steps {
script {
openshift.withCluster() {
// OpenShift BuildConfig doesn't support specifying a tag name at build time.
// We have to create a new BuildConfig for each image build.
echo 'Creating a BuildConfig for mbs-backend build...'
def created = new Date().format("yyyy-MM-dd'T'HH:mm:ss'Z'", TimeZone.getTimeZone('UTC'))
def template = readYaml file: 'openshift/backend/mbs-backend-build-template.yaml'
def processed = openshift.process(template,
'-p', "NAME=${env.BACKEND_BUILDCONFIG_ID}",
'-p', "MBS_GIT_REPO=${params.MBS_GIT_REPO}",
// A pull-request branch, like pull/123/head, cannot be built with commit ID
// because refspec cannot be customized in an OpenShift build.
'-p', "MBS_GIT_REF=${env.PR_NO ? params.MBS_GIT_REF : env.MBS_GIT_COMMIT}",
'-p', "MBS_BACKEND_IMAGESTREAM_NAME=${params.MBS_BACKEND_IMAGESTREAM_NAME}",
'-p', "MBS_BACKEND_IMAGESTREAM_NAMESPACE=${env.PIPELINE_NAMESPACE}",
'-p', "MBS_IMAGE_TAG=${env.TEMP_TAG}",
'-p', "EXTRA_RPMS=${params.EXTRA_RPMS}",
'-p', "CREATED=${created}"
)
def buildname = c3i.buildAndWait(processed, '--from-dir=.')
def build = openshift.selector(buildname)
def ocpBuild = build.object()
env.BACKEND_IMAGE_DIGEST = ocpBuild.status.output.to.imageDigest
def ref = ocpBuild.status.outputDockerImageReference
def repo = ref.tokenize(':')[0..-2].join(':')
env.BACKEND_IMAGE_REPO = repo
env.BACKEND_IMAGE_REF = repo + '@' + env.BACKEND_IMAGE_DIGEST
env.BACKEND_IMAGE_TAG = env.TEMP_TAG
echo "Built image ${env.BACKEND_IMAGE_REF}, digest: ${env.BACKEND_IMAGE_DIGEST}, tag: ${env.BACKEND_IMAGE_TAG}"
}
}
}
post {
failure {
echo "Failed to build mbs-backend image ${env.TEMP_TAG}."
}
cleanup {
script {
openshift.withCluster() {
echo 'Tearing down...'
openshift.selector('bc', [
'app': env.BACKEND_BUILDCONFIG_ID,
'template': 'mbs-backend-build-template',
]).delete()
}
}
}
}
}
stage('Build frontend image') {
environment {
FRONTEND_BUILDCONFIG_ID = "mbs-frontend-build-${currentBuild.id}-${UUID.randomUUID().toString().take(7)}"
}
steps {
script {
openshift.withCluster() {
// OpenShift BuildConfig doesn't support specifying a tag name at build time.
// We have to create a new BuildConfig for each image build.
echo 'Creating a BuildConfig for mbs-frontend build...'
def created = new Date().format("yyyy-MM-dd'T'HH:mm:ss'Z'", TimeZone.getTimeZone('UTC'))
def template = readYaml file: 'openshift/frontend/mbs-frontend-build-template.yaml'
def processed = openshift.process(template,
'-p', "NAME=${env.FRONTEND_BUILDCONFIG_ID}",
'-p', "MBS_GIT_REPO=${params.MBS_GIT_REPO}",
// A pull-request branch, like pull/123/head, cannot be built with commit ID
// because refspec cannot be customized in an OpenShift build.
'-p', "MBS_GIT_REF=${env.PR_NO ? params.MBS_GIT_REF : env.MBS_GIT_COMMIT}",
'-p', "MBS_FRONTEND_IMAGESTREAM_NAME=${params.MBS_FRONTEND_IMAGESTREAM_NAME}",
'-p', "MBS_FRONTEND_IMAGESTREAM_NAMESPACE=${env.PIPELINE_NAMESPACE}",
'-p', "MBS_IMAGE_TAG=${env.TEMP_TAG}",
'-p', "MBS_BACKEND_IMAGESTREAM_NAME=${params.MBS_BACKEND_IMAGESTREAM_NAME}",
'-p', "MBS_BACKEND_IMAGESTREAM_NAMESPACE=${env.PIPELINE_NAMESPACE}",
'-p', "CREATED=${created}"
)
def buildname = c3i.buildAndWait(processed, '--from-dir=.')
def build = openshift.selector(buildname)
def ocpBuild = build.object()
env.FRONTEND_IMAGE_DIGEST = ocpBuild.status.output.to.imageDigest
def ref = ocpBuild.status.outputDockerImageReference
def repo = ref.tokenize(':')[0..-2].join(':')
env.FRONTEND_IMAGE_REPO = repo
env.FRONTEND_IMAGE_REF = repo + '@' + env.FRONTEND_IMAGE_DIGEST
env.FRONTEND_IMAGE_TAG = env.TEMP_TAG
echo "Built image ${env.FRONTEND_IMAGE_REF}, digest: ${env.FRONTEND_IMAGE_DIGEST}, tag: ${env.FRONTEND_IMAGE_TAG}"
}
}
}
post {
failure {
echo "Failed to build mbs-frontend image ${env.TEMP_TAG}."
}
cleanup {
script {
openshift.withCluster() {
echo 'Tearing down...'
openshift.selector('bc', [
'app': env.FRONTEND_BUILDCONFIG_ID,
'template': 'mbs-frontend-build-template',
]).delete()
}
}
}
}
}
stage('Run integration tests') {
steps {
script {
openshift.withCluster() {
openshift.withProject(params.MBS_INTEGRATION_TEST_BUILD_CONFIG_NAMESPACE) {
def build = c3i.buildAndWait("bc/${params.MBS_INTEGRATION_TEST_BUILD_CONFIG_NAME}",
'-e', "MBS_BACKEND_IMAGE=${env.BACKEND_IMAGE_REF}",
'-e', "MBS_FRONTEND_IMAGE=${env.FRONTEND_IMAGE_REF}",
'-e', "TEST_IMAGES='${env.BACKEND_IMAGE_REF} ${env.FRONTEND_IMAGE_REF}'",
'-e', "IMAGE_IS_SCRATCH=${params.MBS_GIT_REF != params.MBS_MAIN_BRANCH}",
'-e', "TESTCASES='${params.TESTCASES}'",
'-e', "CLEANUP=${params.CLEANUP}"
)
echo 'Integration tests PASSED'
}
}
}
}
post {
failure {
echo 'Integration tests FAILED'
}
}
}
stage('Push images') {
when {
expression {
return params.FORCE_PUBLISH_IMAGE == 'true' ||
params.MBS_GIT_REF == params.MBS_MAIN_BRANCH
}
}
steps {
script {
if (env.REGISTRY_CREDENTIALS) {
dir ("${env.HOME}/.docker") {
writeFile file: 'config.json', text: env.REGISTRY_CREDENTIALS
}
}
def registryToken = readFile(file: '/run/secrets/kubernetes.io/serviceaccount/token')
def copyDown = { name, src ->
src = "docker://${src}"
echo "Pulling ${name} from ${src}..."
withEnv(["SOURCE_IMAGE_REF=${src}", "TOKEN=${registryToken}"]) {
sh """
set -e +x # hide the token from Jenkins console
mkdir -p _images/${name}
skopeo copy \
--src-cert-dir=/run/secrets/kubernetes.io/serviceaccount/ \
--src-creds=serviceaccount:"$TOKEN" \
"$SOURCE_IMAGE_REF" dir:_images/${name}
"""
}
}
def pullJobs = [
'Pulling mbs-backend' : { copyDown('mbs-backend', env.BACKEND_IMAGE_REF) },
'Pulling mbs-frontend' : { copyDown('mbs-frontend', env.FRONTEND_IMAGE_REF) }
]
parallel pullJobs
def copyUp = { name, dest ->
dest = "${dest}:${params.MBS_DEV_IMAGE_TAG ?: 'latest'}"
if (!dest.startsWith('atomic:') && !dest.startsWith('docker://')) {
dest = "docker://${dest}"
}
echo "Pushing ${name} to ${dest}..."
withEnv(["DEST_IMAGE_REF=${dest}"]) {
retry(5) {
sh """
skopeo copy dir:_images/${name} "$DEST_IMAGE_REF"
"""
}
}
}
def backendDests = params.MBS_BACKEND_DEV_IMAGE_DESTINATIONS ?
params.MBS_BACKEND_DEV_IMAGE_DESTINATIONS.split(',') : []
def backendPushJobs = backendDests.collectEntries {
[ "Pushing mbs-backend to ${it}" : { copyUp('mbs-backend', it) } ]
}
parallel backendPushJobs
// Run all the frontend push jobs after the backend push jobs, so we can trigger
// on the frontend repo being updated and be confident it is in sync with the
// backend repo.
def frontendDests = params.MBS_FRONTEND_DEV_IMAGE_DESTINATIONS ?
params.MBS_FRONTEND_DEV_IMAGE_DESTINATIONS.split(',') : []
def frontendPushJobs = frontendDests.collectEntries {
[ "Pushing mbs-frontend to ${it}" : { copyUp('mbs-frontend', it) } ]
}
parallel frontendPushJobs
}
}
post {
failure {
echo 'Pushing images FAILED'
}
}
}
stage('Tag into ImageStreams') {
when {
expression {
return "${params.MBS_DEV_IMAGE_TAG}" && params.TAG_INTO_IMAGESTREAM == "true" &&
(params.FORCE_PUBLISH_IMAGE == "true" || params.MBS_GIT_REF == params.MBS_MAIN_BRANCH)
}
}
steps {
script {
openshift.withCluster() {
openshift.withProject(params.MBS_BACKEND_IMAGESTREAM_NAMESPACE) {
def sourceRef = "${params.MBS_BACKEND_IMAGESTREAM_NAME}@${env.BACKEND_IMAGE_DIGEST}"
def destRef = "${params.MBS_BACKEND_IMAGESTREAM_NAME}:${params.MBS_DEV_IMAGE_TAG}"
echo "Tagging ${sourceRef} as ${destRef}..."
openshift.tag(sourceRef, destRef)
}
openshift.withProject(params.MBS_FRONTEND_IMAGESTREAM_NAMESPACE) {
def sourceRef = "${params.MBS_FRONTEND_IMAGESTREAM_NAME}@${env.FRONTEND_IMAGE_DIGEST}"
def destRef = "${params.MBS_FRONTEND_IMAGESTREAM_NAME}:${params.MBS_DEV_IMAGE_TAG}"
echo "Tagging ${sourceRef} as ${destRef}..."
openshift.tag(sourceRef, destRef)
}
}
}
}
post {
failure {
echo "Tagging images as :${params.MBS_DEV_IMAGE_TAG} FAILED"
}
}
}
}
post {
cleanup {
script {
if (params.CLEANUP == 'true') {
openshift.withCluster() {
if (env.BACKEND_IMAGE_TAG) {
echo "Removing tag ${env.BACKEND_IMAGE_TAG} from the ${params.MBS_BACKEND_IMAGESTREAM_NAME} ImageStream..."
openshift.withProject(params.MBS_BACKEND_IMAGESTREAM_NAMESPACE) {
openshift.tag("${params.MBS_BACKEND_IMAGESTREAM_NAME}:${env.BACKEND_IMAGE_TAG}", "-d")
}
}
if (env.FRONTEND_IMAGE_TAG) {
echo "Removing tag ${env.FRONTEND_IMAGE_TAG} from the ${params.MBS_FRONTEND_IMAGESTREAM_NAME} ImageStream..."
openshift.withProject(params.MBS_FRONTEND_IMAGESTREAM_NAMESPACE) {
openshift.tag("${params.MBS_FRONTEND_IMAGESTREAM_NAME}:${env.FRONTEND_IMAGE_TAG}", "-d")
}
}
}
}
}
}
success {
script {
// on pre-merge workflow success
if (params.PAGURE_API_KEY_SECRET_NAME && env.PR_NO) {
try {
setBuildStatusOnPagurePR(100, 'Build passed.')
echo "Updated PR #${env.PR_NO} status to PASS."
} catch (e) {
echo "Error updating PR #${env.PR_NO} status to PASS: ${e}"
}
}
// on post-merge workflow success
if (params.PAGURE_API_KEY_SECRET_NAME && !env.PR_NO) {
try {
flagCommit('success', 100, 'Build passed.')
echo "Updated commit ${env.MBS_GIT_COMMIT} status to PASS."
} catch (e) {
echo "Error updating commit ${env.MBS_GIT_COMMIT} status to PASS: ${e}"
}
}
}
}
failure {
script {
// on pre-merge workflow failure
if (params.PAGURE_API_KEY_SECRET_NAME && env.PR_NO) {
// updating Pagure PR flag
try {
setBuildStatusOnPagurePR(0, 'Build failed.')
echo "Updated PR #${env.PR_NO} status to FAILURE."
} catch (e) {
echo "Error updating PR #${env.PR_NO} status to FAILURE: ${e}"
}
// making a comment
try {
commentOnPR("""
Build ${env.MBS_GIT_COMMIT} [FAILED](${env.BUILD_URL})!
Rebase or make new commits to rebuild.
""".stripIndent())
echo "Comment made."
} catch (e) {
echo "Error making a comment on PR #${env.PR_NO}: ${e}"
}
}
// on post-merge workflow failure
if (!env.PR_NO) {
// updating Pagure commit flag
if (params.PAGURE_API_KEY_SECRET_NAME) {
try {
flagCommit('failure', 0, 'Build failed.')
echo "Updated commit ${env.MBS_GIT_COMMIT} status to FAILURE."
} catch (e) {
echo "Error updating commit ${env.MBS_GIT_COMMIT} status to FAILURE: ${e}"
}
}
// sending email
if (params.MAIL_ADDRESS){
try {
sendBuildStatusEmail('failed')
} catch (e) {
echo "Error sending email: ${e}"
}
}
}
}
}
}
}
// Extract the pull-request number from a Git ref such as "pull/123/head"
// (optionally prefixed, e.g. "origin/pull/123/head").
// Returns the PR number as a string, or '' when the ref is not a PR ref.
// @NonCPS: java.util.regex.Matcher is not serializable by the Jenkins CPS engine.
@NonCPS
def getPrNo(branch) {
    def matcher = branch =~ /^(?:.+\/)?pull\/(\d+)\/head$/
    return matcher.matches() ? matcher.group(1) : ''
}
// Run the closure `cl` with a Pagure API client configured from the
// pipeline environment (PAGURE_API, PAGURE_REPO_NAME, PAGURE_REPO_IS_FORK).
// Returns whatever the closure returns.
def withPagure(args=[:], cl) {
    args.apiUrl = env.PAGURE_API
    args.repo = env.PAGURE_REPO_NAME
    args.isFork = env.PAGURE_REPO_IS_FORK == 'true'
    return cl(pagure.client(args))
}
// Like withPagure, but additionally injects the Pagure API token from the
// Jenkins credentials store (secret "<namespace>-<PAGURE_API_KEY_SECRET_NAME>")
// so the client can make authenticated calls. Returns the closure's result.
def withPagureCreds(args=[:], cl) {
    def result = null
    def credentialsId = "${env.PIPELINE_NAMESPACE}-${env.PAGURE_API_KEY_SECRET_NAME}"
    withCredentials([string(credentialsId: credentialsId, variable: 'TOKEN')]) {
        args.token = env.TOKEN
        result = withPagure(args, cl)
    }
    return result
}
// Update the CI flag on the pull-request being built (env.PR_NO).
// percent: completion/score percentage (null while the build is pending);
// comment: human-readable status text shown on the PR.
def setBuildStatusOnPagurePR(percent, String comment) {
    withPagureCreds { client ->
        client.updatePRStatus(username: 'c3i-jenkins', uid: 'ci-pre-merge',
            url: env.BUILD_URL, percent: percent, comment: comment, pr: env.PR_NO)
    }
}
// Flag the commit under test (env.MBS_GIT_COMMIT) with a CI status on Pagure.
// status: e.g. 'pending'/'success'/'failure'; percent: score (null while pending).
def flagCommit(status, percent, comment) {
    withPagureCreds { client ->
        client.flagCommit(username: 'c3i-jenkins', uid: 'ci-post-merge', status: status,
            url: env.BUILD_URL, percent: percent, comment: comment, commit: env.MBS_GIT_COMMIT)
    }
}
// Post a comment on the pull-request being built (env.PR_NO).
def commentOnPR(String comment) {
    withPagureCreds { client ->
        client.commentOnPR(comment: comment, pr: env.PR_NO)
    }
}
// Email the build outcome to params.MAIL_ADDRESS.
// For PR builds (env.PR_NO set) the subject names the PR and the body links to it;
// otherwise a plain job/build-number subject is used.
def sendBuildStatusEmail(String status) {
    def body = "Build URL: ${env.BUILD_URL}"
    def subject
    if (env.PR_NO) {
        subject = "Jenkins job ${env.JOB_NAME}, PR #${env.PR_NO} ${status}."
        body += "\nPull Request: ${env.PR_URL}"
    } else {
        subject = "Jenkins job ${env.JOB_NAME} #${env.BUILD_NUMBER} ${status}."
    }
    emailext to: params.MAIL_ADDRESS, subject: subject, body: body
}

View File

@@ -0,0 +1,80 @@
// Use scripted syntax because CIBuildTrigger currently doesn't support the declarative syntax
properties([
disableConcurrentBuilds(),
pipelineTriggers([
// example: https://github.com/jenkinsci/jms-messaging-plugin/blob/9b9387c3a52f037ba0d019c2ebcf2a2796fc6397/src/test/java/com/redhat/jenkins/plugins/ci/integration/AmqMessagingPluginIntegrationTest.java
[$class: 'CIBuildTrigger',
providerData: [$class: 'ActiveMQSubscriberProviderData',
name: params.MESSAGING_PROVIDER,
overrides: [topic: params.MESSAGING_TOPIC],
checks: [
[field: '$.msg.subject_type', expectedValue: 'container-image'],
[field: '$.msg.subject_identifier', expectedValue: params.SUBJECT_IDENTIFIER_REGEX],
[field: '$.msg.decision_context', expectedValue: params.DECISION_CONTEXT_REGEX],
[field: '$.msg.policies_satisfied', expectedValue: 'true'],
],
],
],
]),
])
if (!params.CI_MESSAGE) {
echo 'This build is not started by a CI message. Only configurations were done.'
return
}
def label = "jenkins-slave-${UUID.randomUUID().toString()}"
podTemplate(
cloud: "${params.JENKINS_AGENT_CLOUD_NAME}",
label: label,
serviceAccount: "${params.JENKINS_AGENT_SERVICE_ACCOUNT}",
defaultContainer: 'jnlp',
yaml: """
apiVersion: v1
kind: Pod
metadata:
labels:
app: "jenkins-${env.JOB_BASE_NAME.take(50)}"
factory2-pipeline-kind: "mbs-greenwave-trigger"
factory2-pipeline-build-number: "${env.BUILD_NUMBER}"
spec:
containers:
- name: jnlp
image: ${params.JENKINS_AGENT_IMAGE}
imagePullPolicy: Always
tty: true
resources:
requests:
memory: 256Mi
cpu: 200m
limits:
memory: 512Mi
cpu: 300m
"""
) {
node(label) {
stage('Trigger promotion') {
def message = readJSON text: params.CI_MESSAGE
// Extract the digest of the image to be promoted.
// e.g. factory2/waiverdb@sha256:35201c572fc8a137862b7a256476add8d7465fa5043d53d117f4132402f8ef6b
// -> sha256:35201c572fc8a137862b7a256476add8d7465fa5043d53d117f4132402f8ef6b
def digest = (message.msg.subject_identifier =~ /@(sha256:\w+)$/)[0][1]
// Generate the pull spec of the image
// e.g. quay.io/factory2/waiverdb@sha256:35201c572fc8a137862b7a256476add8d7465fa5043d53d117f4132402f8ef6b
def image = "${params.SOURCE_CONTAINER_REPO}@${digest}"
echo "Starting a new build to promote image ${image} to :${params.TARGET_TAG}..."
openshift.withCluster() {
def bcSelector = openshift.selector('bc', params.IMAGE_PROMOTION_JOB)
def buildSelector = bcSelector.startBuild(
'-e', "IMAGE=${image}",
'-e', "DEST_TAG=${params.TARGET_TAG}",
)
bcSelector.watch {
return !(it.object().status.phase in ["New", "Pending"])
}
buildInfo = buildSelector.object()
echo "Build ${buildInfo.metadata.annotations['openshift.io/jenkins-build-uri'] ?: buildInfo.metadata.name} started."
}
}
}
}

View File

@@ -0,0 +1,110 @@
# Template to produce a new OpenShift pipeline job for triggering a build on repotracker messages
#
---
apiVersion: v1
kind: Template
metadata:
name: mbs-greenwave-trigger
labels:
template: mbs-greenwave-trigger
parameters:
- name: NAME
displayName: Short unique identifier for the templated instances
description: This field is used to deploy multiple pipelines to one OpenShift project from this template.
required: true
- name: MBS_GIT_REPO
displayName: MBS Git repo URL
description: Default MBS Git repo URL in which to run dev tests against
value: "https://pagure.io/fm-orchestrator.git"
- name: MBS_GIT_REF
displayName: MBS Git repo ref
description: Default MBS Git repo ref in which to run dev tests against
value: master
- name: DECISION_CONTEXT_REGEX
displayName: Regex pattern for Greenwave decision context in CI message
required: true
- name: SUBJECT_IDENTIFIER_REGEX
displayName: Regex pattern for Greenwave subject identifier in CI message
required: true
- name: SOURCE_CONTAINER_REPO
displayName: Container repo of the image
required: true
- name: TARGET_TAG
displayName: Tag name to promote the image to
required: true
- name: IMAGE_PROMOTION_JOB
displayName: Downstream image promotion job to trigger
required: true
- name: MESSAGING_PROVIDER
displayName: Name of the JMS messaging provider
value: Red Hat UMB
- name: MESSAGING_TOPIC
displayName: Name of the topic that the trigger subscribes to
value: "Consumer.rh-jenkins-ci-plugin.c3i-greenwave-trigger.VirtualTopic.eng.greenwave.decision.update"
- name: JENKINS_AGENT_IMAGE
displayName: Container image for Jenkins slave pods
value: quay.io/factory2/mbs-jenkins-slave:latest
- name: JENKINS_AGENT_CLOUD_NAME
displayName: Name of OpenShift cloud in Jenkins master configuration
value: openshift
objects:
- kind: ServiceAccount
apiVersion: v1
metadata:
name: "${NAME}-jenkins-slave"
labels:
app: "${NAME}"
- kind: RoleBinding
apiVersion: v1
metadata:
name: "${NAME}-jenkins-slave_edit"
labels:
app: "${NAME}"
subjects:
- kind: ServiceAccount
name: "${NAME}-jenkins-slave"
roleRef:
name: edit
- kind: "BuildConfig"
apiVersion: "v1"
metadata:
name: "${NAME}"
labels:
app: "${NAME}"
spec:
runPolicy: "Serial"
completionDeadlineSeconds: 1800
source:
git:
uri: "${MBS_GIT_REPO}"
ref: "${MBS_GIT_REF}"
strategy:
type: JenkinsPipeline
jenkinsPipelineStrategy:
env:
- name: JENKINS_AGENT_CLOUD_NAME
value: "${JENKINS_AGENT_CLOUD_NAME}"
- name: JENKINS_AGENT_IMAGE
value: "${JENKINS_AGENT_IMAGE}"
- name: JENKINS_AGENT_SERVICE_ACCOUNT
value: "${NAME}-jenkins-slave"
- name: SOURCE_CONTAINER_REPO
value: "${SOURCE_CONTAINER_REPO}"
- name: TARGET_TAG
value: "${TARGET_TAG}"
- name: IMAGE_PROMOTION_JOB
value: "${IMAGE_PROMOTION_JOB}"
- name: DECISION_CONTEXT_REGEX
value: "${DECISION_CONTEXT_REGEX}"
- name: SUBJECT_IDENTIFIER_REGEX
value: "${SUBJECT_IDENTIFIER_REGEX}"
- name: MESSAGING_PROVIDER
value: "${MESSAGING_PROVIDER}"
- name: MESSAGING_TOPIC
value: "${MESSAGING_TOPIC}"
# CI_MESSAGE and MESSAGE_HEADERS are used internally by JMS messaging plugin
- name: CI_MESSAGE
value:
- name: MESSAGE_HEADERS
value:
jenkinsfilePath: openshift/integration/koji/pipelines/templates/mbs-greenwave-trigger.Jenkinsfile

View File

@@ -0,0 +1,120 @@
# Template to produce a pipeline for promoting images between environments
#
# The pipeline pulls the image to be promoted, then pushes it to destinations with promoted tags.
# Optionally, it tags the promoted image into an image stream after pushes.
---
apiVersion: v1
kind: Template
metadata:
name: mbs-image-promotion
labels:
template: mbs-image-promotion
parameters:
- name: NAME
displayName: Short unique identifier for the templated instances
description: This field is used to deploy multiple pipelines to one OpenShift project from this template.
required: true
- name: MBS_GIT_REPO
displayName: MBS Git repo URL
description: Default MBS Git repo URL in which to run functional tests against
required: true
value: "https://pagure.io/fm-orchestrator.git"
- name: MBS_GIT_REF
displayName: MBS Git repo ref
description: Default MBS Git repo ref in which to run functional tests against
required: true
value: master
- name: IMAGE
displayName: The container image to be promoted
description: This field must be in repo:tag or repo@sha256 format
value: provided-by-trigger
- name: PROMOTING_DESTINATIONS
  displayName: Comma-separated list of container repositories (without tags) to which the image will be promoted
description: OpenShift registries must be prefixed with 'atomic:'
required: true
- name: CONTAINER_REGISTRY_CREDENTIALS
displayName: Secret name of container registries used for pulling and pushing images
value: factory2-pipeline-registry-credentials
required: false
- name: TAG_INTO_IMAGESTREAM
displayName: Whether to tag the image into an ImageStream
value: "false"
required: true
- name: DEST_IMAGESTREAM_NAME
displayName: Name of the ImageStream to be tagged
required: false
value: ""
- name: DEST_IMAGESTREAM_NAMESPACE
displayName: Namespace of the ImageStream to be tagged
description: Leaving blank means using the same namespace as the pipeline build
required: false
value: ""
- name: DEST_TAG
displayName: Name of the new tag
required: true
- name: JENKINS_AGENT_IMAGE
displayName: Container image for Jenkins slave pods
required: true
value: quay.io/factory2/mbs-jenkins-slave:latest
- name: JENKINS_AGENT_CLOUD_NAME
displayName: Name of OpenShift cloud in Jenkins master configuration
required: true
value: openshift
objects:
- kind: ServiceAccount
apiVersion: v1
metadata:
name: "${NAME}-jenkins-slave"
labels:
app: "${NAME}"
- kind: RoleBinding
apiVersion: v1
metadata:
name: "${NAME}-jenkins-slave_edit"
labels:
app: "${NAME}"
subjects:
- kind: ServiceAccount
name: "${NAME}-jenkins-slave"
roleRef:
name: edit
- kind: "BuildConfig"
  apiVersion: "v1"
  metadata:
    name: "${NAME}"
    labels:
      app: "${NAME}"
  spec:
    runPolicy: "Serial" # FIXME: Parallel is supported, but we have limited quota in UpShift.
    completionDeadlineSeconds: 1800
    source:
      git:
        uri: "${MBS_GIT_REPO}"
        ref: "${MBS_GIT_REF}"
    # NOTE: a stray "source: {type: None}" nested under strategy was removed --
    # "source" is not a field of a BuildConfig strategy, and the sibling
    # pipeline templates in this repo do not carry it.
    strategy:
      type: JenkinsPipeline
      jenkinsPipelineStrategy:
        env:
        - name: IMAGE
          value: "${IMAGE}"
        - name: PROMOTING_DESTINATIONS
          value: "${PROMOTING_DESTINATIONS}"
        - name: CONTAINER_REGISTRY_CREDENTIALS
          value: "${CONTAINER_REGISTRY_CREDENTIALS}"
        - name: TAG_INTO_IMAGESTREAM
          value: "${TAG_INTO_IMAGESTREAM}"
        - name: DEST_IMAGESTREAM_NAME
          value: "${DEST_IMAGESTREAM_NAME}"
        - name: DEST_IMAGESTREAM_NAMESPACE
          value: "${DEST_IMAGESTREAM_NAMESPACE}"
        - name: DEST_TAG
          value: "${DEST_TAG}"
        - name: JENKINS_AGENT_IMAGE
          value: "${JENKINS_AGENT_IMAGE}"
        - name: JENKINS_AGENT_CLOUD_NAME
          value: "${JENKINS_AGENT_CLOUD_NAME}"
        - name: JENKINS_AGENT_SERVICE_ACCOUNT
          value: "${NAME}-jenkins-slave"
        jenkinsfilePath: openshift/integration/koji/pipelines/templates/mbs-image-promotion.Jenkinsfile

View File

@@ -0,0 +1,123 @@
// Image-promotion pipeline: pulls the image referenced by params.IMAGE, pushes it
// to every repository listed in params.PROMOTING_DESTINATIONS under params.DEST_TAG,
// and optionally tags it into an OpenShift ImageStream afterwards.
pipeline {
  agent {
    kubernetes {
      cloud "${params.JENKINS_AGENT_CLOUD_NAME}"
      // Unique label so every build schedules a fresh agent pod.
      label "jenkins-slave-${UUID.randomUUID().toString()}"
      serviceAccount "${params.JENKINS_AGENT_SERVICE_ACCOUNT}"
      defaultContainer 'jnlp'
      yaml """
      apiVersion: v1
      kind: Pod
      metadata:
        labels:
          app: "jenkins-${env.JOB_BASE_NAME.take(50)}"
          factory2-pipeline-kind: "mbs-image-promotion-pipeline"
          factory2-pipeline-build-number: "${env.BUILD_NUMBER}"
      spec:
        containers:
        - name: jnlp
          image: "${params.JENKINS_AGENT_IMAGE}"
          imagePullPolicy: Always
          tty: true
          env:
          - name: REGISTRY_CREDENTIALS
            valueFrom:
              secretKeyRef:
                name: "${params.CONTAINER_REGISTRY_CREDENTIALS}"
                key: '.dockerconfigjson'
          resources:
            requests:
              memory: 512Mi
              cpu: 300m
            limits:
              memory: 768Mi
              cpu: 500m
      """
    }
  }
  options {
    timestamps()
    timeout(time: 30, unit: 'MINUTES')
  }
  environment {
    // Both files are mounted into the pod by the service account.
    PIPELINE_NAMESPACE = readFile(file: '/run/secrets/kubernetes.io/serviceaccount/namespace').trim()
    SERVICE_ACCOUNT_TOKEN = readFile(file: '/run/secrets/kubernetes.io/serviceaccount/token').trim()
  }
  stages {
    stage ('Prepare') {
      steps {
        script {
          // Setting up registry credentials
          dir ("${env.HOME}/.docker") {
            // for the OpenShift internal registry
            def dockerConfig = readJSON text: '{ "auths": {} }'
            dockerConfig.auths['docker-registry.default.svc:5000'] = [
              'email': '',
              // base64-encode the token with `set +x` so it never appears in the console log
              'auth': sh(returnStdout: true, script: 'set +x; echo -n "serviceaccount:$SERVICE_ACCOUNT_TOKEN" | base64 -').trim()
            ]
            // merging user specified credentials
            if (env.REGISTRY_CREDENTIALS) {
              toBeMerged = readJSON text: env.REGISTRY_CREDENTIALS
              dockerConfig.auths.putAll(toBeMerged.auths)
            }
            // writing to ~/.docker/config.json
            writeJSON file: 'config.json', json: dockerConfig
          }
        }
      }
    }
    stage('Pull image') {
      steps {
        echo "Pulling container image ${params.IMAGE}..."
        withEnv(["SOURCE_IMAGE_REF=${params.IMAGE}"]) {
          sh '''
          set -e +x # hide the token from Jenkins console
          mkdir -p _image
          skopeo copy docker://"$SOURCE_IMAGE_REF" dir:_image
          '''
        }
      }
    }
    stage('Promote image') {
      steps {
        script {
          def destinations = params.PROMOTING_DESTINATIONS ? params.PROMOTING_DESTINATIONS.split(',') : []
          openshift.withCluster() {
            // One parallel push task per destination; all push the local dir copy.
            def pushTasks = destinations.collectEntries {
              ["Pushing ${it}" : {
                def dest = "${it}:${params.DEST_TAG}"
                // Only docker and atomic registries are allowed
                if (!dest.startsWith('atomic:') && !dest.startsWith('docker://')) {
                  dest = "docker://${dest}"
                }
                echo "Pushing container image to ${dest}..."
                withEnv(["DEST_IMAGE_REF=${dest}"]) {
                  // retry to ride out transient registry/network failures
                  retry(5) {
                    sh 'skopeo copy dir:_image "$DEST_IMAGE_REF"'
                  }
                }
              }]
            }
            parallel pushTasks
          }
        }
      }
    }
    stage('Tag ImageStream') {
      // Optional stage, gated on TAG_INTO_IMAGESTREAM and a target ImageStream name.
      when {
        expression {
          return params.DEST_IMAGESTREAM_NAME && params.TAG_INTO_IMAGESTREAM == "true"
        }
      }
      steps {
        script {
          // Default to the pipeline's own namespace when none was specified.
          def destRef = "${params.DEST_IMAGESTREAM_NAMESPACE ?: env.PIPELINE_NAMESPACE}/${params.DEST_IMAGESTREAM_NAME}:${params.DEST_TAG}"
          openshift.withCluster() {
            echo "Tagging ${params.IMAGE} into ${destRef}..."
            openshift.tag('--source=docker', params.IMAGE, destRef)
          }
        }
      }
    }
  }
}

View File

@@ -0,0 +1,141 @@
# Template to produce a new OpenShift pipeline for running integration tests
#
---
apiVersion: v1
kind: Template
metadata:
name: mbs-integration-test
labels:
template: mbs-integration-test
parameters:
- name: NAME
displayName: Short unique identifier for the templated instances
description: This field is used to deploy multiple pipelines to one OpenShift project from this template.
required: true
value: mbs-integration-test
- name: MBS_BACKEND_IMAGE
displayName: The MBS backend container image to be tested
description: This field must be in repo:tag or repo@sha256 format
value: quay.io/factory2/mbs-backend:latest
- name: MBS_FRONTEND_IMAGE
displayName: The MBS frontend container image to be tested
description: This field must be in repo:tag or repo@sha256 format
value: quay.io/factory2/mbs-frontend:latest
- name: MBS_GIT_REPO
displayName: MBS Git repo URL
description: Default MBS Git repo URL in which to find the integration tests to run
required: true
value: "https://pagure.io/fm-orchestrator.git"
- name: MBS_GIT_REF
displayName: MBS Git repo ref
description: Default MBS Git repo ref in which to find the integration tests to run
required: true
value: master
- name: KOJI_IMAGE
displayName: The Koji container image to be tested
description: This field must be in repo:tag or repo@sha256 format
value: quay.io/factory2/koji:latest
- name: UMB_IMAGE
displayName: The UMB container image to be tested
description: This field must be in repo:tag or repo@sha256 format
value: docker-registry.engineering.redhat.com/factory2/umb:latest
- name: TEST_IMAGES
displayName: Images being tested
description: >-
A space-separated list of the refs of the images being tested.
Results of the tests will be reported to ResultsDB.
- name: JENKINS_AGENT_IMAGE
displayName: Container image for Jenkins slave pods
required: true
value: quay.io/factory2/mbs-jenkins-slave:latest
- name: CONTAINER_REGISTRY_CREDENTIALS
displayName: Secret name of container registries used for pulling and pushing images
value: factory2-pipeline-registry-credentials
required: false
- name: JENKINS_AGENT_CLOUD_NAME
displayName: Name of OpenShift cloud in Jenkins master configuration
required: true
value: openshift
- name: ENVIRONMENT
displayName: environment name (dev/stage/prod)
required: true
value: dev
- name: MESSAGING_PROVIDER
displayName: Name of the JMS messaging provider
value: Red Hat UMB
- name: TESTCASES
displayName: >-
Space-separated list of testcases to run as part of the pipeline. An empty string (the default)
causes all available testcases to run. The value "skip" causes no testcases to be run.
required: false
value: ""
- name: CLEANUP
displayName: Cleanup objects after testing is complete
required: true
value: "true"
objects:
- kind: ServiceAccount
apiVersion: v1
metadata:
name: "${NAME}-jenkins-slave"
labels:
app: "${NAME}"
- kind: RoleBinding
apiVersion: v1
metadata:
name: "${NAME}-jenkins-slave_edit"
labels:
app: "${NAME}"
subjects:
- kind: ServiceAccount
name: "${NAME}-jenkins-slave"
roleRef:
name: edit
- kind: "BuildConfig"
apiVersion: "v1"
metadata:
name: "${NAME}"
labels:
app: "${NAME}"
spec:
runPolicy: "Serial" # FIXME: Parallel is supported, but we have limited quota in UpShift.
completionDeadlineSeconds: 1800
source:
git:
uri: "${MBS_GIT_REPO}"
ref: "${MBS_GIT_REF}"
strategy:
type: JenkinsPipeline
jenkinsPipelineStrategy:
env:
- name: MBS_BACKEND_IMAGE
value: "${MBS_BACKEND_IMAGE}"
- name: MBS_FRONTEND_IMAGE
value: "${MBS_FRONTEND_IMAGE}"
- name: KOJI_IMAGE
value: "${KOJI_IMAGE}"
- name: UMB_IMAGE
value: "${UMB_IMAGE}"
- name: TEST_IMAGES
value: "${TEST_IMAGES}"
- name: IMAGE_IS_SCRATCH
value: "true"
- name: "TEST_ID"
value: ""
- name: CONTAINER_REGISTRY_CREDENTIALS
value: "${CONTAINER_REGISTRY_CREDENTIALS}"
- name: JENKINS_AGENT_IMAGE
value: "${JENKINS_AGENT_IMAGE}"
- name: JENKINS_AGENT_CLOUD_NAME
value: "${JENKINS_AGENT_CLOUD_NAME}"
- name: ENVIRONMENT
value: "${ENVIRONMENT}"
- name: MESSAGING_PROVIDER
value: "${MESSAGING_PROVIDER}"
- name: JENKINS_AGENT_SERVICE_ACCOUNT
value: "${NAME}-jenkins-slave"
- name: TESTCASES
value: "${TESTCASES}"
- name: CLEANUP
value: "${CLEANUP}"
jenkinsfilePath: openshift/integration/koji/pipelines/templates/mbs-integration-test.Jenkinsfile

View File

@@ -0,0 +1,273 @@
// Shared c3i pipeline library: provides the ca/umb/koji/mbs deploy helpers and c3i.cleanup.
library identifier: 'c3i@master', changelog: false,
  retriever: modernSCM([$class: "GitSCMSource", remote: "https://pagure.io/c3i-library.git"])
// Integration-test pipeline: deploys a throwaway UMB + Koji + MBS environment keyed
// by TEST_ID, runs the testcases from openshift/integration/koji/pipelines/tests/,
// reports pass/fail for each image in TEST_IMAGES, and tears the environment down.
pipeline {
  agent {
    kubernetes {
      cloud params.JENKINS_AGENT_CLOUD_NAME
      label "jenkins-slave-${UUID.randomUUID().toString()}"
      serviceAccount params.JENKINS_AGENT_SERVICE_ACCOUNT
      defaultContainer 'jnlp'
      yaml """
      apiVersion: v1
      kind: Pod
      metadata:
        labels:
          app: "jenkins-${env.JOB_BASE_NAME.take(50)}"
          factory2-pipeline-kind: "mbs-integration-test-pipeline"
          factory2-pipeline-build-number: "${env.BUILD_NUMBER}"
      spec:
        containers:
        - name: jnlp
          image: "${params.JENKINS_AGENT_IMAGE}"
          imagePullPolicy: Always
          tty: true
          env:
          - name: REGISTRY_CREDENTIALS
            valueFrom:
              secretKeyRef:
                name: "${params.CONTAINER_REGISTRY_CREDENTIALS}"
                key: ".dockerconfigjson"
          resources:
            requests:
              memory: 512Mi
              cpu: 300m
            limits:
              memory: 768Mi
              cpu: 500m
      """
    }
  }
  options {
    timestamps()
    timeout(time: 60, unit: 'MINUTES')
    buildDiscarder(logRotator(numToKeepStr: '10'))
  }
  environment {
    // Jenkins BUILD_TAG could be too long (> 63 characters) for OpenShift to consume
    TEST_ID = "${params.TEST_ID ?: 'jenkins-' + currentBuild.id + '-' + UUID.randomUUID().toString().substring(0,7)}"
  }
  stages {
    stage('Prepare') {
      steps {
        script {
          // Don't set ENVIRONMENT_LABEL in the environment block! Otherwise you will get 2 different UUIDs.
          env.ENVIRONMENT_LABEL = "test-${env.TEST_ID}"
        }
      }
    }
    stage('Call cleanup routine') {
      steps {
        script {
          // Cleanup all test environments that were created 1 hour ago in case of failures of previous cleanups.
          c3i.cleanup('umb', 'koji', 'mbs')
        }
      }
      post {
        failure {
          // Cleanup failure is reported but does not abort the run by itself here.
          echo "Cleanup of old environments FAILED"
        }
      }
    }
    stage('Call UMB deployer') {
      steps {
        script {
          // Broker keystore/truststore are minted by the c3i CA helper.
          def keystore = ca.get_keystore("umb-${TEST_ID}-broker", 'mbskeys')
          def truststore = ca.get_truststore('mbstrust')
          umb.deploy(env.TEST_ID, keystore, 'mbskeys', truststore, 'mbstrust',
            params.UMB_IMAGE)
        }
      }
      post {
        failure {
          echo "UMB deployment FAILED"
        }
      }
    }
    stage('Call Koji deployer') {
      steps {
        script {
          // Koji hub is wired to the freshly-deployed UMB broker for messaging.
          koji.deploy(env.TEST_ID, ca.get_ca_cert(),
            ca.get_ssl_cert("koji-${TEST_ID}-hub"),
            "amqps://umb-${TEST_ID}-broker",
            ca.get_ssl_cert("koji-${TEST_ID}-msg"),
            "mbs-${TEST_ID}-koji-admin",
            params.KOJI_IMAGE)
        }
      }
      post {
        failure {
          echo "Koji deployment FAILED"
        }
      }
    }
    stage('Call MBS deployer') {
      steps {
        script {
          // Required for accessing src.fedoraproject.org
          def digicertca = '''-----BEGIN CERTIFICATE-----
MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBs
MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
d3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5j
ZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAwMFoXDTMxMTExMDAwMDAwMFowbDEL
MAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3
LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFuY2Ug
RVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm
+9S75S0tMqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTW
PNt0OKRKzE0lgvdKpVMSOO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEM
xChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFB
Ik5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQNAQTXKFx01p8VdteZOE3
hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUeh10aUAsg
EsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQF
MAMBAf8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaA
FLE+w2kD+L9HAdSYJhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3Nec
nzyIZgYIVyHbIUf4KmeqvxgydkAQV8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6z
eM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFpmyPInngiK3BD41VHMWEZ71jF
hS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkKmNEVX58Svnw2
Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe
vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep
+OkuE6N36B9K
-----END CERTIFICATE-----
'''
          // CA bundle = test-CA cert plus the DigiCert root above.
          def cabundle = ca.get_ca_cert().cert + digicertca
          def msgcert = ca.get_ssl_cert("mbs-${TEST_ID}-msg")
          def frontendcert = ca.get_ssl_cert("mbs-${TEST_ID}-frontend")
          def kojicert = ca.get_ssl_cert("mbs-${TEST_ID}-koji-admin")
          mbs.deploy(env.TEST_ID, kojicert, ca.get_ca_cert(), msgcert, frontendcert, ca.get_ca_cert(), cabundle,
            "https://koji-${TEST_ID}-hub",
            "umb-${TEST_ID}-broker:61612",
            params.MBS_BACKEND_IMAGE, params.MBS_FRONTEND_IMAGE)
        }
      }
      post {
        failure {
          echo "MBS deployment FAILED"
        }
      }
    }
    stage('Run tests') {
      steps {
        script {
          // TESTCASES empty -> run everything found on disk; "skip" -> run nothing.
          def testcases
          if (params.TESTCASES) {
            if (params.TESTCASES == 'skip') {
              testcases = []
              echo 'Skipping integration tests'
            } else {
              testcases = params.TESTCASES.split()
              echo "Using specified list of testcases: ${testcases}"
            }
          } else {
            testcases = findFiles(glob: 'openshift/integration/koji/pipelines/tests/*.groovy').collect {
              it.name.minus('.groovy')
            }
            echo "Using all available testcases: ${testcases}"
          }
          testcases.each { testcase ->
            // Remember the current testcase so the failure handler can name it.
            env.CURRENT_TESTCASE = testcase
            echo "Running testcase ${testcase}..."
            def test = load "openshift/integration/koji/pipelines/tests/${testcase}.groovy"
            test.runTests()
          }
          echo "Tests complete"
        }
      }
      post {
        failure {
          echo "Testcase ${env.CURRENT_TESTCASE} FAILED"
        }
      }
    }
  }
  post {
    success {
      script {
        // Report a passing result to ResultsDB for each tested image ref.
        params.TEST_IMAGES.split().each {
          sendToResultsDB(it, 'passed')
        }
      }
    }
    failure {
      script {
        params.TEST_IMAGES.split().each {
          sendToResultsDB(it, 'failed')
        }
        // Dump recent logs from every deployment in this test environment for debugging.
        openshift.withCluster() {
          echo 'Getting logs from all deployments...'
          def sel = openshift.selector('dc', ['environment': env.ENVIRONMENT_LABEL])
          sel.logs('--tail=100')
        }
      }
    }
    cleanup {
      script {
        if (params.CLEANUP == 'true') {
          openshift.withCluster() {
            /* Tear down everything we just created */
            echo 'Tearing down test resources...'
            openshift.selector('all,pvc,configmap,secret',
              ['environment': env.ENVIRONMENT_LABEL]).delete('--ignore-not-found=true')
          }
        }
      }
    }
  }
}
/**
 * Report a container-image test result to ResultsDB by publishing a
 * "container-image.test.complete" message on the CI message bus (via the
 * JMS messaging plugin's sendCIMessage step).
 *
 * @param imageRef pull spec of the tested image, expected in repo@digest form
 *                 (e.g. quay.io/factory2/mbs-backend@sha256:...)
 * @param status   test outcome to report, e.g. 'passed' or 'failed'
 */
def sendToResultsDB(imageRef, status) {
  // NOTE(review): if imageRef contains no '@', digest is null and the message
  // fields below are malformed -- assumes callers always pass repo@digest refs;
  // TODO confirm against the TEST_IMAGES producers.
  def (repourl, digest) = imageRef.tokenize('@')
  def (registry, reponame) = repourl.split('/', 2)
  // Last path component of the repo, used as the artifact id prefix.
  def image = reponame.split('/').last()
  // NOTE(review): env.BUILD_URL conventionally ends with '/', so the
  // "/console" and "/rebuild/parametrized" URLs below contain '//' -- harmless
  // to browsers, but could be tidied up.
  def sendResult = sendCIMessage \
    providerName: params.MESSAGING_PROVIDER, \
    overrides: [topic: 'VirtualTopic.eng.ci.container-image.test.complete'], \
    messageType: 'Custom', \
    messageProperties: '', \
    messageContent: """
    {
      "ci": {
        "name": "C3I Jenkins",
        "team": "DevOps",
        "url": "${env.JENKINS_URL}",
        "docs": "https://pagure.io/fm-orchestrator/blob/master/f/openshift/integration/koji",
        "irc": "#pnt-devops-dev",
        "email": "pnt-factory2-devel@redhat.com",
        "environment": "${params.ENVIRONMENT}"
      },
      "run": {
        "url": "${env.BUILD_URL}",
        "log": "${env.BUILD_URL}/console",
        "debug": "",
        "rebuild": "${env.BUILD_URL}/rebuild/parametrized"
      },
      "artifact": {
        "type": "container-image",
        "repository": "${reponame}",
        "digest": "${digest}",
        "nvr": "${imageRef}",
        "issuer": "c3i-jenkins",
        "scratch": ${params.IMAGE_IS_SCRATCH},
        "id": "${image}@${digest}"
      },
      "system":
        [{
          "os": "${params.JENKINS_AGENT_IMAGE}",
          "provider": "openshift",
          "architecture": "x86_64"
        }],
      "type": "integration",
      "category": "${params.ENVIRONMENT}",
      "status": "${status}",
      "xunit": "",
      "generated_at": "${new Date().format("yyyy-MM-dd'T'HH:mm:ss'Z'", TimeZone.getTimeZone('UTC'))}",
      "namespace": "c3i",
      "version": "0.1.0"
    }
    """
  if (sendResult.getMessageId()) {
    // echo sent message id and content
    echo 'Successfully sent the test result to ResultsDB.'
    echo "Message ID: ${sendResult.getMessageId()}"
    echo "Message content: ${sendResult.getMessageContent()}"
  } else {
    // Fixed grammar of the failure message ("Failed to sent" -> "Failed to send").
    echo 'Failed to send the test result to ResultsDB.'
  }
}

View File

@@ -0,0 +1,229 @@
# Template to produce a new OpenShift pipeline job for polling for Pagure branches or PRs
#
---
apiVersion: v1
kind: Template
metadata:
name: mbs-polling-to-pagure
labels:
template: mbs-polling-to-pagure
parameters:
- name: NAME
displayName: Short unique identifier for the templated instances
description: This field is used to deploy multiple pipelines to one OpenShift project from this template.
required: true
value: mbs-polling-to-pagure
- name: PAGURE_REPO_NAME
displayName: Pagure project name
description: <username>/<namespace>/<repo>
required: true
value: fm-orchestrator
- name: PAGURE_REPO_IS_FORK
displayName: Is the Pagure repo a fork?
required: true
value: "false"
- name: PAGURE_POLLING_FOR_PR
displayName: set to 'true' to poll for PRs, or 'false' for the master branch
required: true
value: "false"
- name: PAGURE_URL
displayName: Pagure URL
value: "https://pagure.io"
- name: PAGURE_POLLING_SCHEDULE
displayName: Schedule of polling
description: using cron-style syntax
required: true
value: "H/5 * * * *"
- name: PAGURE_POLLED_BRANCH
displayName: Name of polled branch
required: true
value: "master"
- name: PREMERGE_JOB_NAME
displayName: Downstream pre-merge job name
required: true
value: mbs-premerge
- name: POSTMERGE_JOB_NAME
displayName: Downstream post-merge job name
required: true
value: mbs-postmerge
- name: PIPELINE_UPDATE_JOBS_DIR
displayName: location of pipeline job definitions for auto update
value: jobs
required: false
- name: JENKINS_AGENT_IMAGE
displayName: Container image for Jenkins slave pods
required: true
value: quay.io/factory2/mbs-jenkins-slave:latest
- name: JENKINS_AGENT_CLOUD_NAME
displayName: Name of OpenShift cloud in Jenkins master configuration
required: true
value: openshift
objects:
- kind: ServiceAccount
apiVersion: v1
metadata:
name: "${NAME}-jenkins-slave"
labels:
app: "${NAME}"
- kind: RoleBinding
apiVersion: v1
metadata:
name: "${NAME}-jenkins-slave_edit"
labels:
app: "${NAME}"
subjects:
- kind: ServiceAccount
name: "${NAME}-jenkins-slave"
roleRef:
name: edit
- kind: "BuildConfig"
apiVersion: "v1"
metadata:
name: "${NAME}"
labels:
app: "${NAME}"
spec:
runPolicy: "Serial"
completionDeadlineSeconds: 1800
strategy:
type: JenkinsPipeline
jenkinsPipelineStrategy:
jenkinsfile: |-
// Don't use external Jenkinsfile here, or Jenkins will also poll on that repo and branch
pipeline {
agent {
kubernetes {
cloud "${JENKINS_AGENT_CLOUD_NAME}"
label "jenkins-slave-${UUID.randomUUID().toString()}"
serviceAccount "${NAME}-jenkins-slave"
defaultContainer 'jnlp'
yaml """
apiVersion: v1
kind: Pod
metadata:
labels:
app: "jenkins-${env.JOB_BASE_NAME.take(50)}"
factory2-pipeline-kind: "mbs-polling-to-pagure-pipeline"
factory2-pipeline-build-number: "${env.BUILD_NUMBER}"
spec:
containers:
- name: jnlp
image: "${JENKINS_AGENT_IMAGE}"
imagePullPolicy: Always
tty: true
resources:
requests:
memory: 512Mi
cpu: 300m
limits:
memory: 768Mi
cpu: 500m
"""
}
}
options {
timestamps()
}
environment {
PIPELINE_NAMESPACE = readFile('/run/secrets/kubernetes.io/serviceaccount/namespace').trim()
PAGURE_URL = "${PAGURE_URL}"
PAGURE_REPO_IS_FORK = "${PAGURE_REPO_IS_FORK}"
PAGURE_POLLING_FOR_PR = "${PAGURE_POLLING_FOR_PR}"
PAGURE_REPO_HOME = "${env.PAGURE_URL}${env.PAGURE_REPO_IS_FORK == 'true' ? '/fork' : ''}/${PAGURE_REPO_NAME}"
GIT_URL = "${env.PAGURE_URL}/${env.PAGURE_REPO_IS_FORK == 'true' ? 'forks/' : ''}${PAGURE_REPO_NAME}.git"
PREMERGE_JOB_NAME = "${PREMERGE_JOB_NAME}"
POSTMERGE_JOB_NAME = "${POSTMERGE_JOB_NAME}"
}
triggers { pollSCM("${PAGURE_POLLING_SCHEDULE}") }
stages {
stage('Prepare') {
agent { label 'master' }
steps {
script {
// checking out the polled branch
def polledBranch = env.PAGURE_POLLING_FOR_PR == 'true' ? 'origin/pull/*/head' : "origin/${PAGURE_POLLED_BRANCH}"
def scmVars = checkout([$class: 'GitSCM',
branches: [[name: polledBranch]],
userRemoteConfigs: [
[
name: 'origin',
url: env.GIT_URL,
refspec: '+refs/heads/*:refs/remotes/origin/* +refs/pull/*/head:refs/remotes/origin/pull/*/head',
],
],
extensions: [[$class: 'CleanBeforeCheckout']],
])
env.MBS_GIT_COMMIT = scmVars.GIT_COMMIT
// setting build display name
def prefix = 'origin/'
def branch = scmVars.GIT_BRANCH.startsWith(prefix) ? scmVars.GIT_BRANCH.substring(prefix.size())
: scmVars.GIT_BRANCH // origin/pull/1234/head -> pull/1234/head, origin/master -> master
env.MBS_GIT_BRANCH = branch
echo "Build on branch=${env.MBS_GIT_BRANCH}, commit=${env.MBS_GIT_COMMIT}"
if (env.PAGURE_POLLING_FOR_PR == 'false') {
currentBuild.displayName = "${env.MBS_GIT_BRANCH}: ${env.MBS_GIT_COMMIT.substring(0, 7)}"
currentBuild.description = """<a href="${env.PAGURE_REPO_HOME}/c/${env.MBS_GIT_COMMIT}">${currentBuild.displayName}</a>"""
} else if (env.PAGURE_POLLING_FOR_PR == 'true' && branch ==~ /^pull\/[0-9]+\/head$/) {
env.PR_NO = branch.split('/')[1]
env.PR_URL = "${env.PAGURE_REPO_HOME}/pull-request/${env.PR_NO}"
// To HTML syntax in build description, go to `Jenkins/Global Security/Markup Formatter` and select 'Safe HTML'.
def pagureLink = """<a href="${env.PR_URL}">PR#${env.PR_NO}</a>"""
echo "Building PR #${env.PR_NO}: ${env.PR_URL}"
currentBuild.displayName = "PR#${env.PR_NO}"
currentBuild.description = pagureLink
} else { // This shouldn't happen.
error("Build is aborted due to unexpected polling trigger actions.")
}
}
}
}
stage('Update pipeline jobs') {
when {
expression {
return "${PIPELINE_UPDATE_JOBS_DIR}" && env.PAGURE_POLLING_FOR_PR == 'false' && env.MBS_GIT_BRANCH == "${PAGURE_POLLED_BRANCH}"
}
}
steps {
checkout([$class: 'GitSCM',
branches: [[name: env.MBS_GIT_BRANCH]],
userRemoteConfigs: [
[
name: 'origin',
url: env.GIT_URL,
refspec: '+refs/heads/*:refs/remotes/origin/* +refs/pull/*/head:refs/remotes/origin/pull/*/head',
],
],
extensions: [[$class: 'CleanBeforeCheckout']],
])
script {
dir('openshift/integration/koji/pipelines') {
sh '''
make install JOBS_DIR="${PIPELINE_UPDATE_JOBS_DIR}"
'''
}
}
}
}
stage('Build') {
steps {
script {
openshift.withCluster() {
def bcSelector = openshift.selector('bc',
env.PAGURE_POLLING_FOR_PR == 'true' ? env.PREMERGE_JOB_NAME : env.POSTMERGE_JOB_NAME)
echo 'Starting a MBS build run...'
def devBuild = bcSelector.startBuild(
'-e', "MBS_GIT_REF=${env.MBS_GIT_BRANCH}",
)
devBuild.watch {
return !(it.object().status.phase in ["New", "Pending"])
}
def devBuildInfo = devBuild.object()
def downstreamBuildName = devBuildInfo.metadata.name
def downstreamBuildUrl = devBuildInfo.metadata.annotations['openshift.io/jenkins-build-uri']
echo "Downstream build ${downstreamBuildName}(${downstreamBuildUrl}) started."
}
}
}
}
}
}

View File

@@ -0,0 +1,91 @@
// Use scripted syntax because CIBuildTrigger currently doesn't support the declarative syntax
// Trigger pipeline: subscribes to repotracker messages about the tracked frontend
// repo/tag; on a matching message it looks up the backend image with the same tag
// and starts the integration-test job with both image refs.
properties([
  disableConcurrentBuilds(),
  pipelineTriggers([
    // example: https://github.com/jenkinsci/jms-messaging-plugin/blob/9b9387c3a52f037ba0d019c2ebcf2a2796fc6397/src/test/java/com/redhat/jenkins/plugins/ci/integration/AmqMessagingPluginIntegrationTest.java
    [$class: 'CIBuildTrigger',
      providerData: [$class: 'ActiveMQSubscriberProviderData',
        name: params.MESSAGING_PROVIDER,
        overrides: [topic: params.MESSAGING_TOPIC],
        // Only react to add/update events on the tracked repo:tag.
        selector: "repo = '${params.TRACKED_CONTAINER_REPO}' AND action IN ('added', 'updated') AND tag = '${params.TRACKED_TAG}'",
      ],
    ],
  ]),
])
// The first (manual) run only installs the trigger configuration above;
// CI_MESSAGE is set only when a bus message actually fires the job.
if (!params.CI_MESSAGE) {
  echo 'This build is not started by a CI message. Only configurations were done.'
  return
}
def label = "jenkins-slave-${UUID.randomUUID().toString()}"
podTemplate(
  cloud: "${params.JENKINS_AGENT_CLOUD_NAME}",
  label: label,
  serviceAccount: "${env.JENKINS_AGENT_SERVICE_ACCOUNT}",
  defaultContainer: 'jnlp',
  yaml: """
  apiVersion: v1
  kind: Pod
  metadata:
    labels:
      app: "jenkins-${env.JOB_BASE_NAME.take(50)}"
      factory2-pipeline-kind: "mbs-repotracker-trigger"
      factory2-pipeline-build-number: "${env.BUILD_NUMBER}"
  spec:
    containers:
    - name: jnlp
      image: ${params.JENKINS_AGENT_IMAGE}
      imagePullPolicy: Always
      tty: true
      env:
      - name: REGISTRY_CREDENTIALS
        valueFrom:
          secretKeyRef:
            name: "${params.CONTAINER_REGISTRY_CREDENTIALS}"
            key: ".dockerconfigjson"
      resources:
        requests:
          memory: 256Mi
          cpu: 200m
        limits:
          memory: 512Mi
          cpu: 300m
  """
) {
  node(label) {
    stage('Trigger tests') {
      // Message body comes from the repotracker service; carries repo/tag/action/digest.
      def message = readJSON text: params.CI_MESSAGE
      echo "Tag :${message.tag} is ${message.action} in ${message.repo}. New digest: ${message.digest}"
      def frontendImage = "${message.repo}@${message.digest}"
      // We have the digest of the current frontend image with this tag.
      // Lookup the digest of the current backend image with the same tag.
      if (env.REGISTRY_CREDENTIALS) {
        // Install registry credentials so skopeo can inspect private repos.
        dir ("${env.HOME}/.docker") {
          writeFile file: 'config.json', text: env.REGISTRY_CREDENTIALS
        }
      }
      def output = sh(script: "skopeo inspect docker://${params.MBS_BACKEND_REPO}:${message.tag}", returnStdout: true).trim()
      def backendData = readJSON text: output
      def backendImage = "${params.MBS_BACKEND_REPO}@${backendData.Digest}"
      echo "Current mbs-backend image is: ${backendImage}"
      echo "Triggering a job to test if ${frontendImage} and ${backendImage} meet all criteria of desired tag"
      openshift.withCluster() {
        openshift.withProject(params.TEST_JOB_NAMESPACE) {
          def testBcSelector = openshift.selector('bc', params.TEST_JOB_NAME)
          def buildSelector = testBcSelector.startBuild(
            '-e', "MBS_BACKEND_IMAGE=${backendImage}",
            '-e', "MBS_FRONTEND_IMAGE=${frontendImage}",
            '-e', "TEST_IMAGES='${backendImage} ${frontendImage}'",
            '-e', "IMAGE_IS_SCRATCH=false"
          )
          // Wait only until the downstream build leaves the queued states.
          buildSelector.watch {
            return !(it.object().status.phase in ["New", "Pending"])
          }
          buildInfo = buildSelector.object()
          echo "Build ${buildInfo.metadata.annotations['openshift.io/jenkins-build-uri'] ?: buildInfo.metadata.name} started."
        }
      }
    }
  }
}

View File

@@ -0,0 +1,116 @@
# Template to produce a new OpenShift pipeline job for triggering a build on repotracker messages
#
---
apiVersion: v1
kind: Template
metadata:
name: mbs-repotracker-trigger
labels:
template: mbs-repotracker-trigger
parameters:
- name: NAME
displayName: Short unique identifier for the templated instances
description: This field is used to deploy multiple pipelines to one OpenShift project from this template.
value: mbs-repotracker-trigger
- name: MBS_GIT_REPO
displayName: MBS Git repo URL
description: Default MBS Git repo URL in which to run dev tests against
value: "https://pagure.io/fm-orchestrator.git"
- name: MBS_GIT_REF
displayName: MBS Git repo ref
description: Default MBS Git repo ref in which to run dev tests against
value: master
- name: TRACKED_CONTAINER_REPO
displayName: Container repo to be tracked
value: quay.io/factory2/mbs-frontend
- name: TRACKED_TAG
displayName: Name of tag to be tracked
required: true
- name: MBS_BACKEND_REPO
displayName: The location of the repo (without tag) where the mbs-backend image is located
value: quay.io/factory2/mbs-backend
- name: CONTAINER_REGISTRY_CREDENTIALS
displayName: Secret name of container registries used for pulling and pushing images
value: factory2-pipeline-registry-credentials
required: false
- name: JENKINS_AGENT_IMAGE
displayName: Container image for Jenkins slave pods
value: quay.io/factory2/mbs-jenkins-slave:latest
- name: JENKINS_AGENT_CLOUD_NAME
displayName: Name of OpenShift cloud in Jenkins master configuration
value: openshift
- name: MESSAGING_PROVIDER
displayName: Name of the JMS messaging provider
value: Red Hat UMB
- name: MESSAGING_TOPIC
displayName: Name of the topic that the trigger subscribes to
value: "Consumer.rh-jenkins-ci-plugin.c3i-mbs-repotracker-trigger.VirtualTopic.eng.repotracker.container.tag.>"
- name: TEST_JOB_NAME
displayName: Name of integration test job to trigger
required: true
- name: TEST_JOB_NAMESPACE
displayName: Namespace in which to trigger the integration test job
required: false
objects:
- kind: ServiceAccount
apiVersion: v1
metadata:
name: "${NAME}-jenkins-slave"
labels:
app: "${NAME}"
- kind: RoleBinding
apiVersion: v1
metadata:
name: "${NAME}-jenkins-slave_edit"
labels:
app: "${NAME}"
subjects:
- kind: ServiceAccount
name: "${NAME}-jenkins-slave"
roleRef:
name: edit
- kind: "BuildConfig"
apiVersion: "v1"
metadata:
name: "${NAME}"
labels:
app: "${NAME}"
spec:
runPolicy: "Serial"
completionDeadlineSeconds: 1800
source:
git:
uri: "${MBS_GIT_REPO}"
ref: "${MBS_GIT_REF}"
strategy:
type: JenkinsPipeline
jenkinsPipelineStrategy:
env:
- name: JENKINS_AGENT_CLOUD_NAME
value: "${JENKINS_AGENT_CLOUD_NAME}"
- name: JENKINS_AGENT_IMAGE
value: "${JENKINS_AGENT_IMAGE}"
- name: JENKINS_AGENT_SERVICE_ACCOUNT
value: "${NAME}-jenkins-slave"
- name: TRACKED_CONTAINER_REPO
value: "${TRACKED_CONTAINER_REPO}"
- name: TRACKED_TAG
value: "${TRACKED_TAG}"
- name: MBS_BACKEND_REPO
value: "${MBS_BACKEND_REPO}"
- name: CONTAINER_REGISTRY_CREDENTIALS
value: "${CONTAINER_REGISTRY_CREDENTIALS}"
- name: TEST_JOB_NAME
value: "${TEST_JOB_NAME}"
- name: TEST_JOB_NAMESPACE
value: "${TEST_JOB_NAMESPACE}"
- name: MESSAGING_PROVIDER
value: "${MESSAGING_PROVIDER}"
- name: MESSAGING_TOPIC
value: "${MESSAGING_TOPIC}"
# CI_MESSAGE and MESSAGE_HEADERS are used internally by JMS messaging plugin
- name: CI_MESSAGE
value:
- name: MESSAGE_HEADERS
value:
jenkinsfilePath: openshift/integration/koji/pipelines/templates/mbs-repotracker-trigger.Jenkinsfile

View File

@@ -0,0 +1,73 @@
// Build an empty module and verify that the CGImport works correctly
def runTests() {
  // Point the koji client at the per-TEST_ID hub, authenticating as the admin user.
  def adminCert = ca.get_ssl_cert("mbs-${TEST_ID}-koji-admin")
  koji.setConfig("https://koji-${TEST_ID}-hub/kojihub", "https://koji-${TEST_ID}-hub/kojifiles",
                 adminCert.cert, adminCert.key, ca.get_ca_cert().cert)
  // Create the f28 module tags if this environment doesn't have them yet.
  def existingTags = koji.callMethod("listTags")
  if (!existingTags.any { tag -> tag.name == "module-f28" }) {
    koji.addTag("module-f28")
  }
  if (!existingTags.any { tag -> tag.name == "module-f28-build" }) {
    koji.addTag("module-f28-build", "--parent=module-f28", "--arches=x86_64")
  }
  // There's currently no way to query whether a given user has CG access, so just add it
  // and hope no one else has already done it.
  koji.runCmd("grant-cg-access", "mbs-${TEST_ID}-koji-admin", "module-build-service", "--new")
  if (!koji.callMethod("listBTypes").any { btype -> btype.name == "module" }) {
    koji.callMethodLogin("addBType", "module")
  }
  // Submit the empty testmodule build to the MBS frontend REST API.
  def buildparams = """
    {"scmurl": "https://src.fedoraproject.org/forks/mikeb/modules/testmodule.git?#8b3fb16160f899ce10905faf570f110d52b91154",
     "branch": "empty-f28",
     "owner": "mbs-${TEST_ID}-koji-admin"}
  """
  def response = httpRequest(
    httpMode: "POST",
    url: "https://mbs-${TEST_ID}-frontend/module-build-service/1/module-builds/",
    acceptType: "APPLICATION_JSON",
    contentType: "APPLICATION_JSON",
    requestBody: buildparams,
    ignoreSslErrors: true,
  )
  if (response.status != 201) {
    echo "Response code was ${response.status}, output was ${response.content}"
    error "POST response code was ${response.status}, not 201"
  }
  def submitted = readJSON(text: response.content)
  // Poll (up to 10 minutes) until the module build is "ready" and both the
  // testmodule and testmodule-devel builds have been CG-imported into Koji.
  timeout(10) {
    waitUntil {
      response = httpRequest(
        url: "https://mbs-${TEST_ID}-frontend/module-build-service/1/module-builds/${submitted.id}",
        ignoreSslErrors: true,
      )
      if (response.status != 200) {
        echo "Response code was ${response.status}, output was ${response.content}"
        error "GET response code was ${response.status}, not 200"
      }
      def moduleInfo = readJSON(text: response.content)
      if (moduleInfo.state_name == "failed") {
        error "Module ${moduleInfo.id} (${moduleInfo.name}) is in the ${moduleInfo.state_name} state"
      } else if (moduleInfo.state_name != "ready") {
        echo "Module ${moduleInfo.id} (${moduleInfo.name}) is in the ${moduleInfo.state_name} state, not ready"
        return false
      }
      def allBuilds = koji.listBuilds()
      echo "Builds: ${allBuilds}"
      if (!allBuilds.find { build -> build.name == "testmodule" }) {
        echo "Could not find a build of testmodule"
        return false
      }
      if (!allBuilds.find { build -> build.name == "testmodule-devel" }) {
        echo "Could not find a build of testmodule-devel"
        return false
      }
      echo "All checks passed"
      return true
    }
  }
}
return this;

View File

@@ -0,0 +1,157 @@
// Submit a build to MBS and verify that it initializes Koji correctly
//
// NOTE(review): Jenkins Pipeline (CPS Groovy) script meant to be load()-ed by
// a pipeline. Relies on globals supplied by the caller — `ca`, `koji`,
// `TEST_ID` — plus the httpRequest, readJSON, timeout, waitUntil, echo, and
// error pipeline steps.
//
// Submits a (non-empty) testmodule build to MBS, then polls until the module
// enters the "build" state and verifies that MBS created the expected Koji
// objects: the module target, a correctly configured build tag (arches, perm,
// dnf, repo_include_all, inheritance), srpm-build/build groups with the
// required packages, the module-build-macros build task, and a newRepo task.
// Returns true on success; error() aborts on non-recoverable failures;
// returning false from the waitUntil closure causes a retry.
def runTests() {
  // Authenticate to the per-test-run Koji hub as the test admin user with a
  // client certificate issued by the test CA.
  def clientcert = ca.get_ssl_cert("mbs-${TEST_ID}-koji-admin")
  koji.setConfig("https://koji-${TEST_ID}-hub/kojihub", "https://koji-${TEST_ID}-hub/kojifiles",
                 clientcert.cert, clientcert.key, ca.get_ca_cert().cert)
  // Idempotent setup: make sure the base tag and its -build child (x86_64)
  // exist before submitting the module build.
  def tags = koji.callMethod("listTags")
  if (!tags.any { it.name == "module-f28" }) {
    koji.addTag("module-f28")
  }
  if (!tags.any { it.name == "module-f28-build" }) {
    koji.addTag("module-f28-build", "--parent=module-f28", "--arches=x86_64")
  }
  // Build request body: testmodule pinned to a specific commit so the test is
  // reproducible, owned by the Koji admin user created for this run.
  def buildparams = """
    {"scmurl": "https://src.fedoraproject.org/modules/testmodule.git?#9c589780e1dd1698dc64dfa28d30014ad18cad32",
     "branch": "f28",
     "owner": "mbs-${TEST_ID}-koji-admin"}
  """
  // Submit the module build to the MBS v1 REST API. The frontend uses a
  // test-CA-signed certificate, hence ignoreSslErrors.
  def resp = httpRequest(
    httpMode: "POST",
    url: "https://mbs-${TEST_ID}-frontend/module-build-service/1/module-builds/",
    acceptType: "APPLICATION_JSON",
    contentType: "APPLICATION_JSON",
    requestBody: buildparams,
    ignoreSslErrors: true,
  )
  if (resp.status != 201) {
    echo "Response code was ${resp.status}, output was ${resp.content}"
    error "POST response code was ${resp.status}, not 201"
  }
  def buildinfo = readJSON(text: resp.content)
  // Check that MBS has configured Koji correctly
  // Poll (up to 10 minutes — timeout() default unit is minutes).
  timeout(10) {
    waitUntil {
      // Re-fetch the module build state from MBS.
      resp = httpRequest(
        url: "https://mbs-${TEST_ID}-frontend/module-build-service/1/module-builds/${buildinfo.id}",
        ignoreSslErrors: true,
      )
      if (resp.status != 200) {
        echo "Response code was ${resp.status}, output was ${resp.content}"
        error "GET response code was ${resp.status}, not 200"
      }
      def modinfo = readJSON(text: resp.content)
      // "failed" is terminal — abort; the Koji checks below are only valid
      // once the module has reached the "build" state.
      if (modinfo.state_name == "failed") {
        error "Module ${modinfo.id} (${modinfo.name}) is in the ${modinfo.state_name} state"
      } else if (modinfo.state_name != "build") {
        echo "Module ${modinfo.id} (${modinfo.name}) is in the ${modinfo.state_name} state, not build"
        return false
      }
      // MBS should have created a target named module-testmodule-<stream>-<version>...
      def targets = koji.listTargets()
      def target = targets.find { it.name =~ "^module-testmodule-" }
      if (!target) {
        echo "Could not find module target"
        return false
      }
      echo "Target: ${target}"
      // ...whose build tag is configured the way MBS is expected to set it up.
      def taginfo = koji.tagInfo(target.build_tag_name)
      echo "Build tag: ${taginfo}"
      if (taginfo.arches != "x86_64") {
        echo "${target.build_tag_name} does not have arches set to x86_64"
        return false
      }
      if (taginfo.perm != "admin") {
        echo "${target.build_tag_name} does not have perm set to admin"
        return false
      }
      if (taginfo.extra.get("mock.package_manager", "") != "dnf") {
        echo "${target.build_tag_name} is not configured to use dnf"
        return false
      }
      if (!taginfo.extra.get("repo_include_all", false)) {
        echo "${target.build_tag_name} is not configured with repo_include_all"
        return false
      }
      // The build tag must inherit from the base module-f28-build tag.
      def ancestors = koji.listTagInheritance(target.build_tag_name)
      echo "Ancestors of ${target.build_tag_name}: ${ancestors}"
      if (!ancestors.contains("module-f28-build")) {
        echo "module-f28-build not in inheritance of ${target.build_tag_name}"
        return false
      }
      // Both the srpm-build and build groups must exist and each must contain
      // bash, rpm-build, and module-build-macros.
      def groups = koji.listGroups(target.build_tag_name)
      echo "Groups of ${target.build_tag_name}: ${groups}"
      def srpm_build = groups.find { it.name == "srpm-build" }
      if (!srpm_build) {
        echo "${target.build_tag_name} does not have a srpm-build group"
        return false
      }
      def srpm_packages = srpm_build.packagelist.findAll { it.package in ["bash", "rpm-build", "module-build-macros"] }
      if (srpm_packages.size() != 3) {
        echo "${target.build_tag_name} does not have required packages in the srpm-build group"
        return false
      }
      def build = groups.find { it.name == "build" }
      if (!build) {
        echo "${target.build_tag_name} does not have a build group"
        return false
      }
      def build_packages = build.packagelist.findAll { it.package in ["bash", "rpm-build", "module-build-macros"] }
      if (build_packages.size() != 3) {
        echo "${target.build_tag_name} does not have required packages in the build group"
        return false
      }
      // MBS's first action in the build state is building module-build-macros
      // from an srpm; verify the build task's request
      // ([source, target, opts]) matches that.
      def tasks = koji.listTasks()
      echo "Tasks: ${tasks}"
      def build_task = tasks.find { it.method == "build" }
      if (!build_task) {
        echo "No build task has been created"
        return false
      }
      if (build_task.request.size() < 3) {
        echo "The build task does not have the correct format"
        return false
      }
      if (!build_task.request[0].contains("module-build-macros")) {
        echo "The build task is not building module-build-macros"
        return false
      }
      if (!build_task.request[0].endsWith(".src.rpm")) {
        echo "The build task is not building from a srpm"
        return false
      }
      if (build_task.request[1] != target.name) {
        echo "The build task is not using the correct target"
        return false
      }
      if (!build_task.request[2].get("skip_tag", false)) {
        echo "The build task is not using skip_tag"
        return false
      }
      if (build_task.request[2].get("mbs_artifact_name", "") != "module-build-macros") {
        echo "The build task does not have the mbs_artifact_name option set correctly"
        return false
      }
      if (build_task.request[2].get("mbs_module_target", "") != target.dest_tag_name) {
        echo "The build task does not have the mbs_module_target option set correctly"
        return false
      }
      // A newRepo task regenerating the build tag's repo must also exist.
      def newrepo_task = tasks.find { it.method == "newRepo" }
      if (!newrepo_task) {
        echo "No newRepo task has been created"
        return false
      }
      if (newrepo_task.request.size() < 1) {
        echo "The newRepo task does not have the correct format"
        return false
      }
      if (newrepo_task.request[0] != target.build_tag_name) {
        echo "The newRepo task is not associated with the correct tag"
        return false
      }
      echo "All checks passed"
      return true
    }
  }
}
// Return the script object so the loading pipeline can call runTests().
return this;

View File

@@ -289,6 +289,19 @@ objects:
logging.basicConfig(level='DEBUG')
from module_build_service import app as application
- apiVersion: v1
# Only creating this as a Secret because it supports base64-encoded data.
# Convert to a ConfigMap and use binaryData once we're running on OpenShift 3.10+.
kind: Secret
metadata:
name: mbs-cacerts
labels:
app: mbs
service: frontend
environment: "test-${TEST_ID}"
data:
ca-bundle.crt: |-
${CA_CERTS}
- apiVersion: v1
kind: Secret
metadata:
@@ -392,6 +405,9 @@ objects:
- name: koji-certificates
mountPath: /etc/koji-certs
readOnly: true
- name: cacerts-vol
mountPath: /etc/pki/tls/certs
readOnly: true
resources:
limits:
memory: 400Mi
@@ -415,6 +431,10 @@ objects:
- name: koji-certificates
secret:
secretName: mbs-koji-secrets
- name: cacerts-vol
secret:
secretName: mbs-cacerts
defaultMode: 0444
triggers:
- type: ConfigChange
# backend
@@ -753,6 +773,9 @@ objects:
- name: koji-certificates
mountPath: /etc/koji-certs
readOnly: true
- name: cacerts-vol
mountPath: /etc/pki/tls/certs
readOnly: true
resources:
limits:
memory: 400Mi
@@ -770,6 +793,10 @@ objects:
- name: koji-certificates
secret:
secretName: mbs-koji-secrets
- name: cacerts-vol
secret:
secretName: mbs-cacerts
defaultMode: 0444
triggers:
- type: ConfigChange
# postgresql
@@ -904,3 +931,7 @@ parameters:
description: Top level URL of the Koji instance to use. Without a '/' at the end.
default: https://mbs-brew-hub.usersys.redhat.com
required: true
- name: CA_CERTS
displayName: CA certificates
description: Bundle of CA certificates that should be trusted
required: true