From 628906a9455326ed41c8f32be5bdeaad9af5eddd Mon Sep 17 00:00:00 2001
From: Sayan Chowdhury
Date: Fri, 6 Apr 2018 05:29:38 +0530
Subject: [PATCH 01/13] fedimg: Port fix for F28 compose messages to 1.0.1

Signed-off-by: Sayan Chowdhury
---
 files/hotfix/fedimg/consumers.py | 135 +++++++++++++++++++++++++++++++
 roles/fedimg/tasks/main.yml | 8 ++
 2 files changed, 143 insertions(+)
 create mode 100644 files/hotfix/fedimg/consumers.py

diff --git a/files/hotfix/fedimg/consumers.py b/files/hotfix/fedimg/consumers.py
new file mode 100644
index 0000000000..9b5e4a0437
--- /dev/null
+++ b/files/hotfix/fedimg/consumers.py
@@ -0,0 +1,135 @@
+# -*- coding: utf-8 -*-
+# This file is part of fedimg.
+# Copyright (C) 2014-2017 Red Hat, Inc.
+#
+# fedimg is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# fedimg is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public
+# License along with fedimg; if not, see http://www.gnu.org/licenses,
+# or write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Authors: David Gay
+# Sayan Chowdhury
+"""
+This is the `fedmsg consumer`_ that subscribes to the topic emitted after the
+completion of the nightly and production compose. The consumer on receiving the
+message uploads the image using the API of the cloud providers.
+
+"""
+
+import logging
+import multiprocessing.pool
+
+import fedmsg.consumers
+import fedmsg.encoding
+import fedfind.release
+
+import fedimg.uploader
+
+from fedimg.config import PROCESS_COUNT, STATUS_FILTER
+from fedimg.utils import get_rawxz_urls, get_value_from_dict
+
+LOG = logging.getLogger(__name__)
+
+
+class FedimgConsumer(fedmsg.consumers.FedmsgConsumer):
+    """
+    A `fedmsg consumer`_ that listens to the pungi compose topics and kicks
+    off the process to upload the images to various cloud providers.
+
+    Attributes:
+        topic (str): The topics this consumer is subscribed to. Set to
+            ``org.fedoraproject.prod.pungi.compose.status.change``.
+        config_key (str): The key to set to ``True`` in the fedmsg config to
+            enable this consumer. The key is ``fedimgconsumer.prod.enabled``.
+    """
+    topic = ['org.fedoraproject.prod.pungi.compose.status.change']
+    config_key = "fedimgconsumer.prod.enabled"
+
+    def __init__(self, *args, **kwargs):
+        LOG.info("FedimgConsumer initializing")
+        super(FedimgConsumer, self).__init__(*args, **kwargs)
+
+        # Threadpool for upload jobs
+        LOG.info("Creating thread pool of %s process", PROCESS_COUNT)
+        self.upload_pool = multiprocessing.pool.ThreadPool(
+            processes=PROCESS_COUNT
+        )
+        LOG.info("FedimgConsumer initialized")
+
+    def consume(self, msg):
+        """
+        This is called when we receive a message matching our topics.
+
+        Args:
+            msg (dict): The raw message from fedmsg.
+        """
+        LOG.info('Received %r %r', msg['topic'], msg['body']['msg_id'])
+
+        msg_info = msg['body']['msg']
+        if msg_info['status'] not in STATUS_FILTER:
+            return
+
+        location = msg_info['location']
+        compose_id = msg_info['compose_id']
+        compose_metadata = fedfind.release.get_release_cid(compose_id).metadata
+        images_meta = get_value_from_dict(
+            compose_metadata,
+            'images',
+            'payload',
+            'images',
+            'CloudImages',
+            'x86_64'
+        )
+
+        if images_meta is None:
+            LOG.debug('No compatible image found to process')
+            return
+
+        upload_urls = get_rawxz_urls(location, images_meta)
+        if len(upload_urls) > 0:
+            LOG.info("Start processing compose id: %s", compose_id)
+            fedimg.uploader.upload(
+                pool=self.upload_pool,
+                urls=upload_urls,
+                compose_id=compose_id
+            )
+
+
+class FedimgStagingConsumer(FedimgConsumer):
+    """
+    A `fedmsg consumer`_ that listens to the staging pungi compose topics and
+    kicks off the process to upload the images to various cloud providers.
+
+    Attributes:
+        topic (str): The topics this consumer is subscribed to. Set to
+            ``org.fedoraproject.stg.pungi.compose.status.change``.
+        config_key (str): The key to set to ``True`` in the fedmsg config to
+            enable this consumer. The key is ``fedimgconsumer.stg.enabled``.
+    """
+    topic = ['org.fedoraproject.stg.pungi.compose.status.change']
+    config_key = "fedimgconsumer.stg.enabled"
+
+
+class FedimgDevConsumer(FedimgConsumer):
+    """
+    A `fedmsg consumer`_ that listens to the dev pungi compose topics and
+    kicks off the process to upload the images to various cloud providers.
+
+    Attributes:
+        topic (str): The topics this consumer is subscribed to. Set to
+            ``org.fedoraproject.dev.pungi.compose.status.change``.
+        config_key (str): The key to set to ``True`` in the fedmsg config to
+            enable this consumer. The key is ``fedimgconsumer.dev.enabled``.
+ """ + topic = ['org.fedoraproject.dev.pungi.compose.status.change'] + config_key = "fedimgconsumer.dev.enabled" + diff --git a/roles/fedimg/tasks/main.yml b/roles/fedimg/tasks/main.yml index 4b98023843..5ceacbabcb 100644 --- a/roles/fedimg/tasks/main.yml +++ b/roles/fedimg/tasks/main.yml @@ -134,3 +134,11 @@ tags: - cron - fedimg + +- name: hotfix - copy the consumers.py over to the site-packages + copy: src="{{ files }}/hotfix/fedimg/consumers.py" dest=/usr/lib/python2.7/site-packages/fedimg/consumers.py + notify: + - restart fedmsg-hub + tags: + - fedimg + - hotfix From 586b46910deebbd7db92f74f69c9d7c95c5db3b1 Mon Sep 17 00:00:00 2001 From: Sayan Chowdhury Date: Fri, 6 Apr 2018 05:34:02 +0530 Subject: [PATCH 02/13] fedimg: patch to process the F28+ messages Signed-off-by: Sayan Chowdhury --- files/hotfix/fedimg/consumers.py | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/files/hotfix/fedimg/consumers.py b/files/hotfix/fedimg/consumers.py index 9b5e4a0437..410b64190d 100644 --- a/files/hotfix/fedimg/consumers.py +++ b/files/hotfix/fedimg/consumers.py @@ -80,15 +80,24 @@ class FedimgConsumer(fedmsg.consumers.FedmsgConsumer): location = msg_info['location'] compose_id = msg_info['compose_id'] - compose_metadata = fedfind.release.get_release_cid(compose_id).metadata - images_meta = get_value_from_dict( - compose_metadata, - 'images', - 'payload', - 'images', - 'CloudImages', - 'x86_64' - ) + compose_metadata = fedfind.release.get_release(cid=compose_id).metadata + + # Till F27, both cloud-base and atomic images were available + # under variant CloudImages. With F28 and onward releases, + # cloud-base image compose moved to cloud variant and atomic images + # moved under atomic variant. 
+ prev_rel = ['26', '27'] + if msg_info['release_version'] in prev_rel: + images_meta = get_value_from_dict( + compose_metadata, 'images', 'payload', 'images', 'CloudImages', + 'x86_64') + else: + images_meta = get_value_from_dict( + compose_metadata, 'images', 'payload', 'images', + 'Cloud', 'x86_64') + images_meta.extend(get_value_from_dict( + compose_metadata, 'images', 'payload', + 'images', 'AtomicHost', 'x86_64')) if images_meta is None: LOG.debug('No compatible image found to process') From 32ff935a55dba6991c63769231a8dcaef63da0ad Mon Sep 17 00:00:00 2001 From: Kevin Fenzi Date: Fri, 6 Apr 2018 02:14:14 +0000 Subject: [PATCH 03/13] fix typos in pdc-backend csi data. ticket 6775 --- inventory/group_vars/pdc-backend | 8 ++++---- inventory/group_vars/pdc-backend-stg | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/inventory/group_vars/pdc-backend b/inventory/group_vars/pdc-backend index 97720e7ac3..4509bc28b4 100644 --- a/inventory/group_vars/pdc-backend +++ b/inventory/group_vars/pdc-backend @@ -23,18 +23,18 @@ csi_relationship: | fedmsg-hub daemon that loads the pdc-updater consumer plugin. However, the pdc-updater plugin is configured to do different things in each place. - On pdc-updater01, the compose handler is enabled which listens for new pungi + On pdc-backend01, the compose handler is enabled which listens for new pungi composes, and stores them in PDC. Fedora QE uses this data. The consumer has only a single thread enabled to avoid OOMing itself with more than one compose at a time. - On pdc-updater02, the modularity handlers are enabled which listen for MBS - activity, and store that in PDC. pdc-updater02 also hosts the retirement + On pdc-backend02, the modularity handlers are enabled which listen for MBS + activity, and store that in PDC. pdc-backend02 also hosts the retirement handler which listens to dist-git for new dead.package files, and propagates the retirement to PDC (by prematurely EOLing the branch). 
Multiple threads are enabled so that it can work more efficiently on these smaller tasks. - On pdc-updater03, the dep chain handlers are enabled which listen for koji + On pdc-backend03, the dep chain handlers are enabled which listen for koji messages and store dep chain information in PDC, like what rpms depend on what other rpms at build time, and what containers depend on what rpms, etc.. Multiple threads are enabled so that it can work more efficiently on these diff --git a/inventory/group_vars/pdc-backend-stg b/inventory/group_vars/pdc-backend-stg index 425ca623ef..f07babb410 100644 --- a/inventory/group_vars/pdc-backend-stg +++ b/inventory/group_vars/pdc-backend-stg @@ -23,11 +23,11 @@ csi_relationship: | a fedmsg-hub daemon that loads the pdc-updater consumer plugin. However, the pdc-updater plugin is configured to do different things in each place. - On pdc-updater01, the compose handler is enabled which listens for new pungi + On pdc-backend01, the compose handler is enabled which listens for new pungi composes, and stores them in PDC. Fedora QE uses this data. The consumer has only a single thread enabled to avoid OOMing itself with more than one compose at a time. - On pdc-updater02, the dep chain and modularity handlers are enabled which + On pdc-backend02, the dep chain and modularity handlers are enabled which listen for koji and MBS activity, and store that in PDC. Multiple threads are enabled so that it can work more efficiently on these smaller tasks. 
From 0bb668f2dfd374cd992a45353dbc99943559938f Mon Sep 17 00:00:00 2001 From: Kevin Fenzi Date: Fri, 6 Apr 2018 04:09:44 +0000 Subject: [PATCH 04/13] some more fedmsg can_send --- inventory/group_vars/resultsdb-prod | 1 + inventory/group_vars/resultsdb-stg | 1 + 2 files changed, 2 insertions(+) diff --git a/inventory/group_vars/resultsdb-prod b/inventory/group_vars/resultsdb-prod index 1df269d999..74833e516b 100644 --- a/inventory/group_vars/resultsdb-prod +++ b/inventory/group_vars/resultsdb-prod @@ -76,3 +76,4 @@ fedmsg_certs: group: apache can_send: - taskotron.result.new + - resultsdb.result.new diff --git a/inventory/group_vars/resultsdb-stg b/inventory/group_vars/resultsdb-stg index d834f9c64a..ec0840caae 100644 --- a/inventory/group_vars/resultsdb-stg +++ b/inventory/group_vars/resultsdb-stg @@ -73,3 +73,4 @@ fedmsg_certs: group: apache can_send: - taskotron.result.new + - resultsdb.result.new From 52c43d21481a2ed53c6ef8a09819a8e258c25ccb Mon Sep 17 00:00:00 2001 From: Kevin Fenzi Date: Fri, 6 Apr 2018 05:26:20 +0000 Subject: [PATCH 05/13] adjust shm size check for postgres servers --- roles/rkhunter/templates/rkhunter.conf.j2 | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/roles/rkhunter/templates/rkhunter.conf.j2 b/roles/rkhunter/templates/rkhunter.conf.j2 index 9bff3d0c09..7a521b848c 100644 --- a/roles/rkhunter/templates/rkhunter.conf.j2 +++ b/roles/rkhunter/templates/rkhunter.conf.j2 @@ -643,3 +643,7 @@ SCRIPTWHITELIST=/usr/bin/groups SCRIPTWHITELIST=/usr/bin/GET SCRIPTWHITELIST=/sbin/ifup SCRIPTWHITELIST=/sbin/ifdown +{% if inventory_hostname in groups['dbservers'] or ansible_hostname.startswith(('pagure', 'retrace', 'anitya', 'upstream')) %} +# Set this size very large on postgres running servers. 
+IPC_SEG_SIZE=100000000000 +{% endif %} From 180cc21c6ac8e4ccbe709efdf685a6c9fe76b67a Mon Sep 17 00:00:00 2001 From: Kevin Fenzi Date: Fri, 6 Apr 2018 05:31:38 +0000 Subject: [PATCH 06/13] fix typo --- roles/rkhunter/templates/rkhunter.conf.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/rkhunter/templates/rkhunter.conf.j2 b/roles/rkhunter/templates/rkhunter.conf.j2 index 7a521b848c..4f823fbf1e 100644 --- a/roles/rkhunter/templates/rkhunter.conf.j2 +++ b/roles/rkhunter/templates/rkhunter.conf.j2 @@ -643,7 +643,7 @@ SCRIPTWHITELIST=/usr/bin/groups SCRIPTWHITELIST=/usr/bin/GET SCRIPTWHITELIST=/sbin/ifup SCRIPTWHITELIST=/sbin/ifdown -{% if inventory_hostname in groups['dbservers'] or ansible_hostname.startswith(('pagure', 'retrace', 'anitya', 'upstream')) %} +{% if inventory_hostname in groups['dbservers'] or inventory_hostname.startswith(('pagure', 'retrace', 'anitya', 'upstream')) %} # Set this size very large on postgres running servers. IPC_SEG_SIZE=100000000000 {% endif %} From 6699d4ed8e4332ddcdc6551001413ed887a47507 Mon Sep 17 00:00:00 2001 From: Kevin Fenzi Date: Fri, 6 Apr 2018 05:34:04 +0000 Subject: [PATCH 07/13] fix space --- roles/rkhunter/templates/rkhunter.conf.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/rkhunter/templates/rkhunter.conf.j2 b/roles/rkhunter/templates/rkhunter.conf.j2 index 4f823fbf1e..8667ddc561 100644 --- a/roles/rkhunter/templates/rkhunter.conf.j2 +++ b/roles/rkhunter/templates/rkhunter.conf.j2 @@ -643,7 +643,7 @@ SCRIPTWHITELIST=/usr/bin/groups SCRIPTWHITELIST=/usr/bin/GET SCRIPTWHITELIST=/sbin/ifup SCRIPTWHITELIST=/sbin/ifdown -{% if inventory_hostname in groups['dbservers'] or inventory_hostname.startswith(('pagure', 'retrace', 'anitya', 'upstream')) %} +{% if inventory_hostname in groups['dbservers'] or inventory_hostname.startswith(('pagure','retrace','anitya','upstream')) %} # Set this size very large on postgres running servers. 
IPC_SEG_SIZE=100000000000 {% endif %} From aabe4115b5d6c7925bb37cd00b2cc53b35565c00 Mon Sep 17 00:00:00 2001 From: Kevin Fenzi Date: Fri, 6 Apr 2018 05:38:25 +0000 Subject: [PATCH 08/13] try and simplify --- roles/rkhunter/templates/rkhunter.conf.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/rkhunter/templates/rkhunter.conf.j2 b/roles/rkhunter/templates/rkhunter.conf.j2 index 8667ddc561..28d08530ad 100644 --- a/roles/rkhunter/templates/rkhunter.conf.j2 +++ b/roles/rkhunter/templates/rkhunter.conf.j2 @@ -643,7 +643,7 @@ SCRIPTWHITELIST=/usr/bin/groups SCRIPTWHITELIST=/usr/bin/GET SCRIPTWHITELIST=/sbin/ifup SCRIPTWHITELIST=/sbin/ifdown -{% if inventory_hostname in groups['dbservers'] or inventory_hostname.startswith(('pagure','retrace','anitya','upstream')) %} +{% if inventory_hostname.startswith(('db','pagure','retrace','anitya','upstream')) %} # Set this size very large on postgres running servers. IPC_SEG_SIZE=100000000000 {% endif %} From 46a249f5ad08ae8cf6b198c3d46fdac0d9a81188 Mon Sep 17 00:00:00 2001 From: Clement Verna Date: Fri, 6 Apr 2018 17:03:50 +0200 Subject: [PATCH 09/13] Ensures /etc/dnsmasq.d/ dir exists Signed-off-by: Clement Verna --- playbooks/groups/osbs-cluster.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/playbooks/groups/osbs-cluster.yml b/playbooks/groups/osbs-cluster.yml index 9ea4f06857..bfed5887d6 100644 --- a/playbooks/groups/osbs-cluster.yml +++ b/playbooks/groups/osbs-cluster.yml @@ -305,7 +305,8 @@ state: restarted tasks: - + - name: Ensures /etc/dnsmasq.d/ dir exists + file: path="/etc/dnsmasq.d/" state=directory - name: install fedora dnsmasq specific config copy: src: "{{files}}/osbs/fedora-dnsmasq.conf.{{env}}" From 346d810a6e261400695c0b254904861e144f8fc6 Mon Sep 17 00:00:00 2001 From: Jeremy Cline Date: Fri, 6 Apr 2018 11:05:06 -0400 Subject: [PATCH 10/13] Run release-monitoring db migrations in a pre-deployment step This also adds the necessary alembic configuration. 
Signed-off-by: Jeremy Cline --- .../files/deploymentconfig.yml | 9 +++++ .../templates/configmap.yml | 35 +++++++++++++++++++ 2 files changed, 44 insertions(+) diff --git a/roles/openshift-apps/release-monitoring/files/deploymentconfig.yml b/roles/openshift-apps/release-monitoring/files/deploymentconfig.yml index ba3ad2a846..1ff307439a 100644 --- a/roles/openshift-apps/release-monitoring/files/deploymentconfig.yml +++ b/roles/openshift-apps/release-monitoring/files/deploymentconfig.yml @@ -22,6 +22,15 @@ items: maxUnavailable: 25% timeoutSeconds: 600 updatePeriodSeconds: 1 +{% if env == 'staging' %} + pre: + failurePolicy: Abort + execNewPod: + containerName: release-monitoring-web + command: [ /bin/sh, -i, -c, "alembic -c /etc/anitya/alembic.ini upgrade head" ] + volumes: + - config-volume +{% endif %} type: Rolling template: metadata: diff --git a/roles/openshift-apps/release-monitoring/templates/configmap.yml b/roles/openshift-apps/release-monitoring/templates/configmap.yml index 2392fe8294..aa48fc8cf8 100644 --- a/roles/openshift-apps/release-monitoring/templates/configmap.yml +++ b/roles/openshift-apps/release-monitoring/templates/configmap.yml @@ -70,3 +70,38 @@ data: [anitya_log_config.root] level = "ERROR" handlers = ["console"] + alembic.ini: |- + [alembic] + script_location = anitya:db/migrations + sourceless = false +{% if env == 'staging' %} + sqlalchemy.url = "postgresql://{{ anitya_stg_db_user }}:{{ anitya_stg_db_pass }}@{{ anitya_stg_db_host }}/{{ anitya_stg_db_name }}" +{% else %} + sqlalchemy.url = "postgresql://{{ anitya_db_user }}:{{ anitya_db_pass }}@{{ anitya_db_host }}/{{ anitya_db_name }}" +{% endif %} + [loggers] + keys = root,sqlalchemy,alembic + [handlers] + keys = console + [formatters] + keys = generic + [logger_root] + level = WARN + handlers = console + qualname = + [logger_sqlalchemy] + level = WARN + handlers = + qualname = sqlalchemy.engine + [logger_alembic] + level = INFO + handlers = + qualname = alembic + [handler_console] + 
class = StreamHandler + args = (sys.stderr,) + level = NOTSET + formatter = generic + [formatter_generic] + format = %(levelname)-5.5s [%(name)s] %(message)s + datefmt = %H:%M:%S From 58633c9921907b85ff853c9dd002dbc1e23db40d Mon Sep 17 00:00:00 2001 From: Jeremy Cline Date: Fri, 6 Apr 2018 15:12:54 +0000 Subject: [PATCH 11/13] Drop if statement in deploymentconfig Signed-off-by: Jeremy Cline --- .../release-monitoring/files/deploymentconfig.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/roles/openshift-apps/release-monitoring/files/deploymentconfig.yml b/roles/openshift-apps/release-monitoring/files/deploymentconfig.yml index 1ff307439a..652b9005e7 100644 --- a/roles/openshift-apps/release-monitoring/files/deploymentconfig.yml +++ b/roles/openshift-apps/release-monitoring/files/deploymentconfig.yml @@ -22,7 +22,6 @@ items: maxUnavailable: 25% timeoutSeconds: 600 updatePeriodSeconds: 1 -{% if env == 'staging' %} pre: failurePolicy: Abort execNewPod: @@ -30,7 +29,6 @@ items: command: [ /bin/sh, -i, -c, "alembic -c /etc/anitya/alembic.ini upgrade head" ] volumes: - config-volume -{% endif %} type: Rolling template: metadata: From 51444b5dc85dad8bc318dc3f025def32d7bc6e91 Mon Sep 17 00:00:00 2001 From: Clement Verna Date: Fri, 6 Apr 2018 17:13:16 +0200 Subject: [PATCH 12/13] Let's try without the user in stg Signed-off-by: Clement Verna --- playbooks/groups/osbs-cluster.yml | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/playbooks/groups/osbs-cluster.yml b/playbooks/groups/osbs-cluster.yml index bfed5887d6..c832e28ef1 100644 --- a/playbooks/groups/osbs-cluster.yml +++ b/playbooks/groups/osbs-cluster.yml @@ -325,12 +325,6 @@ - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml tasks: - - name: set policy for koji builder in openshift for osbs - shell: "oadm policy add-role-to-user -n default edit htpasswd_provider: {{ osbs_koji_stg_username }} && touch /etc/origin/koji-builder-policy-added" - args: - creates: 
"/etc/origin/koji-builder-policy-added" - when: env == "staging" - - name: set policy for koji builder in openshift for osbs shell: "oadm policy add-role-to-user -n default edit htpasswd_provider: {{ osbs_koji_prod_username }} && touch /etc/origin/koji-builder-policy-added" args: @@ -341,6 +335,7 @@ shell: "oadm policy add-role-to-user -n default edit system:serviceaccount:default:builder && touch /etc/origin/atomic-reactor-policy-added" args: creates: "/etc/origin/atomic-reactor-policy-added" + when: env == "production" - name: Deploy OSBS on top of OpenShift hosts: osbs-masters-stg[0]:osbs-masters[0] From 53946acfd6d2469065d7e5b1b4fb293eda8bf71a Mon Sep 17 00:00:00 2001 From: Clement Verna Date: Fri, 6 Apr 2018 17:27:46 +0200 Subject: [PATCH 13/13] Add the vars and KUBECONFIG Path Signed-off-by: Clement Verna --- playbooks/groups/osbs-cluster.yml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/playbooks/groups/osbs-cluster.yml b/playbooks/groups/osbs-cluster.yml index c832e28ef1..0ce6f74fc8 100644 --- a/playbooks/groups/osbs-cluster.yml +++ b/playbooks/groups/osbs-cluster.yml @@ -395,6 +395,16 @@ tags: - osbs-worker-namespace user: root + vars_files: + - /srv/web/infra/ansible/vars/global.yml + - "/srv/private/ansible/vars.yml" + - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml + + vars: + osbs_kubeconfig_path: /etc/origin/master/admin.kubeconfig + osbs_environment: + KUBECONFIG: "{{ osbs_kubeconfig_path }}" + roles: - role: osbs-namespace osbs_namespace: "{{ osbs_worker_namespace }}"