---
#
# Notes for Resalloc configuration file:
#
{% if False %}
# - This is a rather complex Jinja template.  Feel free to use the helper
#   script to expand it locally before you commit any change to this file:
#   ./roles/copr/backend/templates/resalloc/pools.yaml.expand.sh
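#
#   For example (a sketch; run it from the top of the ansible checkout):
#
#     $ ./roles/copr/backend/templates/resalloc/pools.yaml.expand.sh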
#
{% endif %}
# - You do NOT need to restart the resalloc server after changing this file;
#   it automatically reloads the configuration.
#
# - higher-priority pools are used first:
#   the default priority is 0,
#   reserved instances in the cloud have priority > 0,
#   on-premise instances have priority < 0,
#   high-performance instances have priority <= -40
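#
#   For example, an illustrative sketch of the convention (the pool and
#   tag names here are hypothetical, they do not exist in this file):
#
#     example_reserved_cloud_pool:
#       tags:
#         - name: copr_builder
#           priority: 20     # paid in advance, prefer it
#     example_on_premise_pool:
#       tags:
#         - name: copr_builder
#           priority: -10    # used once the reserved capacity is busy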
#
# - if you need to drop a pool, a few steps are required:
#   a) first evacuate the pool by setting `max: 0`,
#   b) wait till all machines are deallocated (the cmd_delete must still be
#      defined for this pool!),
#   c) then you may remove the pool configuration from this file, and finally
#   d) remove the pool from the PostgreSQL database.
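#
#   For illustration, a pool evacuated in step a) keeps its delete/list
#   commands so the remaining machines can still be torn down; the
#   "Evacuate machines in older pools" section at the end of this file
#   uses the same pattern (`old_pool_prod` is a hypothetical name):
#
#     old_pool_prod:
#       max: 0
#       cmd_new: /bin/true
#       cmd_delete: "/var/lib/resallocserver/resalloc_provision/vm-delete"
#       cmd_list: resalloc-aws-list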
{% macro aws(arch, max, max_starting, max_prealloc, spot=False, on_demand=none, priority=0, reserved=False) %}
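{# A note on the arguments: when on_demand is set, max/max_starting are
   fixed to 10/4 and the values passed in are ignored; reserved pools
   are disabled entirely in devel (max: 0). #}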
aws_{{ arch }}_{{ on_demand + '_' if on_demand is not none else '' }}{% if spot %}spot{% else %}normal{% endif %}{% if reserved %}reserved{% endif %}_{% if devel %}dev{% else %}prod{% endif %}:
{% if on_demand %}
max: 10
max_starting: 4
{% elif reserved and devel %}
max: 0
{% else %}
max: {{ max }}
max_starting: {{ max_starting }}
max_prealloc: {{ max_prealloc }}
{% endif %}
tags:
- name: copr_builder
priority: {{ priority }}
- aws
- arch_noarch
{% if arch == 'x86_64' %}
- arch_x86_64
- arch_x86_64_native
- arch_x86_64_v2
- arch_x86_64_v2_native
- arch_i386
- arch_i386_native
- arch_i586
- arch_i586_native
- arch_i686
- arch_i686_native
- arch_armhfp
- arch_armhfp_emulated
- arch_riscv64
- arch_riscv64_emulated
{% else %}
- arch_aarch64
- arch_aarch64_native
{% endif %}
{% if on_demand %}
tags_on_demand:
- on_demand_{{ on_demand }}
{% endif %}
{% if arch == 'x86_64' and on_demand is none %}
{% set itype='c7i.xlarge' %}
{% elif arch == 'x86_64' and on_demand == 'powerful' %}
{% set itype='c7a.8xlarge' %}
{% elif arch == 'aarch64' and on_demand is none %}
{% set itype='c7g.xlarge' %}
{% elif arch == 'aarch64' and on_demand == 'powerful' %}
{% set itype='c7g.8xlarge' %}
{% endif %}
{% if spot and on_demand == 'powerful' %}
cmd_new: copr-resalloc-vm-ip-to-yaml copr-resalloc-aws-new-{{ arch }} --instance-type {{ itype }} --additional-volume-size 320 --root-volume-size 18
{% elif spot %}
cmd_new: copr-resalloc-vm-ip-to-yaml copr-resalloc-aws-new-{{ arch }} --instance-type {{ itype }} --additional-volume-size 160 --spot-price 0.156
{% elif on_demand == 'powerful' %}
cmd_new: copr-resalloc-vm-ip-to-yaml copr-resalloc-aws-new-{{ arch }} --instance-type {{ itype }} --additional-volume-size 320
{% else %}
cmd_new: copr-resalloc-vm-ip-to-yaml copr-resalloc-aws-new-{{ arch }} --instance-type {{ itype }} --additional-volume-size 160
{% endif %}
cmd_delete: "/var/lib/resallocserver/resalloc_provision/vm-delete"
cmd_livecheck: "resalloc-check-vm-ip"
cmd_release: "/var/lib/resallocserver/resalloc_provision/vm-release"
cmd_list: resalloc-aws-list
livecheck_period: 180
reuse_opportunity_time: 90
reuse_max_count: 8
reuse_max_time: 1800
description: >
A pool of
{% if spot and on_demand == 'powerful' %}powerful spot
{% elif spot %}spot
{% elif reserved and on_demand == 'powerful' %}reserved powerful
{% elif reserved %}reserved
{% elif on_demand == 'powerful' %}powerful
{% endif %}
{{ arch }} instances in the Amazon AWS Fedora organization.
{% if spot %}
    Spot instances are cheaper, but the trade-off is that AWS can
    kill them at any point during the build.  A user will not notice
    because the build will automatically be picked up by another
    builder.
{% endif %}
{% if on_demand == 'powerful' %}
    Powerful builders have more computing power and more storage
    than regular builders, but there is a longer queue for them.
{% endif %}
{% if reserved %}
    Reserved builders don't have any special properties.  They are
    just paid for in advance, so they are cheaper.
{% endif %}
They are located in us-east-1 (N. Virginia).
Thank you Amazon Web Services, Inc. for sponsoring these builders.
{% endmacro %}
{% macro osuosl_p09_p10(on_demand=none, cpu="p09") %}
{% if on_demand == 'powerful' %}
{% set itype='321a7d9a-ecb7-4592-a8b4-a4bb78481e2e' %}
{% endif %}
{% if on_demand is none %}
# Power{% if cpu == "p10" %}10{% else %}9{% endif %} VMs in openpower-controller.osuosl.org
{% else %}
# Power{% if cpu == "p10" %}10{% else %}9{% endif %} VMs in openpower-controller.osuosl.org for on-demand {{ on_demand }}
{% endif %}
copr_osuosl_{% if cpu == "p10" %}p10{% else %}p09{% endif %}_{% if on_demand %}{{ on_demand }}_{% endif %}{% if devel %}dev{% else %}prod{% endif %}:
{% if on_demand == 'powerful' %}
{% if devel %}
# just for testing, turned off by default
max: 0
{% else %}
max: 3
{% endif %}
max_starting: 1
{% else %}
{% if devel %}
max: 3
max_prealloc: 1
max_starting: 1
{% else %}
max: 15
max_prealloc: 6
max_starting: 4
{% endif %}
{% endif %}
tags:
- copr_builder
- name: arch_noarch
priority: -25
- name: arch_ppc64le
priority: -10
- arch_ppc64le_native
- in_osuosl
{% if cpu == "p10" %}
# Power9 builds may be done on P10, too.
{% endif %}
- name: arch_power9
priority: -10
{% if cpu == "p10" %}
- name: arch_power10
priority: -10
{% endif %}
{% if on_demand == 'powerful' %}
tags_on_demand:
- on_demand_{{ on_demand }}
# name: p10.nvme.c16m64d320
cmd_new: 'copr-resalloc-vm-ip-to-yaml /var/lib/resallocserver/resalloc_provision/osuosl-vm --flavor {{ itype }}'
{% else %}
cmd_new: 'copr-resalloc-vm-ip-to-yaml /var/lib/resallocserver/resalloc_provision/osuosl-vm'
{% endif %}
cmd_delete: "/var/lib/resallocserver/resalloc_provision/vm-delete"
cmd_livecheck: "resalloc-check-vm-ip"
cmd_release: "/var/lib/resallocserver/resalloc_provision/vm-release"
cmd_list: '/var/lib/resallocserver/resalloc_provision/osuosl-list'
livecheck_period: 180
reuse_opportunity_time: 90
reuse_max_count: 8
reuse_max_time: 1800
description: >
A pool of {% if on_demand == 'powerful' %}powerful {% endif %}ppc64le instances on hypervisors in the OSU Open Source Lab.
These machines have POWER{% if cpu == "p10" %}10{% else %}9{% endif %} processors.
Thank you Oregon State University for sponsoring these builders.
{% endmacro %}
# x86_64 hypervisors
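{# The builders["..."]["<arch>"] triples come from group_vars and mean
   [max, max_starting, max_prealloc]; the same convention is used by the
   other hypervisor and cloud pools below. #}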
{% for hv in ["04"] %}
{% if "x86_hypervisor_" + hv in builders %}
vmhost_x86_{{ hv }}_{% if devel %}dev{% else %}prod{% endif %}:
max: {{ builders["x86_hypervisor_" + hv]["x86_64"][0] }}
max_starting: {{ builders["x86_hypervisor_" + hv]["x86_64"][1] }}
max_prealloc: {{ builders["x86_hypervisor_" + hv]["x86_64"][2] }}
tags:
- copr_builder
- arch_noarch
- arch_x86_64
- arch_x86_64_native
- arch_x86_64_v2
- arch_x86_64_v2_native
- arch_i386
- arch_i386_native
- arch_i586
- arch_i586_native
- arch_i686
- arch_i686_native
- arch_armhfp
- arch_armhfp_emulated
- arch_riscv64
- arch_riscv64_emulated
- hypervisor
- hypervisor_x86_64
- hypervisor_x86_64_{{ hv }}
cmd_new: "copr-resalloc-vm-ip-to-yaml /var/lib/resallocserver/provision/libvirt-new --ram-size 16384 --swap-vol-size 168"
cmd_delete: "/var/lib/resallocserver/resalloc_provision/vm-delete"
cmd_livecheck: "resalloc-check-vm-ip"
cmd_release: "/var/lib/resallocserver/resalloc_provision/vm-release"
cmd_list: "/var/lib/resallocserver/provision/libvirt-list"
livecheck_period: 180
reuse_opportunity_time: 90
reuse_max_count: 8
reuse_max_time: 1800
description: >
A pool of x86_64 instances in the Fedora Community Cage.
    They are located in RDU (N. Carolina).
Thank you Fedora Infrastructure team for maintaining the hypervisors.
{% else %}
#
# x86_hypervisor_{{ hv }} not configured in group_vars
#
{% endif %}
{% endfor %}
# Power8 hypervisors
{% for hv in ["01", "02", "03"] %}
{% if "ppc64le_hypervisor_" + hv in builders %}
copr_hv_ppc64le_{{ hv }}_{% if devel %}dev{% else %}prod{% endif %}:
max: {{ builders["ppc64le_hypervisor_" + hv]["ppc64le"][0] }}
max_starting: {{ builders["ppc64le_hypervisor_" + hv]["ppc64le"][1] }}
max_prealloc: {{ builders["ppc64le_hypervisor_" + hv]["ppc64le"][2] }}
tags:
- copr_builder
- name: arch_noarch
priority: -8
- arch_ppc64le
- arch_ppc64le_native
- hypervisor
- hypervisor_ppc64le
- hypervisor_ppc64le_{{ hv }}
- arch_power8
cmd_new: "copr-resalloc-vm-ip-to-yaml /var/lib/resallocserver/provision/libvirt-new --swap-vol-size 168"
cmd_delete: "/var/lib/resallocserver/resalloc_provision/vm-delete"
cmd_livecheck: "resalloc-check-vm-ip"
cmd_release: "/var/lib/resallocserver/resalloc_provision/vm-release"
cmd_list: "/var/lib/resallocserver/provision/libvirt-list"
livecheck_period: 180
reuse_opportunity_time: 90
reuse_max_count: 8
reuse_max_time: 1800
description: >
A pool of ppc64le instances in the Fedora Community Cage.
    These machines have POWER8 processors and are located in RDU (N. Carolina).
Thank you Fedora Infrastructure team for maintaining the hypervisors.
{% endif %}
{% endfor %}
# Power9 hypervisors
{% for hv in ["01", "02", "03", "04"] %}
{% if "p09_hypervisor_" + hv in builders %}
vmhost_p09_{{ hv }}_{% if devel %}dev{% else %}prod{% endif %}:
max: {{ builders["p09_hypervisor_" + hv]["ppc64le"][0] }}
max_starting: {{ builders["p09_hypervisor_" + hv]["ppc64le"][1] }}
max_prealloc: {{ builders["p09_hypervisor_" + hv]["ppc64le"][2] }}
tags:
- copr_builder
- name: arch_noarch
priority: -8
- arch_ppc64le
- arch_ppc64le_native
- hypervisor
- hypervisor_p09
- hypervisor_p09_{{ hv }}
- arch_power9
# The Power9 machine has 160 threads; the bottleneck is certainly the
# small disk, so it is acceptable that some CPU capacity goes to waste.
cmd_new: "copr-resalloc-vm-ip-to-yaml /var/lib/resallocserver/provision/libvirt-new --cpu-count 5 --ram-size 16384 --swap-vol-size 168"
cmd_delete: "/var/lib/resallocserver/resalloc_provision/vm-delete"
cmd_livecheck: "resalloc-check-vm-ip"
cmd_release: "/var/lib/resallocserver/resalloc_provision/vm-release"
cmd_list: "/var/lib/resallocserver/provision/libvirt-list"
livecheck_period: 180
reuse_opportunity_time: 90
reuse_max_count: 8
reuse_max_time: 1800
description: >
A pool of ppc64le instances in the Fedora Community Cage.
    These machines have POWER9 processors and are located in RDU (N. Carolina).
Thank you Fedora Infrastructure team for maintaining the hypervisors.
{% endif %}
{% endfor %}
# IBM Z cloud instances in Washington, D.C. (hp == high performance)
copr_ic_s390x_hp_us_east_{% if devel %}dev{% else %}prod{% endif %}:
max: {{ builders.ibm_cloud_us_east_hp.s390x[0] }}
max_starting: {{ builders.ibm_cloud_us_east_hp.s390x[1] }}
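{# In devel, the arch_s390x tags below are placed under tags_on_demand
   (together with on_demand_powerful, and with a very low priority), so
   these machines are allocated only when explicitly requested; in prod
   they are ordinary tags and only on_demand_powerful stays on-demand. #}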
tags:
- copr_builder
- name: arch_noarch
priority: -30
- ibm_cloud
- us_east
{% if devel %}
tags_on_demand:
{% endif %}
- name: arch_s390x_native
- name: arch_s390x
{% if devel %}
priority: -300
{% else %}
tags_on_demand:
{% endif %}
- on_demand_powerful
{% set pool_config = resalloc_pools.ibm_us_east %}
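{# The '" "'.join(...) below expands the subnets list into separately
   quoted CLI arguments, e.g. "subnet-a" "subnet-b" for a two-item list. #}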
cmd_new: |
copr-resalloc-vm-ip-to-yaml \
resalloc-ibm-cloud-vm \
--token-file "{{ ibmcloud_token_file }}" \
--region us-east \
--log-level debug \
create "$RESALLOC_NAME" \
--playbook "{{ provision_directory }}/libvirt-provision.yml" \
--image-uuid "{{ pool_config.images.s390x }}" \
--vpc-id "{{ pool_config.vpc }}" \
--security-group-id "{{ pool_config.security_group }}" \
--ssh-key-id "{{ pool_config.ssh_key }}" \
--instance-type "{{ cloud_instance_types.ibm_cloud.s390x_hp }}" \
--subnets-ids "{{ '" "'.join(pool_config.subnets) }}" \
--additional-volume-size "320"
cmd_delete: |
/var/lib/resallocserver/resalloc_provision/vm-delete
resalloc-ibm-cloud-vm --token-file "{{ ibmcloud_token_file }}" \
--region us-east --log-level debug delete "$RESALLOC_NAME"
cmd_livecheck: "resalloc-check-vm-ip"
cmd_release: "/var/lib/resallocserver/resalloc_provision/vm-release"
cmd_list: resalloc-ibm-cloud-list-vms --token-file "{{ ibmcloud_token_file }}" --region us-east
livecheck_period: 180
reuse_opportunity_time: 45
reuse_max_count: 8
reuse_max_time: 1800
description: >
A pool of high-performance s390x instances in the IBM Cloud, Washington, D.C. (us-east).
Thanks to IBM for sponsoring these builders.
{% for zone in ['br_sao_1', 'br_sao_2', 'br_sao_3', 'eu_es_1', 'eu_es_2', 'eu_es_3'] %}
copr_ic_s390x_{{ zone }}_{% if devel %}dev{% else %}prod{% endif %}:
max: {{ builders['ibm_cloud_' + zone].s390x[0] }}
max_starting: {{ builders['ibm_cloud_' + zone].s390x[1] }}
{% if not devel %}
max_prealloc: {{ builders['ibm_cloud_' + zone].s390x[2] }}
{% endif %}
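{# Same pattern as in the high-performance pool above: in devel, the
   arch_s390x tags are on-demand only. #}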
tags:
- copr_builder
- name: arch_noarch
priority: -30
- ibm_cloud
- {{ zone }}
{% if devel %}
tags_on_demand:
{% endif %}
- name: arch_s390x_native
- name: arch_s390x
cmd_new: |
copr-resalloc-vm-ip-to-yaml \
{% set pool_config = resalloc_pools["ibm_" + zone] %}
resalloc-ibm-cloud-vm \
--token-file "{{ ibmcloud_token_file }}" \
--region {{ pool_config.region_config.name }} \
--log-level debug \
create "$RESALLOC_NAME" \
--playbook "{{ provision_directory }}/libvirt-provision.yml" \
--image-uuid "{{ pool_config.region_config.images.s390x }}" \
--vpc-id "{{ pool_config.region_config.vpc }}" \
--security-group-id "{{ pool_config.region_config.security_group }}" \
--ssh-key-id "{{ pool_config.region_config.ssh_key }}" \
--instance-type "{{ cloud_instance_types.ibm_cloud.s390x }}" \
--subnets-ids "{{ pool_config.zone }}:{{ pool_config.subnet }}" \
{% for ip in pool_config.floating_ips %}
--floating-ip-uuid-in-subnet {{ pool_config.subnet }} {{ ip }} \
{% endfor %}
--additional-volume-size "160"
cmd_delete: |
/var/lib/resallocserver/resalloc_provision/vm-delete
resalloc-ibm-cloud-vm --token-file "{{ ibmcloud_token_file }}" \
--region {{ pool_config.region_config.name }} --log-level debug delete "$RESALLOC_NAME"
cmd_livecheck: "resalloc-check-vm-ip"
cmd_release: "/var/lib/resallocserver/resalloc_provision/vm-release"
cmd_list: resalloc-ibm-cloud-list-vms --token-file "{{ ibmcloud_token_file }}" --region {{ pool_config.region_config.name }}
livecheck_period: 180
reuse_opportunity_time: 45
reuse_max_count: 8
reuse_max_time: 1800
description: >
A pool of s390x instances in the IBM Cloud, {{ pool_config.region_config.name_humans }} ({{ pool_config.zone }}).
Thanks to IBM for sponsoring these builders.
{% endfor %}
# Power9 builders in the OSU Open Source Lab (osuosl)
{{ osuosl_p09_p10() }}
# High-performance Power10 builders in the OSU Open Source Lab (osuosl)
{{ osuosl_p09_p10(on_demand='powerful', cpu="p10") }}
#### AWS x86_64
# on-premise hypervisors have priority 0 and above
# AWS should have priority < 0, unless it is a reserved instance for which we pay anyway
# reserved instances
# aws(arch, max, max_starting, max_prealloc, spot=False, on_demand=none, priority=0, reserved=False)
{% if not devel %}
{{ aws('x86_64', builders.aws_reserved.x86_64[0], builders.aws_reserved.x86_64[1],
builders.aws_reserved.x86_64[2], priority=20, reserved=True) }}
{% endif %}
{{ aws('x86_64', builders.aws_spot.x86_64[0], builders.aws_spot.x86_64[1],
builders.aws_spot.x86_64[2], True, priority=-2) }}
{% if not devel %}
{{ aws('x86_64', builders.aws.x86_64[0], builders.aws.x86_64[1],
builders.aws.x86_64[2], priority=-10 ) }}
{% endif %}
#### aarch64 builders
{% if not devel %}
# reserved instance
{{ aws('aarch64', builders.aws_reserved.aarch64[0], builders.aws_reserved.aarch64[1],
builders.aws_reserved.aarch64[2], priority=21, reserved=True) }}
{% endif %}
{{ aws('aarch64', builders.aws_spot.aarch64[0], builders.aws_spot.aarch64[1],
builders.aws_spot.aarch64[2], spot=True, priority=-1) }}
{% if not devel %}
{{ aws('aarch64', builders.aws.aarch64[0], builders.aws.aarch64[1],
builders.aws.aarch64[2], priority=-5) }}
{% endif %}
#### High-performance builders
# priority should be lower than that of any normal builder, i.e. <= -40
{{ aws('x86_64', builders.aws_powerful.x86_64[0], builders.aws_powerful.x86_64[1],
builders.aws_powerful.x86_64[2], spot=True, on_demand='powerful', priority=-40) }}
{{ aws('aarch64', builders.aws_powerful.aarch64[0], builders.aws_powerful.aarch64[1],
builders.aws_powerful.aarch64[2], spot=True, on_demand='powerful', priority=-40) }}
{{ aws('x86_64', builders.aws_powerful.x86_64[0], builders.aws_powerful.x86_64[1],
builders.aws_powerful.x86_64[2], on_demand='powerful', priority=-60) }}
{{ aws('aarch64', builders.aws_powerful.aarch64[0], builders.aws_powerful.aarch64[1],
builders.aws_powerful.aarch64[2], on_demand='powerful', priority=-60) }}
#### Evacuate machines in older pools
{% for poolname in [] %}
{{ poolname }}_{% if devel %}dev{% else %}prod{% endif %}:
max: 0
cmd_new: /bin/true
cmd_delete: "/var/lib/resallocserver/resalloc_provision/vm-delete"
cmd_list: resalloc-aws-list
{% endfor %}