10 Commits

Author SHA1 Message Date
Kevin Fenzi
947ad7f7ba nagios: this check is really supposed to be against pagure.io
This was fallout from my sed to change all the references from pagure.io
to forge.fedoraproject.org. In this case though, we do want pagure.io
here because we are using this to check that it's up and working
properly.

Signed-off-by: Kevin Fenzi <kevin@scrye.com>
2026-02-16 11:17:59 -08:00
Kevin Fenzi
b6a7d5edfd nagios: try and rework conditional
Signed-off-by: Kevin Fenzi <kevin@scrye.com>
2026-02-16 10:23:01 -08:00
Kevin Fenzi
24ecee5ebe nagios: try and fix the proxy03/14 problem with missing host because they are in rdu3-iso instead of rdu3
Signed-off-by: Kevin Fenzi <kevin@scrye.com>
2026-02-16 10:02:47 -08:00
Kevin Fenzi
b6ec520bc5 buildhw-x86-02: disable in koji and set to not freeze
I am going to use this builder to test/deploy pesign sigul-dry bridge.
So, it has been disabled in koji and should be ok to test with.
Once things are lined up and tested it can be re-enabled.

Signed-off-by: Kevin Fenzi <kevin@scrye.com>
2026-02-16 09:10:49 -08:00
Kevin Fenzi
6706723eea hardware: adjust inventory to drop p08's and add p09s
The p08 copr machines were in rdu2-cc and are gone now.
The p09 machines in rdu3 are all up and online now.

Signed-off-by: Kevin Fenzi <kevin@scrye.com>
2026-02-16 09:04:49 -08:00
Pavel Raiskup
bf99504840 copr-fe: drop a redundant rule
See https://github.com/fedora-copr/copr/issues/4171
2026-02-16 16:55:45 +01:00
James Antill
c6d0f4e5a3 mirror_from_forge: Change messages from pagure to forgejo.
Signed-off-by: James Antill <james@and.org>
2026-02-16 09:39:58 -05:00
Jiri Kyjovsky
8dd7e55028 copr-hv: enable migrated hvs on copr-be-dev 2026-02-16 15:34:54 +01:00
Miroslav Suchý
68ec08de9f copr: fix name of the pool 2026-02-16 13:28:14 +01:00
Miroslav Suchý
2cfcd10d79 copr: fix name of the pool 2026-02-16 13:21:56 +01:00
12 changed files with 40 additions and 26 deletions

View File

@@ -46,11 +46,11 @@ builders:
p09_hypervisor_04:
ppc64le: [1, 1, 1]
x86_hypervisor_01:
x86_64: [0, 1, 1]
x86_64: [2, 1, 1]
x86_hypervisor_02:
x86_64: [0, 1, 1]
x86_64: [2, 1, 1]
x86_hypervisor_03:
x86_64: [0, 1, 1]
x86_64: [2, 1, 1]
x86_hypervisor_04:
x86_64: [2, 1, 1]

View File

@@ -76,11 +76,12 @@ backup01.rdu3.fedoraproject.org
[powerpc]
#bvmhost-p09-01.stg.rdu3.fedoraproject.org
bvmhost-p09-05.rdu3.fedoraproject.org
vmhost-p08-copr01.rdu-cc.fedoraproject.org
vmhost-p08-copr02.rdu-cc.fedoraproject.org
vmhost-p09-copr01.rdu3.fedoraproject.org
bvmhost-p10-01.rdu3.fedoraproject.org
bvmhost-p10-02.rdu3.fedoraproject.org
vmhost-p09-copr01.rdu3.fedoraproject.org
vmhost-p09-copr02.rdu3.fedoraproject.org
vmhost-p09-copr03.rdu3.fedoraproject.org
vmhost-p09-copr04.rdu3.fedoraproject.org
[appliedmicro]
bvmhost-a64-01.stg.rdu3.fedoraproject.org

View File

@@ -4,6 +4,7 @@ dns1: 10.16.163.33
br0_ipv4_ip: 10.16.169.32
br0_ipv4_gw: 10.16.169.254
br0_ipv4_nm: 24
freezes: false
has_ipv4: yes
has_ipv6: no
mac0: c4:cb:e1:e1:5c:02

View File

@@ -19,7 +19,8 @@ mac5: b4:96:91:63:3b:e9
mac6: b4:96:91:63:3b:ea
mac7: b4:96:91:63:3b:eb
mac8: f4:02:70:d3:15:95
libvirt_pool: copr_hv_x86_64_01
libvirt_host: "[{{ br0_ipv6_ip }}]"
libvirt_pool: vmhost_x86_01
libvirt_pool_order_id: 7
libvirt_arch: x86_64
network_connections:

View File

@@ -18,7 +18,8 @@ mac4: b4:96:91:63:3b:9d
mac5: 84:16:0c:bc:24:e0
mac6: b4:96:91:63:3b:9e
mac7: b4:96:91:63:3b:9f
libvirt_pool: copr_hv_x86_64_02
libvirt_host: "[{{ br0_ipv6_ip }}]"
libvirt_pool: vmhost_x86_02
libvirt_pool_order_id: 8
libvirt_arch: x86_64
network_connections:

View File

@@ -18,7 +18,8 @@ mac4: "b4:96:91:63:3b:50"
mac5: "b4:96:91:63:3b:51"
mac6: "b4:96:91:63:3b:52"
mac7: "b4:96:91:63:3b:53"
libvirt_pool: copr_hv_x86_64_03
libvirt_host: "[{{ br0_ipv6_ip }}]"
libvirt_pool: vmhost_x86_03
libvirt_pool_order_id: 9
libvirt_arch: x86_64
network_connections:

View File

@@ -15,7 +15,7 @@
# default priority is 0
# reserved instances in cloud has > 0
# on-premise instance < 0
# high performance instances <= 40
# high performance instances <= - 40
#
# - if you need to drop a pool, it requires you to do a few steps:
# a) first evacuate the pool by setting `max: 0`,
@@ -26,7 +26,7 @@
{% macro aws(arch, max, max_starting, max_prealloc, spot=False, on_demand=none, priority=0, reserved=False) %}
aws_{{ arch }}_{{ on_demand + '_' if on_demand is not none else '' }}{% if spot %}spot{% else %}normal{% endif %}{% if reserved %}reserved{% endif %}_{% if devel %}dev{% else %}prod{% endif %}:
{% if on_demand %}
{% if on_demand and not reserved %}
max: 10
max_starting: 4
{% elif reserved and devel %}
@@ -195,7 +195,7 @@ copr_osuosl_{% if cpu == "p10" %}p10{% else %}p09{% endif %}_{% if on_demand %}{
{% endmacro %}
# x86_64 hypervisors
{% for hv in ["04"] %}
{% for hv in ["01", "02", "03", "04"] %}
{% if "x86_hypervisor_" + hv in builders %}
vmhost_x86_{{ hv }}_{% if devel %}dev{% else %}prod{% endif %}:
max: {{ builders["x86_hypervisor_" + hv]["x86_64"][0] }}
@@ -470,7 +470,7 @@ copr_ic_s390x_{{ zone }}_{% if devel %}dev{% else %}prod{% endif %}:
# aws(arch, max, max_starting, max_prealloc, spot=False, on_demand=none, priority=0, reserved=False)
{% if not devel %}
{{ aws('x86_64', builders.aws_reserved_powerful.x86_64[0], builders.aws_reserved_powerful.x86_64[1],
builders.aws_reserved_powerful.x86_64[2], priority=-40, reserved=True) }}
builders.aws_reserved_powerful.x86_64[2], on_demand='powerful', reserved=True, priority=-40) }}
{% endif %}

View File

@@ -280,10 +280,6 @@ EXTRA_BUILDCHROOT_TAGS = [{
# powerful builders for RISC-V team - specific packages
"pattern": "@forge-riscv-members/.*/.*riscv64/(kernel|gcc|llvm|clang).*",
"tags": ["on_demand_powerful"],
}, {
# powerful builders for RISC-V team - repos ending with _kernel
"pattern": "@forge-riscv-members/.*_kernel/.*riscv64/.*",
"tags": ["on_demand_powerful"],
}]
{% endif %}

View File

@@ -13,9 +13,8 @@ callback = "mirror_from_forge_bus:MirrorFromForge"
queue = "mirror_forge_ansible{{ env_suffix }}"
{% endif %}
exchange = "amq.topic"
# FIXME: This key is probably wrong.
routing_keys = [
"org.fedoraproject.prod.forgejo.git.receive",
"org.fedoraproject.prod.forgejo.push",
]
[tls]

View File

@@ -11,8 +11,10 @@ import time
from fedora_messaging import config, message
# FIXME: This key is probably wrong
_msg_topic = "org.fedoraproject.prod.forgejo.git.receive"
_msg_topic = "org.fedoraproject.prod.forgejo.push"
# "pagure" or "forgejo"
_msg_from = "forgejo"
_log = logging.getLogger("mirror_from_forge_bus")
@@ -73,7 +75,12 @@ class MirrorFromForge(object):
msg = message.Message
msg.topic = _msg_topic
msg.body = {"repo": {"fullname": self.trigger_names[0]}}
if _msg_from is None:
pass
elif _msg_from == "forgejo": # Lots of things missing here...
msg.body = {"repository": {"full_name": self.trigger_names[0]}}
elif _msg_from == "pagure":
msg.body = {"repo": {"fullname": self.trigger_names[0]}}
self.__call__(message=msg)
def __call__(self, message, cnt=0):
@@ -85,7 +92,14 @@ class MirrorFromForge(object):
"""
_log.info("Received topic: %s", message.topic)
if message.topic == _msg_topic:
repo_name = message.body.get("repo", {}).get("fullname")
# In theory we could try both here, but it might be confusing later
# so just use _msg_from and try one.
if _msg_from is None:
pass
elif _msg_from == "forgejo":
repo_name = message.body.get("repository", {}).get("full_name")
elif _msg_from == "pagure":
repo_name = message.body.get("repo", {}).get("fullname")
if repo_name not in self.trigger_names:
_log.info("%s is not a forge repo of interest, bailing", repo_name)
return

View File

@@ -1,5 +1,5 @@
{% for host in groups['all']|sort %}
{% if hostvars[host].datacenter == 'rdu3' and hostvars[host].nagios_Can_Connect == true %}
{% if hostvars[host].datacenter.startswith('rdu3') and hostvars[host].nagios_Can_Connect == true %}
define host {
{% if hostvars[host].nagios_Check_Services['nrpe'] == true %}
use defaulttemplate

View File

@@ -246,7 +246,7 @@ define service {
define service {
host_name pagure.io
service_description https://forge.fedoraproject.org/infra/tickets
check_command check_website_follow!pagure.io!https://forge.fedoraproject.org/infra/tickets!Issues
check_command check_website_follow!pagure.io!https://pagure.io/infra/tickets!Issues
max_check_attempts 8
use websitetemplate
}