Job:
periodic-ci-openshift-release-master-nightly-4.14-e2e-aws-ovn-single-node (all) - 36 runs, 25% failed, 344% of failures match = 86% impact
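The impact arithmetic in that summary can be cross-checked from the run counts alone; a minimal sketch in Python, assuming the 31 matching runs implied by the percentages in the footer at the end of this report (the run and failure counts are the only inputs taken from the report itself):

    # Cross-check the summary: 36 runs, 25% failed, "344% of failures match = 86% impact".
    # The 31 matching runs is inferred from 86.11% of 36 (see the footer below).
    total_runs = 36
    failed_runs = int(total_runs * 0.25)        # 9 failed runs
    matching_runs = 31                          # runs where this search matched

    impact = matching_runs / total_runs         # 31/36 = 0.8611 -> "86% impact"
    match_rate = matching_runs / failed_runs    # 31/9  = 3.4444 -> "344% of failures match"

    # Matches can exceed failures, likely because the search also counts runs
    # that matched the query but ultimately passed (flakes).
    print(f"impact={impact:.2%}  matches/failures={match_rate:.2%}")
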
#1791487618895056896 junit, 2 days ago
V2 alert AlertmanagerReceiversNotConfigured fired for 1h1m30s seconds with labels: ALERTS{alertname="AlertmanagerReceiversNotConfigured", alertstate="firing", namespace="openshift-monitoring", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert ExtremelyHighIndividualControlPlaneCPU fired for 2m58s seconds with labels: ALERTS{alertname="ExtremelyHighIndividualControlPlaneCPU", alertstate="firing", instance="ip-10-0-0-25.ec2.internal", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 2m58s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 4m28s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert PodSecurityViolation fired for 7m38s seconds with labels: ALERTS{alertname="PodSecurityViolation", alertstate="firing", namespace="openshift-kube-apiserver", policy_level="restricted", prometheus="openshift-monitoring/k8s", severity="info"} result=reject
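The ALERTS{...} selectors in these entries are ordinary Prometheus series, so the same firing alerts can be inspected on a live cluster; a rough sketch using the Prometheus HTTP API (the route host and token below are placeholders, not values taken from this job):

    # Query the cluster monitoring stack for currently firing alerts, mirroring
    # the ALERTS{...} selectors in the entries above. PROM_URL and TOKEN are
    # placeholders; on OpenShift they would typically come from
    #   oc -n openshift-monitoring get route thanos-querier
    #   oc whoami -t
    import requests

    PROM_URL = "https://thanos-querier-openshift-monitoring.apps.example.com"  # placeholder
    TOKEN = "..."                                                               # placeholder

    resp = requests.get(
        f"{PROM_URL}/api/v1/query",
        params={"query": 'ALERTS{alertstate="firing", namespace="openshift-kube-apiserver"}'},
        headers={"Authorization": f"Bearer {TOKEN}"},
    )
    resp.raise_for_status()
    for series in resp.json()["data"]["result"]:
        print(series["metric"].get("alertname"), series["metric"])
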
#1791562553780867072 junit, 2 days ago
V2 alert AlertmanagerReceiversNotConfigured fired for 1h8m46s seconds with labels: ALERTS{alertname="AlertmanagerReceiversNotConfigured", alertstate="firing", namespace="openshift-monitoring", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 3m28s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 6m28s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert NodeSystemSaturation fired for 1m58s seconds with labels: ALERTS{alertname="NodeSystemSaturation", alertstate="firing", container="kube-rbac-proxy", endpoint="https", instance="ip-10-0-22-38.us-west-2.compute.internal", job="node-exporter", namespace="openshift-monitoring", pod="node-exporter-2c5jv", prometheus="openshift-monitoring/k8s", service="node-exporter", severity="warning"} result=reject
#1791456463168737280 junit, 2 days ago
V2 alert AlertmanagerReceiversNotConfigured fired for 1h3m30s seconds with labels: ALERTS{alertname="AlertmanagerReceiversNotConfigured", alertstate="firing", namespace="openshift-monitoring", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert ExtremelyHighIndividualControlPlaneCPU fired for 2m28s seconds with labels: ALERTS{alertname="ExtremelyHighIndividualControlPlaneCPU", alertstate="firing", instance="ip-10-0-112-240.us-east-2.compute.internal", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 2m58s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 4m28s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert KubePodNotReady fired for 28m58s seconds with labels: ALERTS{alertname="KubePodNotReady", alertstate="firing", namespace="openshift-marketplace", pod="community-operators-ncj2c", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
#1791420260629352448 junit, 2 days ago
V2 alert AlertmanagerReceiversNotConfigured fired for 1h13m50s seconds with labels: ALERTS{alertname="AlertmanagerReceiversNotConfigured", alertstate="firing", namespace="openshift-monitoring", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert ExtremelyHighIndividualControlPlaneCPU fired for 58s seconds with labels: ALERTS{alertname="ExtremelyHighIndividualControlPlaneCPU", alertstate="firing", instance="ip-10-0-100-187.us-west-1.compute.internal", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 2m58s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert PodSecurityViolation fired for 13m44s seconds with labels: ALERTS{alertname="PodSecurityViolation", alertstate="firing", namespace="openshift-kube-apiserver", policy_level="restricted", prometheus="openshift-monitoring/k8s", severity="info"} result=reject
#1791390627485716480 junit, 3 days ago
V2 alert AlertmanagerReceiversNotConfigured fired for 1h3m36s seconds with labels: ALERTS{alertname="AlertmanagerReceiversNotConfigured", alertstate="firing", namespace="openshift-monitoring", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert ExtremelyHighIndividualControlPlaneCPU fired for 3m58s seconds with labels: ALERTS{alertname="ExtremelyHighIndividualControlPlaneCPU", alertstate="firing", instance="ip-10-0-63-166.ec2.internal", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 3m58s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 4m58s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert PodSecurityViolation fired for 14m46s seconds with labels: ALERTS{alertname="PodSecurityViolation", alertstate="firing", namespace="openshift-kube-apiserver", policy_level="restricted", prometheus="openshift-monitoring/k8s", severity="info"} result=reject
#1791269838706970624 junit, 3 days ago
V2 alert AlertmanagerReceiversNotConfigured fired for 1h5m6s seconds with labels: ALERTS{alertname="AlertmanagerReceiversNotConfigured", alertstate="firing", namespace="openshift-monitoring", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 3m28s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 4m28s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert PodSecurityViolation fired for 18m36s seconds with labels: ALERTS{alertname="PodSecurityViolation", alertstate="firing", namespace="openshift-kube-apiserver", policy_level="restricted", prometheus="openshift-monitoring/k8s", severity="info"} result=reject
#1791187456243011584 junit, 3 days ago
V2 alert AlertmanagerReceiversNotConfigured fired for 1h16m16s seconds with labels: ALERTS{alertname="AlertmanagerReceiversNotConfigured", alertstate="firing", namespace="openshift-monitoring", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 3m28s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 4m58s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert NodeSystemSaturation fired for 58s seconds with labels: ALERTS{alertname="NodeSystemSaturation", alertstate="firing", container="kube-rbac-proxy", endpoint="https", instance="ip-10-0-112-248.us-west-1.compute.internal", job="node-exporter", namespace="openshift-monitoring", pod="node-exporter-n8slt", prometheus="openshift-monitoring/k8s", service="node-exporter", severity="warning"} result=reject
#1790717675194814464 junit, 4 days ago
V2 alert AlertmanagerReceiversNotConfigured fired for 1h10m18s seconds with labels: ALERTS{alertname="AlertmanagerReceiversNotConfigured", alertstate="firing", namespace="openshift-monitoring", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 2m58s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 4m58s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert NodeSystemSaturation fired for 28s seconds with labels: ALERTS{alertname="NodeSystemSaturation", alertstate="firing", container="kube-rbac-proxy", endpoint="https", instance="ip-10-0-71-141.ec2.internal", job="node-exporter", namespace="openshift-monitoring", pod="node-exporter-w7z4z", prometheus="openshift-monitoring/k8s", service="node-exporter", severity="warning"} result=reject
#1790684930137657344 junit, 4 days ago
V2 alert AlertmanagerReceiversNotConfigured fired for 1h4m34s seconds with labels: ALERTS{alertname="AlertmanagerReceiversNotConfigured", alertstate="firing", namespace="openshift-monitoring", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert ExtremelyHighIndividualControlPlaneCPU fired for 28s seconds with labels: ALERTS{alertname="ExtremelyHighIndividualControlPlaneCPU", alertstate="firing", instance="ip-10-0-121-79.us-east-2.compute.internal", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert ExtremelyHighIndividualControlPlaneCPU fired for 2m58s seconds with labels: ALERTS{alertname="ExtremelyHighIndividualControlPlaneCPU", alertstate="firing", instance="ip-10-0-121-79.us-east-2.compute.internal", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 2m58s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 4m58s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert PodSecurityViolation fired for 14m42s seconds with labels: ALERTS{alertname="PodSecurityViolation", alertstate="firing", namespace="openshift-kube-apiserver", policy_level="restricted", prometheus="openshift-monitoring/k8s", severity="info"} result=reject
#1790610386563633152 junit, 5 days ago
V2 alert AlertmanagerReceiversNotConfigured fired for 1h9m24s seconds with labels: ALERTS{alertname="AlertmanagerReceiversNotConfigured", alertstate="firing", namespace="openshift-monitoring", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 2m58s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 4m58s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert PodSecurityViolation fired for 21m54s seconds with labels: ALERTS{alertname="PodSecurityViolation", alertstate="firing", namespace="openshift-kube-apiserver", policy_level="restricted", prometheus="openshift-monitoring/k8s", severity="info"} result=reject
#1790510568637992960 junit, 5 days ago
V2 alert AlertmanagerReceiversNotConfigured fired for 1h5m48s seconds with labels: ALERTS{alertname="AlertmanagerReceiversNotConfigured", alertstate="firing", namespace="openshift-monitoring", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 4m28s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 5m28s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert NodeSystemSaturation fired for 58s seconds with labels: ALERTS{alertname="NodeSystemSaturation", alertstate="firing", container="kube-rbac-proxy", endpoint="https", instance="ip-10-0-42-191.us-west-2.compute.internal", job="node-exporter", namespace="openshift-monitoring", pod="node-exporter-vnwqf", prometheus="openshift-monitoring/k8s", service="node-exporter", severity="warning"} result=reject
#1790390134244380672 junit, 5 days ago
V2 alert AlertmanagerReceiversNotConfigured fired for 1h11m10s seconds with labels: ALERTS{alertname="AlertmanagerReceiversNotConfigured", alertstate="firing", namespace="openshift-monitoring", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 2m58s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 5m28s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert NodeSystemSaturation fired for 28s seconds with labels: ALERTS{alertname="NodeSystemSaturation", alertstate="firing", container="kube-rbac-proxy", endpoint="https", instance="ip-10-0-44-137.us-west-1.compute.internal", job="node-exporter", namespace="openshift-monitoring", pod="node-exporter-trdhf", prometheus="openshift-monitoring/k8s", service="node-exporter", severity="warning"} result=reject
#1790238649707663360 junit, 6 days ago
V2 alert AlertmanagerReceiversNotConfigured fired for 1h14m6s seconds with labels: ALERTS{alertname="AlertmanagerReceiversNotConfigured", alertstate="firing", namespace="openshift-monitoring", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 2m58s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 5m58s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert PodSecurityViolation fired for 22m44s seconds with labels: ALERTS{alertname="PodSecurityViolation", alertstate="firing", namespace="openshift-kube-apiserver", policy_level="restricted", prometheus="openshift-monitoring/k8s", severity="info"} result=reject
#1791082802691706880 junit, 3 days ago
V2 alert AlertmanagerReceiversNotConfigured fired for 1h13m16s seconds with labels: ALERTS{alertname="AlertmanagerReceiversNotConfigured", alertstate="firing", namespace="openshift-monitoring", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 2m58s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 4m28s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert NodeSystemSaturation fired for 1m28s seconds with labels: ALERTS{alertname="NodeSystemSaturation", alertstate="firing", container="kube-rbac-proxy", endpoint="https", instance="ip-10-0-125-85.us-west-2.compute.internal", job="node-exporter", namespace="openshift-monitoring", pod="node-exporter-tg4mr", prometheus="openshift-monitoring/k8s", service="node-exporter", severity="warning"} result=reject
#1791009250483376128 junit, 4 days ago
V2 alert AlertmanagerReceiversNotConfigured fired for 1h16m54s seconds with labels: ALERTS{alertname="AlertmanagerReceiversNotConfigured", alertstate="firing", namespace="openshift-monitoring", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 3m58s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 5m28s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert PodSecurityViolation fired for 21m16s seconds with labels: ALERTS{alertname="PodSecurityViolation", alertstate="firing", namespace="openshift-kube-apiserver", policy_level="restricted", prometheus="openshift-monitoring/k8s", severity="info"} result=reject
#1790948561936977920 junit, 4 days ago
V2 alert AlertmanagerReceiversNotConfigured fired for 1h4m10s seconds with labels: ALERTS{alertname="AlertmanagerReceiversNotConfigured", alertstate="firing", namespace="openshift-monitoring", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert ExtremelyHighIndividualControlPlaneCPU fired for 1m28s seconds with labels: ALERTS{alertname="ExtremelyHighIndividualControlPlaneCPU", alertstate="firing", instance="ip-10-0-33-124.ec2.internal", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 2m58s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 4m28s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert PodSecurityViolation fired for 12m28s seconds with labels: ALERTS{alertname="PodSecurityViolation", alertstate="firing", namespace="openshift-kube-apiserver", policy_level="restricted", prometheus="openshift-monitoring/k8s", severity="info"} result=reject
#1789640371970510848 junit, 7 days ago
V2 alert AlertmanagerReceiversNotConfigured fired for 57m48s seconds with labels: ALERTS{alertname="AlertmanagerReceiversNotConfigured", alertstate="firing", namespace="openshift-monitoring", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert ExtremelyHighIndividualControlPlaneCPU fired for 58s seconds with labels: ALERTS{alertname="ExtremelyHighIndividualControlPlaneCPU", alertstate="firing", instance="ip-10-0-27-34.ec2.internal", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 2m58s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 3m58s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert PodSecurityViolation fired for 12m34s seconds with labels: ALERTS{alertname="PodSecurityViolation", alertstate="firing", namespace="openshift-kube-apiserver", policy_level="restricted", prometheus="openshift-monitoring/k8s", severity="info"} result=reject
#1789569038498664448 junit, 8 days ago
V2 alert AlertmanagerReceiversNotConfigured fired for 59m44s seconds with labels: ALERTS{alertname="AlertmanagerReceiversNotConfigured", alertstate="firing", namespace="openshift-monitoring", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert ExtremelyHighIndividualControlPlaneCPU fired for 1m58s seconds with labels: ALERTS{alertname="ExtremelyHighIndividualControlPlaneCPU", alertstate="firing", instance="ip-10-0-108-0.ec2.internal", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 3m58s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert PodSecurityViolation fired for 12m42s seconds with labels: ALERTS{alertname="PodSecurityViolation", alertstate="firing", namespace="openshift-kube-apiserver", policy_level="restricted", prometheus="openshift-monitoring/k8s", severity="info"} result=reject
#1789063710145253376 junit, 9 days ago
V2 alert AlertmanagerReceiversNotConfigured fired for 1h16m50s seconds with labels: ALERTS{alertname="AlertmanagerReceiversNotConfigured", alertstate="firing", namespace="openshift-monitoring", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert ExtremelyHighIndividualControlPlaneCPU fired for 1m28s seconds with labels: ALERTS{alertname="ExtremelyHighIndividualControlPlaneCPU", alertstate="firing", instance="ip-10-0-61-110.ec2.internal", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 3m28s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 4m28s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert PodSecurityViolation fired for 24m40s seconds with labels: ALERTS{alertname="PodSecurityViolation", alertstate="firing", namespace="openshift-kube-apiserver", policy_level="restricted", prometheus="openshift-monitoring/k8s", severity="info"} result=reject
#1789256663778201600 junit, 8 days ago
V2 alert AlertmanagerReceiversNotConfigured fired for 1h5m46s seconds with labels: ALERTS{alertname="AlertmanagerReceiversNotConfigured", alertstate="firing", namespace="openshift-monitoring", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 2m58s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 4m28s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert PodSecurityViolation fired for 18m14s seconds with labels: ALERTS{alertname="PodSecurityViolation", alertstate="firing", namespace="openshift-kube-apiserver", policy_level="restricted", prometheus="openshift-monitoring/k8s", severity="info"} result=reject
#1788953567730601984 junit, 9 days ago
V2 alert AlertmanagerReceiversNotConfigured fired for 1h12m22s seconds with labels: ALERTS{alertname="AlertmanagerReceiversNotConfigured", alertstate="firing", namespace="openshift-monitoring", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 2m58s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 5m28s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert NodeSystemSaturation fired for 58s seconds with labels: ALERTS{alertname="NodeSystemSaturation", alertstate="firing", container="kube-rbac-proxy", endpoint="https", instance="ip-10-0-26-98.us-west-1.compute.internal", job="node-exporter", namespace="openshift-monitoring", pod="node-exporter-7htx9", prometheus="openshift-monitoring/k8s", service="node-exporter", severity="warning"} result=reject
#1788740398714195968 junit, 10 days ago
V2 alert AlertmanagerReceiversNotConfigured fired for 1h6m58s seconds with labels: ALERTS{alertname="AlertmanagerReceiversNotConfigured", alertstate="firing", namespace="openshift-monitoring", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert ExtremelyHighIndividualControlPlaneCPU fired for 2m28s seconds with labels: ALERTS{alertname="ExtremelyHighIndividualControlPlaneCPU", alertstate="firing", instance="ip-10-0-125-64.ec2.internal", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert ExtremelyHighIndividualControlPlaneCPU fired for 58s seconds with labels: ALERTS{alertname="ExtremelyHighIndividualControlPlaneCPU", alertstate="firing", instance="ip-10-0-125-64.ec2.internal", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 3m58s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert PodSecurityViolation fired for 15m32s seconds with labels: ALERTS{alertname="PodSecurityViolation", alertstate="firing", namespace="openshift-kube-apiserver", policy_level="restricted", prometheus="openshift-monitoring/k8s", severity="info"} result=reject
#1788351171002372096 junit, 11 days ago
V2 alert AlertmanagerReceiversNotConfigured fired for 1h18m36s seconds with labels: ALERTS{alertname="AlertmanagerReceiversNotConfigured", alertstate="firing", namespace="openshift-monitoring", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 3m58s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 58s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert NodeSystemSaturation fired for 28s seconds with labels: ALERTS{alertname="NodeSystemSaturation", alertstate="firing", container="kube-rbac-proxy", endpoint="https", instance="ip-10-0-98-49.us-west-1.compute.internal", job="node-exporter", namespace="openshift-monitoring", pod="node-exporter-9bgpv", prometheus="openshift-monitoring/k8s", service="node-exporter", severity="warning"} result=reject
#1788276678997315584 junit, 11 days ago
V2 alert AlertmanagerReceiversNotConfigured fired for 1h12m22s seconds with labels: ALERTS{alertname="AlertmanagerReceiversNotConfigured", alertstate="firing", namespace="openshift-monitoring", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert ExtremelyHighIndividualControlPlaneCPU fired for 8m28s seconds with labels: ALERTS{alertname="ExtremelyHighIndividualControlPlaneCPU", alertstate="firing", instance="ip-10-0-115-21.us-west-1.compute.internal", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 28s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 4m28s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert NodeSystemSaturation fired for 1m28s seconds with labels: ALERTS{alertname="NodeSystemSaturation", alertstate="firing", container="kube-rbac-proxy", endpoint="https", instance="ip-10-0-115-21.us-west-1.compute.internal", job="node-exporter", namespace="openshift-monitoring", pod="node-exporter-p6sk5", prometheus="openshift-monitoring/k8s", service="node-exporter", severity="warning"} result=reject
#1788055788237885440 junit, 12 days ago
V2 alert AlertmanagerReceiversNotConfigured fired for 1h13m4s seconds with labels: ALERTS{alertname="AlertmanagerReceiversNotConfigured", alertstate="firing", namespace="openshift-monitoring", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert ExtremelyHighIndividualControlPlaneCPU fired for 2m58s seconds with labels: ALERTS{alertname="ExtremelyHighIndividualControlPlaneCPU", alertstate="firing", instance="ip-10-0-88-122.ec2.internal", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 3m28s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 3m58s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert PodSecurityViolation fired for 24m0s seconds with labels: ALERTS{alertname="PodSecurityViolation", alertstate="firing", namespace="openshift-kube-apiserver", policy_level="restricted", prometheus="openshift-monitoring/k8s", severity="info"} result=reject
#1787963564347101184 junit, 12 days ago
V2 alert AlertmanagerReceiversNotConfigured fired for 1h19m44s seconds with labels: ALERTS{alertname="AlertmanagerReceiversNotConfigured", alertstate="firing", namespace="openshift-monitoring", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert ExtremelyHighIndividualControlPlaneCPU fired for 1m58s seconds with labels: ALERTS{alertname="ExtremelyHighIndividualControlPlaneCPU", alertstate="firing", instance="ip-10-0-117-215.us-east-2.compute.internal", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert ExtremelyHighIndividualControlPlaneCPU fired for 58s seconds with labels: ALERTS{alertname="ExtremelyHighIndividualControlPlaneCPU", alertstate="firing", instance="ip-10-0-117-215.us-east-2.compute.internal", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 1m28s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 3m28s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert NodeSystemSaturation fired for 1m58s seconds with labels: ALERTS{alertname="NodeSystemSaturation", alertstate="firing", container="kube-rbac-proxy", endpoint="https", instance="ip-10-0-117-215.us-east-2.compute.internal", job="node-exporter", namespace="openshift-monitoring", pod="node-exporter-zbpxm", prometheus="openshift-monitoring/k8s", service="node-exporter", severity="warning"} result=reject
#1787733976622829568 junit, 13 days ago
V2 alert AlertmanagerReceiversNotConfigured fired for 1h21m0s seconds with labels: ALERTS{alertname="AlertmanagerReceiversNotConfigured", alertstate="firing", namespace="openshift-monitoring", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert ExtremelyHighIndividualControlPlaneCPU fired for 2m28s seconds with labels: ALERTS{alertname="ExtremelyHighIndividualControlPlaneCPU", alertstate="firing", instance="ip-10-0-94-207.us-west-1.compute.internal", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 3m28s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 5m58s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert NodeSystemSaturation fired for 58s seconds with labels: ALERTS{alertname="NodeSystemSaturation", alertstate="firing", container="kube-rbac-proxy", endpoint="https", instance="ip-10-0-94-207.us-west-1.compute.internal", job="node-exporter", namespace="openshift-monitoring", pod="node-exporter-xm769", prometheus="openshift-monitoring/k8s", service="node-exporter", severity="warning"} result=reject
#1787701592162570240 junit, 13 days ago
V2 alert AlertmanagerReceiversNotConfigured fired for 1h10m34s seconds with labels: ALERTS{alertname="AlertmanagerReceiversNotConfigured", alertstate="firing", namespace="openshift-monitoring", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert ExtremelyHighIndividualControlPlaneCPU fired for 2m58s seconds with labels: ALERTS{alertname="ExtremelyHighIndividualControlPlaneCPU", alertstate="firing", instance="ip-10-0-109-231.ec2.internal", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert ExtremelyHighIndividualControlPlaneCPU fired for 3m58s seconds with labels: ALERTS{alertname="ExtremelyHighIndividualControlPlaneCPU", alertstate="firing", instance="ip-10-0-109-231.ec2.internal", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 2m58s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 5m58s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert NodeSystemSaturation fired for 1m28s seconds with labels: ALERTS{alertname="NodeSystemSaturation", alertstate="firing", container="kube-rbac-proxy", endpoint="https", instance="ip-10-0-109-231.ec2.internal", job="node-exporter", namespace="openshift-monitoring", pod="node-exporter-tkq56", prometheus="openshift-monitoring/k8s", service="node-exporter", severity="warning"} result=reject
#1787637635846508544 junit, 13 days ago
V2 alert AlertmanagerReceiversNotConfigured fired for 1h9m2s seconds with labels: ALERTS{alertname="AlertmanagerReceiversNotConfigured", alertstate="firing", namespace="openshift-monitoring", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert ExtremelyHighIndividualControlPlaneCPU fired for 28s seconds with labels: ALERTS{alertname="ExtremelyHighIndividualControlPlaneCPU", alertstate="firing", instance="ip-10-0-23-78.ec2.internal", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 3m28s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 4m58s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert NodeSystemSaturation fired for 28s seconds with labels: ALERTS{alertname="NodeSystemSaturation", alertstate="firing", container="kube-rbac-proxy", endpoint="https", instance="ip-10-0-23-78.ec2.internal", job="node-exporter", namespace="openshift-monitoring", pod="node-exporter-p7dzm", prometheus="openshift-monitoring/k8s", service="node-exporter", severity="warning"} result=reject
#1787516424739098624 junit, 13 days ago
V2 alert AlertmanagerReceiversNotConfigured fired for 1h7m4s seconds with labels: ALERTS{alertname="AlertmanagerReceiversNotConfigured", alertstate="firing", namespace="openshift-monitoring", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 3m28s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 4m58s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert NodeSystemSaturation fired for 28s seconds with labels: ALERTS{alertname="NodeSystemSaturation", alertstate="firing", container="kube-rbac-proxy", endpoint="https", instance="ip-10-0-70-154.us-east-2.compute.internal", job="node-exporter", namespace="openshift-monitoring", pod="node-exporter-bz4ff", prometheus="openshift-monitoring/k8s", service="node-exporter", severity="warning"} result=reject
#1787439650994917376 junit, 13 days ago
V2 alert AlertmanagerReceiversNotConfigured fired for 1h4m38s seconds with labels: ALERTS{alertname="AlertmanagerReceiversNotConfigured", alertstate="firing", namespace="openshift-monitoring", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 3m28s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert HighOverallControlPlaneCPU fired for 4m28s seconds with labels: ALERTS{alertname="HighOverallControlPlaneCPU", alertstate="firing", namespace="openshift-kube-apiserver", prometheus="openshift-monitoring/k8s", severity="warning"} result=reject
V2 alert PodSecurityViolation fired for 12m30s seconds with labels: ALERTS{alertname="PodSecurityViolation", alertstate="firing", namespace="openshift-kube-apiserver", policy_level="restricted", prometheus="openshift-monitoring/k8s", severity="info"} result=reject

Found in 86.11% of runs (344.44% of failures) across 36 total runs and 1 job (25.00% failed).
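
To see which alerts account for most of these matches, the pasted entries can be tallied directly; a small sketch, assuming the report above has been saved to a file named results.txt (a hypothetical name):

    # Tally alert names and total firing time from the "V2 alert ... fired for ..."
    # lines above, read from results.txt (hypothetical filename).
    import re
    from collections import Counter, defaultdict

    line_re = re.compile(r"V2 alert (\S+) fired for (\S+) seconds")
    dur_re = re.compile(r"(?:(\d+)h)?(?:(\d+)m)?(?:(\d+)s)?$")

    def to_seconds(duration):
        h, m, s = (int(x) if x else 0 for x in dur_re.match(duration).groups())
        return h * 3600 + m * 60 + s

    counts, totals = Counter(), defaultdict(int)
    with open("results.txt") as f:
        for line in f:
            m = line_re.search(line)
            if m:
                counts[m.group(1)] += 1
                totals[m.group(1)] += to_seconds(m.group(2))

    for name, n in counts.most_common():
        print(f"{name}: {n} firings, {totals[name]}s total")

In this listing, AlertmanagerReceiversNotConfigured and HighOverallControlPlaneCPU appear in every matched run, which is consistent with the 86% impact figure in the footer.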