Job:
periodic-ci-openshift-release-master-ci-4.15-upgrade-from-stable-4.14-e2e-azure-sdn-upgrade (all) - 28 runs, 36% failed, 10% of failures match = 4% impact
#1789920406191214592 — build-log.txt.gz — 6 days ago
time="2024-05-13T09:22:57Z" level=info msg="resulting interval message" message="{NodeNotReady  Node is not ready map[reason:NodeNotReady]}"
time="2024-05-13T09:22:57Z" level=info msg="processed event" event="{{ } {openshift-kube-scheduler-operator.17cf01ff028ffa0e  openshift-kube-scheduler-operator  2c61591b-9184-4223-b2e3-f1d9df573571 82917 0 2024-05-13 09:22:57 +0000 UTC <nil> <nil> map[] map[monitor.openshift.io/observed-recreation-count:0 monitor.openshift.io/observed-update-count:1] [] [] [{cluster-kube-scheduler-operator Update v1 2024-05-13 09:22:57 +0000 UTC FieldsV1 {\"f:count\":{},\"f:firstTimestamp\":{},\"f:involvedObject\":{},\"f:lastTimestamp\":{},\"f:message\":{},\"f:reason\":{},\"f:reportingComponent\":{},\"f:source\":{\"f:component\":{}},\"f:type\":{}} }]} {Deployment openshift-kube-scheduler-operator openshift-kube-scheduler-operator 87d4c17d-d2d3-412c-ac60-782374d58aba apps/v1  } OperatorStatusChanged Status for clusteroperator/kube-scheduler changed: Degraded message changed from \"NodeControllerDegraded: All master nodes are ready\" to \"NodeControllerDegraded: The master nodes not ready: node \\\"ci-op-cl6124jr-caf63-7fwdz-master-0\\\" not ready since 2024-05-13 09:22:57 +0000 UTC because NodeStatusUnknown (Kubelet stopped posting node status.)\" {openshift-cluster-kube-scheduler-operator-status-controller-statussyncer_kube-scheduler } 2024-05-13 09:22:57 +0000 UTC 2024-05-13 09:22:57 +0000 UTC 1 Normal 0001-01-01 00:00:00 +0000 UTC nil  nil openshift-cluster-kube-scheduler-operator-status-controller-statussyncer_kube-scheduler }"
time="2024-05-13T09:22:57Z" level=info msg="resulting interval locator" locator="{Kind map[deployment:openshift-kube-scheduler-operator hmsg:1ed71d6ebc namespace:openshift-kube-scheduler-operator]}"
time="2024-05-13T09:22:57Z" level=info msg="resulting interval message" message="{OperatorStatusChanged  Status for clusteroperator/kube-scheduler changed: Degraded message changed from \"NodeControllerDegraded: All master nodes are ready\" to \"NodeControllerDegraded: The master nodes not ready: node \\\"ci-op-cl6124jr-caf63-7fwdz-master-0\\\" not ready since 2024-05-13 09:22:57 +0000 UTC because NodeStatusUnknown (Kubelet stopped posting node status.)\" map[reason:OperatorStatusChanged]}"
time="2024-05-13T09:22:57Z" level=info msg="processed event" event="{{ } {sdn-controller-d6pzl.17cf01ff033b8a71  openshift-sdn  1048cc56-936c-4d99-bcb6-e9be39d05ffc 82919 0 2024-05-13 09:22:57 +0000 UTC <nil> <nil> map[] map[monitor.openshift.io/observed-recreation-count:0 monitor.openshift.io/observed-update-count:1] [] [] [{kube-controller-manager Update v1 2024-05-13 09:22:57 +0000 UTC FieldsV1 {\"f:count\":{},\"f:firstTimestamp\":{},\"f:involvedObject\":{},\"f:lastTimestamp\":{},\"f:message\":{},\"f:reason\":{},\"f:reportingComponent\":{},\"f:source\":{\"f:component\":{}},\"f:type\":{}} }]} {Pod openshift-sdn sdn-controller-d6pzl 1019ee9e-1678-4a2a-a206-79e013f29166 v1 60174 } NodeNotReady Node is not ready {node-controller } 2024-05-13 09:22:57 +0000 UTC 2024-05-13 09:22:57 +0000 UTC 1 Warning 0001-01-01 00:00:00 +0000 UTC nil  nil node-controller }"
#1789920406191214592 — build-log.txt.gz — 6 days ago
time="2024-05-13T09:22:57Z" level=info msg="resulting interval message" message="{NodeNotReady  Node is not ready map[reason:NodeNotReady]}"
time="2024-05-13T09:22:57Z" level=info msg="processed event" event="{{ } {etcd-operator.17cf01ff05d4a0cc  openshift-etcd-operator  b02f7182-dddc-4287-a2d0-ce577a50c66e 82929 0 2024-05-13 09:22:57 +0000 UTC <nil> <nil> map[] map[monitor.openshift.io/observed-recreation-count:0 monitor.openshift.io/observed-update-count:1] [] [] [{cluster-etcd-operator Update v1 2024-05-13 09:22:57 +0000 UTC FieldsV1 {\"f:count\":{},\"f:firstTimestamp\":{},\"f:involvedObject\":{},\"f:lastTimestamp\":{},\"f:message\":{},\"f:reason\":{},\"f:reportingComponent\":{},\"f:source\":{\"f:component\":{}},\"f:type\":{}} }]} {Deployment openshift-etcd-operator etcd-operator ba88cd4d-a5c6-4932-804a-f7224f6efa03 apps/v1  } OperatorStatusChanged Status for clusteroperator/etcd changed: Degraded message changed from \"NodeControllerDegraded: All master nodes are ready\\nEtcdEndpointsDegraded: EtcdEndpointsController can't evaluate whether quorum is safe: etcd cluster has quorum of 2 and 2 healthy members which is not fault tolerant: [{Member:ID:7380640431518010364 name:\\\"ci-op-cl6124jr-caf63-7fwdz-master-2\\\" peerURLs:\\\"https://10.0.0.8:2380\\\" clientURLs:\\\"https://10.0.0.8:2379\\\"  Healthy:true Took:1.072611ms Error:<nil>} {Member:ID:8974456408853077466 name:\\\"ci-op-cl6124jr-caf63-7fwdz-master-1\\\" peerURLs:\\\"https://10.0.0.6:2380\\\" clientURLs:\\\"https://10.0.0.6:2379\\\"  Healthy:true Took:2.104522ms Error:<nil>} {Member:ID:16035793696385259329 name:\\\"ci-op-cl6124jr-caf63-7fwdz-master-0\\\" peerURLs:\\\"https://10.0.0.7:2380\\\" clientURLs:\\\"https://10.0.0.7:2379\\\"  Healthy:false Took: Error:create client failure: failed to make etcd client for endpoints [https://10.0.0.7:2379]: context deadline exceeded}]\\nEtcdMembersDegraded: No unhealthy members found\" to \"NodeControllerDegraded: The master nodes not ready: node \\\"ci-op-cl6124jr-caf63-7fwdz-master-0\\\" not ready since 2024-05-13 09:22:57 +0000 UTC because NodeStatusUnknown (Kubelet stopped posting node 
status.)\\nEtcdEndpointsDegraded: EtcdEndpointsController can't evaluate whether quorum is safe: etcd cluster has quorum of 2 and 2 healthy members which is not fault tolerant: [{Member:ID:7380640431518010364 name:\\\"ci-op-cl6124jr-caf63-7fwdz-master-2\\\" peerURLs:\\\"https://10.0.0.8:2380\\\" clientURLs:\\\"https://10.0.0.8:2379\\\"  Healthy:true Took:1.072611ms Error:<nil>} {Member:ID:8974456408853077466 name:\\\"ci-op-cl6124jr-caf63-7fwdz-master-1\\\" peerURLs:\\\"https://10.0.0.6:2380\\\" clientURLs:\\\"https://10.0.0.6:2379\\\"  Healthy:true Took:2.104522ms Error:<nil>} {Member:ID:16035793696385259329 name:\\\"ci-op-cl6124jr-caf63-7fwdz-master-0\\\" peerURLs:\\\"https://10.0.0.7:2380\\\" clientURLs:\\\"https://10.0.0.7:2379\\\"  Healthy:false Took: Error:create client failure: failed to make etcd client for endpoints [https://10.0.0.7:2379]: context deadline exceeded}]\\nEtcdMembersDegraded: No unhealthy members found\" {openshift-cluster-etcd-operator-status-controller-statussyncer_etcd } 2024-05-13 09:22:57 +0000 UTC 2024-05-13 09:22:57 +0000 UTC 1 Normal 0001-01-01 00:00:00 +0000 UTC nil  nil openshift-cluster-etcd-operator-status-controller-statussyncer_etcd }"
time="2024-05-13T09:22:57Z" level=info msg="resulting interval locator" locator="{Kind map[deployment:etcd-operator hmsg:25fec7028a namespace:openshift-etcd-operator]}"
time="2024-05-13T09:22:57Z" level=info msg="resulting interval message" message="{OperatorStatusChanged  Status for clusteroperator/etcd changed: Degraded message changed from \"NodeControllerDegraded: All master nodes are ready\\nEtcdEndpointsDegraded: EtcdEndpointsController can't evaluate whether quorum is safe: etcd cluster has quorum of 2 and 2 healthy members which is not fault tolerant: [{Member:ID:7380640431518010364 name:\\\"ci-op-cl6124jr-caf63-7fwdz-master-2\\\" peerURLs:\\\"https://10.0.0.8:2380\\\" clientURLs:\\\"https://10.0.0.8:2379\\\"  Healthy:true Took:1.072611ms Error:<nil>} {Member:ID:8974456408853077466 name:\\\"ci-op-cl6124jr-caf63-7fwdz-master-1\\\" peerURLs:\\\"https://10.0.0.6:2380\\\" clientURLs:\\\"https://10.0.0.6:2379\\\"  Healthy:true Took:2.104522ms Error:<nil>} {Member:ID:16035793696385259329 name:\\\"ci-op-cl6124jr-caf63-7fwdz-master-0\\\" peerURLs:\\\"https://10.0.0.7:2380\\\" clientURLs:\\\"https://10.0.0.7:2379\\\"  Healthy:false Took: Error:create client failure: failed to make etcd client for endpoints [https://10.0.0.7:2379]: context deadline exceeded}]\\nEtcdMembersDegraded: No unhealthy members found\" to \"NodeControllerDegraded: The master nodes not ready: node \\\"ci-op-cl6124jr-caf63-7fwdz-master-0\\\" not ready since 2024-05-13 09:22:57 +0000 UTC because NodeStatusUnknown (Kubelet stopped posting node status.)\\nEtcdEndpointsDegraded: EtcdEndpointsController can't evaluate whether quorum is safe: etcd cluster has quorum of 2 and 2 healthy members which is not fault tolerant: [{Member:ID:7380640431518010364 name:\\\"ci-op-cl6124jr-caf63-7fwdz-master-2\\\" peerURLs:\\\"https://10.0.0.8:2380\\\" clientURLs:\\\"https://10.0.0.8:2379\\\"  Healthy:true Took:1.072611ms Error:<nil>} {Member:ID:8974456408853077466 name:\\\"ci-op-cl6124jr-caf63-7fwdz-master-1\\\" peerURLs:\\\"https://10.0.0.6:2380\\\" clientURLs:\\\"https://10.0.0.6:2379\\\"  Healthy:true Took:2.104522ms Error:<nil>} {Member:ID:16035793696385259329 
name:\\\"ci-op-cl6124jr-caf63-7fwdz-master-0\\\" peerURLs:\\\"https://10.0.0.7:2380\\\" clientURLs:\\\"https://10.0.0.7:2379\\\"  Healthy:false Took: Error:create client failure: failed to make etcd client for endpoints [https://10.0.0.7:2379]: context deadline exceeded}]\\nEtcdMembersDegraded: No unhealthy members found\" map[reason:OperatorStatusChanged]}"
time="2024-05-13T09:22:57Z" level=info msg="processed event" event="{{ } {network-check-target-qfjp7.17cf01ff07762dbb  openshift-network-diagnostics  f1d634a1-8170-473b-aa88-f7ded4d21573 82931 0 2024-05-13 09:22:57 +0000 UTC <nil> <nil> map[] map[monitor.openshift.io/observed-recreation-count:0 monitor.openshift.io/observed-update-count:1] [] [] [{kube-controller-manager Update v1 2024-05-13 09:22:57 +0000 UTC FieldsV1 {\"f:count\":{},\"f:firstTimestamp\":{},\"f:involvedObject\":{},\"f:lastTimestamp\":{},\"f:message\":{},\"f:reason\":{},\"f:reportingComponent\":{},\"f:source\":{\"f:component\":{}},\"f:type\":{}} }]} {Pod openshift-network-diagnostics network-check-target-qfjp7 dd99e139-77a5-440c-9b88-79c4b3ed062d v1 63002 } NodeNotReady Node is not ready {node-controller } 2024-05-13 09:22:57 +0000 UTC 2024-05-13 09:22:57 +0000 UTC 1 Warning 0001-01-01 00:00:00 +0000 UTC nil  nil node-controller }"
#1789920406191214592 — build-log.txt.gz — 6 days ago
time="2024-05-13T09:22:57Z" level=info msg="resulting interval message" message="{NodeNotReady  Node is not ready map[reason:NodeNotReady]}"
time="2024-05-13T09:22:57Z" level=info msg="processed event" event="{{ } {kube-controller-manager-operator.17cf01ff07cbd9ff  openshift-kube-controller-manager-operator  afe9413c-da26-48c5-ab36-e446bedd6cfd 82932 0 2024-05-13 09:22:57 +0000 UTC <nil> <nil> map[] map[monitor.openshift.io/observed-recreation-count:0 monitor.openshift.io/observed-update-count:1] [] [] [{cluster-kube-controller-manager-operator Update v1 2024-05-13 09:22:57 +0000 UTC FieldsV1 {\"f:count\":{},\"f:firstTimestamp\":{},\"f:involvedObject\":{},\"f:lastTimestamp\":{},\"f:message\":{},\"f:reason\":{},\"f:reportingComponent\":{},\"f:source\":{\"f:component\":{}},\"f:type\":{}} }]} {Deployment openshift-kube-controller-manager-operator kube-controller-manager-operator 2ef171f0-703b-4b88-bb44-2e0b492d26c2 apps/v1  } OperatorStatusChanged Status for clusteroperator/kube-controller-manager changed: Degraded message changed from \"NodeControllerDegraded: All master nodes are ready\" to \"NodeControllerDegraded: The master nodes not ready: node \\\"ci-op-cl6124jr-caf63-7fwdz-master-0\\\" not ready since 2024-05-13 09:22:57 +0000 UTC because NodeStatusUnknown (Kubelet stopped posting node status.)\" {kube-controller-manager-operator-status-controller-statussyncer_kube-controller-manager } 2024-05-13 09:22:57 +0000 UTC 2024-05-13 09:22:57 +0000 UTC 1 Normal 0001-01-01 00:00:00 +0000 UTC nil  nil kube-controller-manager-operator-status-controller-statussyncer_kube-controller-manager }"
time="2024-05-13T09:22:57Z" level=info msg="resulting interval locator" locator="{Kind map[deployment:kube-controller-manager-operator hmsg:168913cc16 namespace:openshift-kube-controller-manager-operator]}"
time="2024-05-13T09:22:57Z" level=info msg="resulting interval message" message="{OperatorStatusChanged  Status for clusteroperator/kube-controller-manager changed: Degraded message changed from \"NodeControllerDegraded: All master nodes are ready\" to \"NodeControllerDegraded: The master nodes not ready: node \\\"ci-op-cl6124jr-caf63-7fwdz-master-0\\\" not ready since 2024-05-13 09:22:57 +0000 UTC because NodeStatusUnknown (Kubelet stopped posting node status.)\" map[reason:OperatorStatusChanged]}"
time="2024-05-13T09:22:57Z" level=info msg="processed event" event="{{ } {kube-apiserver-operator.17cf01ff084ca50d  openshift-kube-apiserver-operator  275fd9d1-a942-44e2-a5af-36c7a1ded514 82937 0 2024-05-13 09:22:57 +0000 UTC <nil> <nil> map[] map[monitor.openshift.io/observed-recreation-count:0 monitor.openshift.io/observed-update-count:1] [] [] [{cluster-kube-apiserver-operator Update v1 2024-05-13 09:22:57 +0000 UTC FieldsV1 {\"f:count\":{},\"f:firstTimestamp\":{},\"f:involvedObject\":{},\"f:lastTimestamp\":{},\"f:message\":{},\"f:reason\":{},\"f:reportingComponent\":{},\"f:source\":{\"f:component\":{}},\"f:type\":{}} }]} {Deployment openshift-kube-apiserver-operator kube-apiserver-operator 05ae4922-20b9-49f4-aca0-38f2ef2fe48c apps/v1  } OperatorStatusChanged Status for clusteroperator/kube-apiserver changed: Degraded message changed from \"NodeControllerDegraded: All master nodes are ready\" to \"NodeControllerDegraded: The master nodes not ready: node \\\"ci-op-cl6124jr-caf63-7fwdz-master-0\\\" not ready since 2024-05-13 09:22:57 +0000 UTC because NodeStatusUnknown (Kubelet stopped posting node status.)\" {kube-apiserver-operator-status-controller-statussyncer_kube-apiserver } 2024-05-13 09:22:57 +0000 UTC 2024-05-13 09:22:57 +0000 UTC 1 Normal 0001-01-01 00:00:00 +0000 UTC nil  nil kube-apiserver-operator-status-controller-statussyncer_kube-apiserver }"
time="2024-05-13T09:22:57Z" level=info msg="resulting interval locator" locator="{Kind map[deployment:kube-apiserver-operator hmsg:9fe3ba6074 namespace:openshift-kube-apiserver-operator]}"

Found in 0.91% of runs (2.50% of failures) across 110 total runs and 10 jobs (36.36% failed) in 457ms - clear search | chart view - source code located on github