Bug 1979671
| Summary: | Warning annotation for pods with cpu requests or limits on single-node OpenShift cluster without workload partitioning | | |
|---|---|---|---|
| Product: | OpenShift Container Platform | Reporter: | Dwaine Gonyier <dgonyier> |
| Component: | Installer | Assignee: | Nahian <npathan> |
| Installer sub component: | Single Node OpenShift | QA Contact: | Omri Hochman <ohochman> |
| Status: | CLOSED ERRATA | Docs Contact: | |
| Severity: | medium | ||
| Priority: | low | CC: | aos-bugs, dgonyier, eparis, gpei, npathan, sasha, sponnaga |
| Version: | 4.8 | Flags: | npathan: needinfo+ |
| Target Milestone: | --- | ||
| Target Release: | 4.11.0 | ||
| Hardware: | Unspecified | ||
| OS: | Unspecified | ||
| Whiteboard: | |||
| Fixed In Version: | Doc Type: | No Doc Update | |
| Doc Text: | Story Points: | --- | |
| Clone Of: | Environment: | ||
| Last Closed: | 2022-08-10 10:36:52 UTC | Type: | Bug |
| Regression: | --- | Mount Type: | --- |
| Documentation: | --- | CRM: | |
| Verified Versions: | Category: | --- | |
| oVirt Team: | --- | RHEL 7.3 requirements from Atomic Host: | |
| Cloudforms Team: | --- | Target Upstream Version: | |
| Embargoed: | |||
| Bug Depends On: | |||
| Bug Blocks: | 2049890 | ||
Verified:
OCP Version: 4.10.0-0.nightly-2022-02-16-171622
oc get pod -A|grep openshift-apiserver-operator
openshift-apiserver-operator openshift-apiserver-operator-6c45d8b9f5-nltfx 1/1 Running 1 (43m ago) 44m
oc get pod -o json -n openshift-apiserver-operator openshift-apiserver-operator-6c45d8b9f5-nltfx|jq '. | { ".metadata": { "metadata": { "annotations": { ".metadata.annotations.workload.openshift.io/warning": .metadata.annotations."workload.openshift.io/warning" } } } } '
{
".metadata": {
"metadata": {
"annotations": {
".metadata.annotations.workload.openshift.io/warning": null
}
}
}
}
oc get pod -o json -n openshift-apiserver-operator openshift-apiserver-operator-6c45d8b9f5-nltfx
{
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"annotations": {
"k8s.v1.cni.cncf.io/network-status": "[{\n \"name\": \"openshift-sdn\",\n \"interface\": \"eth0\",\n \"ips\": [\n \"10.128.0.5\"\n ],\n \"default\": true,\n \"dns\": {}\n}]",
"k8s.v1.cni.cncf.io/networks-status": "[{\n \"name\": \"openshift-sdn\",\n \"interface\": \"eth0\",\n \"ips\": [\n \"10.128.0.5\"\n ],\n \"default\": true,\n \"dns\": {}\n}]",
"openshift.io/scc": "anyuid"
},
"creationTimestamp": "2022-02-17T17:13:23Z",
"generateName": "openshift-apiserver-operator-6c45d8b9f5-",
"labels": {
"app": "openshift-apiserver-operator",
"pod-template-hash": "6c45d8b9f5"
},
"name": "openshift-apiserver-operator-6c45d8b9f5-nltfx",
"namespace": "openshift-apiserver-operator",
"ownerReferences": [
{
"apiVersion": "apps/v1",
"blockOwnerDeletion": true,
"controller": true,
"kind": "ReplicaSet",
"name": "openshift-apiserver-operator-6c45d8b9f5",
"uid": "7a1b576c-f509-4c02-801d-05b7609e6e81"
}
],
"resourceVersion": "5441",
"uid": "0a72bf41-1a0b-4d5c-8438-bc2d477e938a"
},
"spec": {
"containers": [
{
"args": [
"--config=/var/run/configmaps/config/config.yaml"
],
"command": [
"cluster-openshift-apiserver-operator",
"operator"
],
"env": [
{
"name": "IMAGE",
"value": "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:867ef8c9bf8946c0bc0f01138fceec52d41409be2d035a8584b7646f2eb3b3c7"
},
{
"name": "OPERATOR_IMAGE",
"value": "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ad307371d2b92ef76828c8e73ffd739af4dd07cede8a3cde13c72ee4644c4769"
},
{
"name": "OPERATOR_IMAGE_VERSION",
"value": "4.10.0-0.nightly-2022-02-16-171622"
},
{
"name": "OPERAND_IMAGE_VERSION",
"value": "4.10.0-0.nightly-2022-02-16-171622"
},
{
"name": "KUBE_APISERVER_OPERATOR_IMAGE",
"value": "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:73a00e95d08cce0cf5e34602d160f4de72da69ffdd05a347fa705ee6e13cae7c"
}
],
"image": "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ad307371d2b92ef76828c8e73ffd739af4dd07cede8a3cde13c72ee4644c4769",
"imagePullPolicy": "IfNotPresent",
"name": "openshift-apiserver-operator",
"ports": [
{
"containerPort": 8443,
"name": "metrics",
"protocol": "TCP"
}
],
"resources": {
"requests": {
"cpu": "10m",
"memory": "50Mi"
}
},
"securityContext": {
"capabilities": {
"drop": [
"MKNOD"
]
}
},
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "FallbackToLogsOnError",
"volumeMounts": [
{
"mountPath": "/var/run/configmaps/config",
"name": "config"
},
{
"mountPath": "/var/run/secrets/serving-cert",
"name": "serving-cert"
},
{
"mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
"name": "kube-api-access-8jktj",
"readOnly": true
}
]
}
],
"dnsPolicy": "ClusterFirst",
"enableServiceLinks": true,
"nodeName": "sno-0-0",
"nodeSelector": {
"node-role.kubernetes.io/master": ""
},
"preemptionPolicy": "PreemptLowerPriority",
"priority": 2000000000,
"priorityClassName": "system-cluster-critical",
"restartPolicy": "Always",
"schedulerName": "default-scheduler",
"securityContext": {
"seLinuxOptions": {
"level": "s0:c14,c4"
}
},
"serviceAccount": "openshift-apiserver-operator",
"serviceAccountName": "openshift-apiserver-operator",
"terminationGracePeriodSeconds": 30,
"tolerations": [
{
"effect": "NoSchedule",
"key": "node-role.kubernetes.io/master",
"operator": "Exists"
},
{
"effect": "NoExecute",
"key": "node.kubernetes.io/unreachable",
"operator": "Exists",
"tolerationSeconds": 120
},
{
"effect": "NoExecute",
"key": "node.kubernetes.io/not-ready",
"operator": "Exists",
"tolerationSeconds": 120
},
{
"effect": "NoSchedule",
"key": "node.kubernetes.io/memory-pressure",
"operator": "Exists"
}
],
"volumes": [
{
"name": "serving-cert",
"secret": {
"defaultMode": 420,
"optional": true,
"secretName": "openshift-apiserver-operator-serving-cert"
}
},
{
"configMap": {
"defaultMode": 420,
"name": "openshift-apiserver-operator-config"
},
"name": "config"
},
{
"name": "kube-api-access-8jktj",
"projected": {
"defaultMode": 420,
"sources": [
{
"serviceAccountToken": {
"expirationSeconds": 3607,
"path": "token"
}
},
{
"configMap": {
"items": [
{
"key": "ca.crt",
"path": "ca.crt"
}
],
"name": "kube-root-ca.crt"
}
},
{
"downwardAPI": {
"items": [
{
"fieldRef": {
"apiVersion": "v1",
"fieldPath": "metadata.namespace"
},
"path": "namespace"
}
]
}
},
{
"configMap": {
"items": [
{
"key": "service-ca.crt",
"path": "service-ca.crt"
}
],
"name": "openshift-service-ca.crt"
}
}
]
}
}
]
},
"status": {
"conditions": [
{
"lastProbeTime": null,
"lastTransitionTime": "2022-02-17T17:14:02Z",
"status": "True",
"type": "Initialized"
},
{
"lastProbeTime": null,
"lastTransitionTime": "2022-02-17T17:14:39Z",
"status": "True",
"type": "Ready"
},
{
"lastProbeTime": null,
"lastTransitionTime": "2022-02-17T17:14:39Z",
"status": "True",
"type": "ContainersReady"
},
{
"lastProbeTime": null,
"lastTransitionTime": "2022-02-17T17:14:02Z",
"status": "True",
"type": "PodScheduled"
}
],
"containerStatuses": [
{
"containerID": "cri-o://b2010fdc79330dff150d1bcb1191c59c898c08764a39102c0ca10d97988cb0ea",
"image": "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ad307371d2b92ef76828c8e73ffd739af4dd07cede8a3cde13c72ee4644c4769",
"imageID": "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ad307371d2b92ef76828c8e73ffd739af4dd07cede8a3cde13c72ee4644c4769",
"lastState": {
"terminated": {
"containerID": "cri-o://fb8751e7afdedd5ba7b6711cf2d9f3c8e2177958a43e62f97f3a5512b6fb520e",
"exitCode": 0,
"finishedAt": "2022-02-17T17:14:38Z",
"reason": "Completed",
"startedAt": "2022-02-17T17:14:12Z"
}
},
"name": "openshift-apiserver-operator",
"ready": true,
"restartCount": 1,
"started": true,
"state": {
"running": {
"startedAt": "2022-02-17T17:14:38Z"
}
}
}
],
"hostIP": "192.168.123.106",
"phase": "Running",
"podIP": "10.128.0.5",
"podIPs": [
{
"ip": "10.128.0.5"
}
],
"qosClass": "Burstable",
"startTime": "2022-02-17T17:14:02Z"
}
}
Moving back to ON_QA. This has to be verified with 4.11 bits. Checked again with 4.11.0-0.nightly-2022-07-13-131410.
In an SNO cluster without WP enabled, no such warning annotation is added.
# oc get pod -o json -n openshift-apiserver-operator openshift-apiserver-operator-5945fcbb9d-xbgqf |jq '. | { ".metadata": { "metadata": { "annotations": { ".metadata.annotations.workload.openshift.io/warning": .metadata.annotations."workload.openshift.io/warning" } } } } '
{
".metadata": {
"metadata": {
"annotations": {
".metadata.annotations.workload.openshift.io/warning": null
}
}
}
}
Since the problem described in this bug report should be resolved in a recent advisory, it has been closed with a resolution of ERRATA. For information on the advisory (Important: OpenShift Container Platform 4.11.0 bug fix and security update), and where to find the updated files, follow the link below. If the solution does not work for you, open a new bug report. https://access.redhat.com/errata/RHSA-2022:5069 |
Description of problem: Warning annotation added to any pods with cpu requests / limits in manifest on Single-Node OpenShift cluster when workload partitioning is not enabled. Example (using jq to filter down output): oc get pod -o json openshift-apiserver-operator-xxxxxxxxxx-yyyyy | jq '. | { ".metadata": { "metadata": { "annotations": { ".metadata.annotations.workload.openshift.io/warning": .metadata.annotations."workload.openshift.io/warning" } } } } ' { ".metadata": { "metadata": { "annotations": { ".metadata.annotations.workload.openshift.io/warning": "the node \"ci-ln-0gm85yt-f76d1-bjqth-master-0\" does not have resource \"management.workload.openshift.io/cores\"" } } } } Version-Release number of selected component (if applicable): How reproducible: Always Steps to Reproduce: 1. Configure an SNO cluster without WP 2. Examine a management pod that has cpu requests/limits with 'oc get pod ...' Actual results: See description. Expected results: Suppress this warning if WP is not installed on cluster. Additional info: This warning could be confusing on a SNO cluster without WP.