See https://github.com/openshift/installer/pull/4206
Picking a severity so I can restore the target release without upsetting Eric's bot.
Verified on 4.6.0-0.nightly-2020-09-25-085318

$ oc get clusterversion
NAME      VERSION                             AVAILABLE   PROGRESSING   SINCE   STATUS
version   4.6.0-0.nightly-2020-09-25-085318   True        False         80m     Cluster version is 4.6.0-0.nightly-2020-09-25-085318

$ oc -n openshift-machine-api get machineset
NAME                                DESIRED   CURRENT   READY   AVAILABLE   AGE
mnguyen46-9tnv8-worker-us-west-2a   1         1         1       1           117m
mnguyen46-9tnv8-worker-us-west-2b   1         1         1       1           117m
mnguyen46-9tnv8-worker-us-west-2c   1         1         1       1           117m
mnguyen46-9tnv8-worker-us-west-2d   0         0                             117m

$ oc -n openshift-machine-api get machineset/mnguyen46-9tnv8-worker-us-west-2a -o yaml
apiVersion: machine.openshift.io/v1beta1
kind: MachineSet
metadata:
  annotations:
    machine.openshift.io/GPU: "0"
    machine.openshift.io/memoryMb: "8192"
    machine.openshift.io/vCPU: "2"
  creationTimestamp: "2020-09-25T12:33:48Z"
  generation: 1
  labels:
    machine.openshift.io/cluster-api-cluster: mnguyen46-9tnv8
  managedFields:
  - apiVersion: machine.openshift.io/v1beta1
    fieldsType: FieldsV1
    fieldsV1:
      f:metadata:
        f:labels:
          .: {}
          f:machine.openshift.io/cluster-api-cluster: {}
      f:spec:
        .: {}
        f:replicas: {}
        f:selector:
          .: {}
          f:matchLabels:
            .: {}
            f:machine.openshift.io/cluster-api-cluster: {}
            f:machine.openshift.io/cluster-api-machineset: {}
        f:template:
          .: {}
          f:metadata:
            .: {}
            f:labels:
              .: {}
              f:machine.openshift.io/cluster-api-cluster: {}
              f:machine.openshift.io/cluster-api-machine-role: {}
              f:machine.openshift.io/cluster-api-machine-type: {}
              f:machine.openshift.io/cluster-api-machineset: {}
          f:spec:
            .: {}
            f:metadata: {}
            f:providerSpec:
              .: {}
              f:value:
                .: {}
                f:ami: {}
                f:apiVersion: {}
                f:blockDevices: {}
                f:credentialsSecret: {}
                f:deviceIndex: {}
                f:iamInstanceProfile: {}
                f:instanceType: {}
                f:kind: {}
                f:metadata: {}
                f:placement: {}
                f:securityGroups: {}
                f:subnet: {}
                f:tags: {}
                f:userDataSecret: {}
      f:status: {}
    manager: cluster-bootstrap
    operation: Update
    time: "2020-09-25T12:33:48Z"
  - apiVersion: machine.openshift.io/v1beta1
    fieldsType: FieldsV1
    fieldsV1:
      f:metadata:
        f:annotations:
          .: {}
          f:machine.openshift.io/GPU: {}
          f:machine.openshift.io/memoryMb: {}
          f:machine.openshift.io/vCPU: {}
    manager: machine-controller-manager
    operation: Update
    time: "2020-09-25T12:49:58Z"
  - apiVersion: machine.openshift.io/v1beta1
    fieldsType: FieldsV1
    fieldsV1:
      f:status:
        f:availableReplicas: {}
        f:fullyLabeledReplicas: {}
        f:observedGeneration: {}
        f:readyReplicas: {}
        f:replicas: {}
    manager: machineset-controller
    operation: Update
    time: "2020-09-25T12:54:23Z"
  name: mnguyen46-9tnv8-worker-us-west-2a
  namespace: openshift-machine-api
  resourceVersion: "26830"
  selfLink: /apis/machine.openshift.io/v1beta1/namespaces/openshift-machine-api/machinesets/mnguyen46-9tnv8-worker-us-west-2a
  uid: 0e9280ab-e706-48cc-89a2-303c7eee9c08
spec:
  replicas: 1
  selector:
    matchLabels:
      machine.openshift.io/cluster-api-cluster: mnguyen46-9tnv8
      machine.openshift.io/cluster-api-machineset: mnguyen46-9tnv8-worker-us-west-2a
  template:
    metadata:
      labels:
        machine.openshift.io/cluster-api-cluster: mnguyen46-9tnv8
        machine.openshift.io/cluster-api-machine-role: worker
        machine.openshift.io/cluster-api-machine-type: worker
        machine.openshift.io/cluster-api-machineset: mnguyen46-9tnv8-worker-us-west-2a
    spec:
      metadata: {}
      providerSpec:
        value:
          ami:
            id: ami-04cb8492cba4b5261
          apiVersion: awsproviderconfig.openshift.io/v1beta1
          blockDevices:
          - ebs:
              encrypted: true
              iops: 0
              kmsKey:
                arn: ""
              volumeSize: 120
              volumeType: gp2
          credentialsSecret:
            name: aws-cloud-credentials
          deviceIndex: 0
          iamInstanceProfile:
            id: mnguyen46-9tnv8-worker-profile
          instanceType: m5.large
          kind: AWSMachineProviderConfig
          metadata:
            creationTimestamp: null
          placement:
            availabilityZone: us-west-2a
            region: us-west-2
          securityGroups:
          - filters:
            - name: tag:Name
              values:
              - mnguyen46-9tnv8-worker-sg
          subnet:
            filters:
            - name: tag:Name
              values:
              - mnguyen46-9tnv8-private-us-west-2a
          tags:
          - name: kubernetes.io/cluster/mnguyen46-9tnv8
            value: owned
          userDataSecret:
            name: worker-user-data-managed
status:
  availableReplicas: 1
  fullyLabeledReplicas: 1
  observedGeneration: 1
  readyReplicas: 1
  replicas: 1

$ oc get nodes
NAME                                         STATUS   ROLES    AGE    VERSION
ip-10-0-138-158.us-west-2.compute.internal   Ready    worker   98m    v1.19.0+8a39924
ip-10-0-145-247.us-west-2.compute.internal   Ready    master   110m   v1.19.0+8a39924
ip-10-0-160-60.us-west-2.compute.internal    Ready    worker   98m    v1.19.0+8a39924
ip-10-0-173-59.us-west-2.compute.internal    Ready    master   110m   v1.19.0+8a39924
ip-10-0-214-16.us-west-2.compute.internal    Ready    worker   98m    v1.19.0+8a39924
ip-10-0-220-48.us-west-2.compute.internal    Ready    master   110m   v1.19.0+8a39924

$ oc debug node/ip-10-0-160-60.us-west-2.compute.internal
Starting pod/ip-10-0-160-60us-west-2computeinternal-debug ...
To use host binaries, run `chroot /host`
If you don't see a command prompt, try pressing enter.
sh-4.2# chroot /host
sh-4.4# rpm-ostree status
State: idle
Deployments:
* pivot://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b973b2f9e432b12388874a9c8d191e699106bcbf12d962c729b4e16307dbd83f
              CustomOrigin: Managed by machine-config-operator
                   Version: 46.82.202009222340-0 (2020-09-22T23:44:32Z)

  ostree://5d65bddfb072101a84501cd87b8abc650beb8dc0aa2bfeff022fc750cde52f1d
                   Version: 46.82.202009222340-0 (2020-09-22T23:44:32Z)
sh-4.4# exit
exit
sh-4.2# exit
exit
Removing debug pod ...
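The field that matters in the dump above is spec.template.spec.providerSpec.value.userDataSecret.name, which now points at the MCO-managed worker-user-data-managed secret. For reference, a minimal one-liner sketch to cross-check the same field across all machinesets at once (it only reads the field path shown in the YAML above; every row should report worker-user-data-managed):

$ # list the userDataSecret referenced by each machineset
$ oc -n openshift-machine-api get machinesets \
    -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.template.spec.providerSpec.value.userDataSecret.name}{"\n"}{end}'

The rpm-ostree check can likewise be scripted rather than run interactively, e.g. `oc debug node/<node> -- chroot /host rpm-ostree status` for each worker node.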
Since the problem described in this bug report should be resolved in a recent advisory, it has been closed with a resolution of ERRATA.

For information on the advisory (OpenShift Container Platform 4.6 GA Images), and where to find the updated files, follow the link below. If the solution does not work for you, open a new bug report.

https://access.redhat.com/errata/RHBA-2020:4196