Bug 1645258 - vSphere Cloud provider: detach volume when node is not present/ powered off
Summary: vSphere Cloud provider: detach volume when node is not present/ powered off
Keywords:
Status: CLOSED ERRATA
Alias: None
Product: OpenShift Container Platform
Classification: Red Hat
Component: Storage
Version: 3.11.0
Hardware: Unspecified
OS: Unspecified
Priority: medium
Severity: low
Target Milestone: ---
Target Release: 3.11.z
Assignee: Hemant Kumar
QA Contact: Chao Yang
URL:
Whiteboard:
Depends On: 1619514 1645260
Blocks:
 
Reported: 2018-11-01 18:24 UTC by Hemant Kumar
Modified: 2018-12-18 21:33 UTC
CC List: 14 users

Fixed In Version:
Doc Type: If docs needed, set a value
Doc Text:
Clone Of: 1619514
Environment:
Last Closed: 2018-12-12 14:15:51 UTC
Target Upstream Version:
Embargoed:




Links
Red Hat Product Errata RHBA-2018:3743 (last updated 2018-12-12 14:16:01 UTC)

Comment 5 Chao Yang 2018-12-03 09:58:35 UTC
This was verified on:
[root@ocp310 ~]# oc version
oc v3.11.50
kubernetes v1.11.0+d4cacc0
features: Basic-Auth GSSAPI Kerberos SPNEGO

Server https://ocp310.master.vsphere.local:8443
openshift v3.11.50
kubernetes v1.11.0+d4cacc0


1. Create the sc, pvc, and dc (the creation commands are sketched after the dc output below):
[root@ocp310 ~]# oc get sc standard -o yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  annotations:
    storageclass.beta.kubernetes.io/is-default-class: "true"
  creationTimestamp: 2018-10-11T13:03:16Z
  name: standard
  resourceVersion: "9169"
  selfLink: /apis/storage.k8s.io/v1/storageclasses/standard
  uid: 0483205e-cd56-11e8-8870-0050569f4627
parameters:
  datastore: datastore1
provisioner: kubernetes.io/vsphere-volume
reclaimPolicy: Delete
volumeBindingMode: Immediate

[root@ocp310 ~]# oc get pvc azpvc -o yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  annotations:
    pv.kubernetes.io/bind-completed: "yes"
    pv.kubernetes.io/bound-by-controller: "yes"
    volume.beta.kubernetes.io/storage-provisioner: kubernetes.io/vsphere-volume
  creationTimestamp: 2018-12-03T09:33:38Z
  finalizers:
  - kubernetes.io/pvc-protection
  name: azpvc
  namespace: chaoyang
  resourceVersion: "13829089"
  selfLink: /api/v1/namespaces/chaoyang/persistentvolumeclaims/azpvc
  uid: 83c05f6f-f6de-11e8-8870-0050569f4627
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: standard
  volumeName: pvc-83c05f6f-f6de-11e8-8870-0050569f4627
status:
  accessModes:
  - ReadWriteOnce
  capacity:
    storage: 1Gi
  phase: Bound

[root@ocp310 ~]# oc get dc -o yaml
apiVersion: v1
items:
- apiVersion: apps.openshift.io/v1
  kind: DeploymentConfig
  metadata:
    creationTimestamp: 2018-12-03T09:35:00Z
    generation: 1
    labels:
      run: hello-openshift
    name: vsphere
    namespace: chaoyang
    resourceVersion: "13830498"
    selfLink: /apis/apps.openshift.io/v1/namespaces/chaoyang/deploymentconfigs/vsphere
    uid: b49711bb-f6de-11e8-8870-0050569f4627
  spec:
    replicas: 1
    revisionHistoryLimit: 10
    selector:
      run: hello-openshift
    strategy:
      activeDeadlineSeconds: 21600
      recreateParams:
        timeoutSeconds: 600
      resources: {}
      type: Recreate
    template:
      metadata:
        creationTimestamp: null
        labels:
          run: hello-openshift
      spec:
        containers:
        - image: aosqe/hello-openshift
          imagePullPolicy: IfNotPresent
          name: hello-openshift
          ports:
          - containerPort: 8080
            protocol: TCP
          resources: {}
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          volumeMounts:
          - mountPath: /mnt/azure
            name: azure
        dnsPolicy: ClusterFirst
        restartPolicy: Always
        schedulerName: default-scheduler
        securityContext: {}
        terminationGracePeriodSeconds: 30
        volumes:
        - name: azure
          persistentVolumeClaim:
            claimName: azpvc
    test: false
    triggers:
    - type: ConfigChange
  status:
    availableReplicas: 0
    conditions:
    - lastTransitionTime: 2018-12-03T09:35:38Z
      lastUpdateTime: 2018-12-03T09:35:38Z
      message: replication controller "vsphere-1" successfully rolled out
      reason: NewReplicationControllerAvailable
      status: "True"
      type: Progressing
    - lastTransitionTime: 2018-12-03T09:48:09Z
      lastUpdateTime: 2018-12-03T09:48:09Z
      message: Deployment config does not have minimum availability.
      status: "False"
      type: Available
    details:
      causes:
      - type: ConfigChange
      message: config change
    latestVersion: 1
    observedGeneration: 1
    replicas: 1
    unavailableReplicas: 1
    updatedReplicas: 1
kind: List
metadata:
  resourceVersion: ""
  selfLink: ""
2. Wait for the pod to be running (a node-status check is sketched after the pod listing below):
NAME              READY     STATUS    RESTARTS   AGE       IP            NODE                         NOMINATED NODE
vsphere-1-4lgdp   1/1       Running   0          1m        10.129.0.19   ocp310.node1.vsphere.local   <none>
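
Before the node is powered off, the attachment can be cross-checked in the node status. This is a sketch: volumesAttached is the standard node-status field maintained by the attach/detach controller, but the exact volume-name string in the comment is illustrative.

[root@ocp310 ~]# oc get node ocp310.node1.vsphere.local -o jsonpath='{.status.volumesAttached}'
# Expect an entry whose name starts with kubernetes.io/vsphere-volume/ and references
# pvc-83c05f6f-f6de-11e8-8870-0050569f4627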

3. Power off the instance ocp310.node1.vsphere.local (a govc sketch follows the node listing below)
[root@ocp310 ~]# oc get nodes
NAME                          STATUS     ROLES            AGE       VERSION
ocp310.master.vsphere.local   Ready      compute,master   52d       v1.11.0+d4cacc0
ocp310.node1.vsphere.local    NotReady   compute          52d       v1.11.0+d4cacc0
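
The power-off in step 3 is done on the vSphere side, either from vCenter or with govc roughly as below. The GOVC_URL credentials, the vcenter.example.com host name, and the assumption that the VM name matches the node name are all illustrative.

export GOVC_URL='https://administrator@vsphere.local:PASSWORD@vcenter.example.com/sdk'  # hypothetical vCenter endpoint and credentials
export GOVC_INSECURE=1                                                                  # skip TLS verification in a lab setup
govc vm.power -off ocp310.node1.vsphere.local                                           # assumes the VM name equals the node name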

4. Delete the pod 
[root@ocp310 ~]# oc delete pod vsphere-1-4kwj7 --force=true --grace-period=0
5. Verify that the pod is rescheduled to another node (a detach/attach verification sketch follows the listing below):
[root@ocp310 ~]# oc get pods -o wide
NAME              READY     STATUS    RESTARTS   AGE       IP            NODE                          NOMINATED NODE
vsphere-1-t2lhp   1/1       Running   0          8m        10.128.0.95   ocp310.master.vsphere.local   <none>
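
To confirm that the controller actually detached the disk from the powered-off node and re-attached it to the new node, the attach events and the two nodes' volumesAttached status can be compared. This is a sketch; the grep pattern and the expected event wording are illustrative.

[root@ocp310 ~]# oc get events -n chaoyang | grep -i attach
# Expect a SuccessfulAttachVolume event for the new pod on ocp310.master.vsphere.local
[root@ocp310 ~]# oc get node ocp310.node1.vsphere.local -o jsonpath='{.status.volumesAttached}'
# The vSphere volume should no longer be listed for the powered-off node
[root@ocp310 ~]# oc get node ocp310.master.vsphere.local -o jsonpath='{.status.volumesAttached}'
# The volume should now appear here instead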

Comment 7 errata-xmlrpc 2018-12-12 14:15:51 UTC
Since the problem described in this bug report should be
resolved in a recent advisory, it has been closed with a
resolution of ERRATA.

For information on the advisory, and where to find the updated
files, follow the link below.

If the solution does not work for you, open a new bug report.

https://access.redhat.com/errata/RHBA-2018:3743

