Description of problem: Emptydir volume is not limited because gquota is not enabled Version-Release number of selected component (if applicable): dev-preview-int How reproducible: Always Steps to Reproduce: 1. Create a pod using emptydir volume oc get pods t1 -o yaml apiVersion: v1 kind: Pod metadata: annotations: kubernetes.io/limit-ranger: 'LimitRanger plugin set: cpu, memory request for container hello-openshift; cpu, memory limit for container hello-openshift' openshift.io/scc: restricted creationTimestamp: 2016-03-18T05:47:43Z labels: name: hello-openshift name: t1 namespace: chao resourceVersion: "1964161" selfLink: /api/v1/namespaces/chao/pods/t1 uid: ef5fb9ee-eccc-11e5-b683-0aa949b0be09 spec: containers: - image: aosqe/hello-openshift imagePullPolicy: IfNotPresent name: hello-openshift ports: - containerPort: 8080 protocol: TCP resources: limits: cpu: "1" memory: 512Mi requests: cpu: 60m memory: 307Mi securityContext: capabilities: drop: - KILL - MKNOD - SETGID - SETUID - SYS_CHROOT privileged: false runAsUser: 1005860000 seLinuxOptions: level: s0:c77,c4 terminationMessagePath: /dev/termination-log volumeMounts: - mountPath: /tmp name: tmp - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: default-token-2t4kw readOnly: true dnsPolicy: ClusterFirst host: ip-172-31-15-139.ec2.internal imagePullSecrets: - name: default-dockercfg-r4z6e nodeName: ip-172-31-15-139.ec2.internal nodeSelector: type: compute restartPolicy: Always securityContext: fsGroup: 1005860000 seLinuxOptions: level: s0:c77,c4 serviceAccount: default serviceAccountName: default terminationGracePeriodSeconds: 30 volumes: - emptyDir: {} name: tmp - name: default-token-2t4kw secret: secretName: default-token-2t4kw status: conditions: - lastProbeTime: null lastTransitionTime: 2016-03-18T05:47:46Z status: "True" type: Ready containerStatuses: - containerID: docker://d917a4a9c71457ba045aac4bdaf3f1b4b6b6e17e5ecf8e4517da0c7d1b250c57 image: aosqe/hello-openshift imageID: 
docker://05cb6f5c32bbb698cc4506619cc5483af99e69f11bfc0d1beaebc22e27c6c85f lastState: {} name: hello-openshift ready: true restartCount: 0 state: running: startedAt: 2016-03-18T05:47:46Z hostIP: 172.31.15.139 phase: Running podIP: 10.1.0.29 startTime: 2016-03-18T05:47:43Z 2. Check the volume size on the node [root@dev-preview-int-node-compute-74304 ~]# xfs_quota -x -c 'report -n -L 1005860000 -U 1005870000' /var/lib/origin/openshift.local.volumes xfs_quota: cannot setup path for mount /var/lib/origin/openshift.local.volumes: No such device or address Actual results: Emptydir volume is not limited Expected results: The command in step 2 should work, and the size should be listed Additional info: Emptydir volume size limit requires that the openshift node volume directory resides on an XFS partition which has been mounted with gquota enabled [root@dev-preview-int-node-compute-74304 ~]# mount sysfs on /sys type sysfs (rw,nosuid,nodev,noexec,relatime,seclabel) proc on /proc type proc (rw,nosuid,nodev,noexec,relatime) devtmpfs on /dev type devtmpfs (rw,nosuid,seclabel,size=8113740k,nr_inodes=2028435,mode=755) securityfs on /sys/kernel/security type securityfs (rw,nosuid,nodev,noexec,relatime) tmpfs on /dev/shm type tmpfs (rw,nosuid,nodev,seclabel) devpts on /dev/pts type devpts (rw,nosuid,noexec,relatime,seclabel,gid=5,mode=620,ptmxmode=000) tmpfs on /run type tmpfs (rw,nosuid,nodev,seclabel,mode=755) tmpfs on /sys/fs/cgroup type tmpfs (ro,nosuid,nodev,noexec,seclabel,mode=755) cgroup on /sys/fs/cgroup/systemd type cgroup (rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd) pstore on /sys/fs/pstore type pstore (rw,nosuid,nodev,noexec,relatime) cgroup on /sys/fs/cgroup/cpu,cpuacct type cgroup (rw,nosuid,nodev,noexec,relatime,cpuacct,cpu) cgroup on /sys/fs/cgroup/freezer type cgroup (rw,nosuid,nodev,noexec,relatime,freezer) cgroup on /sys/fs/cgroup/memory type cgroup (rw,nosuid,nodev,noexec,relatime,memory) cgroup on 
/sys/fs/cgroup/hugetlb type cgroup (rw,nosuid,nodev,noexec,relatime,hugetlb) cgroup on /sys/fs/cgroup/devices type cgroup (rw,nosuid,nodev,noexec,relatime,devices) cgroup on /sys/fs/cgroup/blkio type cgroup (rw,nosuid,nodev,noexec,relatime,blkio) cgroup on /sys/fs/cgroup/perf_event type cgroup (rw,nosuid,nodev,noexec,relatime,perf_event) cgroup on /sys/fs/cgroup/cpuset type cgroup (rw,nosuid,nodev,noexec,relatime,cpuset) cgroup on /sys/fs/cgroup/net_cls type cgroup (rw,nosuid,nodev,noexec,relatime,net_cls) configfs on /sys/kernel/config type configfs (rw,relatime) /dev/xvda2 on / type xfs (rw,relatime,seclabel,attr2,inode64,noquota) selinuxfs on /sys/fs/selinux type selinuxfs (rw,relatime) debugfs on /sys/kernel/debug type debugfs (rw,relatime) hugetlbfs on /dev/hugepages type hugetlbfs (rw,relatime,seclabel) mqueue on /dev/mqueue type mqueue (rw,relatime,seclabel) nfsd on /proc/fs/nfsd type nfsd (rw,relatime) systemd-1 on /proc/sys/fs/binfmt_misc type autofs (rw,relatime,fd=32,pgrp=1,timeout=300,minproto=5,maxproto=5,direct) /dev/xvda3 on /var type xfs (rw,relatime,seclabel,attr2,inode64,noquota) sunrpc on /var/lib/nfs/rpc_pipefs type rpc_pipefs (rw,relatime) tmpfs on /run/user/0 type tmpfs (rw,nosuid,nodev,relatime,seclabel,size=1600528k,mode=700) tmpfs on /var/lib/origin/openshift.local.volumes/pods/130a41bc-eba6-11e5-b346-0aa949b0be09/volumes/kubernetes.io~secret/secret type tmpfs (rw,relatime,rootcontext="system_u:object_r:svirt_sandbox_file_t:s0:c0,c5",seclabel)
Chao I think the issue here is that the directory used in the xfs_quota report command must be the actual device mount point, or the device itself. If the volume is not mounted directly at the volumeDirectory location, the testing command will fail. An example: (root@wrx ~) $ mount /dev/mapper/test-openshift--dev--vol--dir on /home/dgoodwin/openshift/test type xfs (rw,relatime,seclabel,attr2,inode64,grpquota) (root@wrx ~) $ xfs_quota -x -c 'report -n -L 1005860000 -U 1005870000' /home/dgoodwin/openshift/test (root@wrx ~) $ xfs_quota -x -c 'report -n -L 1005860000 -U 1005870000' /dev/mapper/test-openshift--dev--vol--dir (root@wrx ~) $ xfs_quota -x -c 'report -n -L 1005860000 -U 1005870000' /home/dgoodwin/openshift/test/openshift.local.config xfs_quota: cannot setup path for mount /home/dgoodwin/openshift/test/openshift.local.config: No such device or address So your command above will only work if the filesystem device is mounted directly at /var/lib/origin/openshift.local.volumes. In this deployment that may not be the case. This should help: (1) Double check the volumeDirectory in node-config.yaml, they may not be using /var/lib/origin/openshift.local.volumes in this environment. (2) Find out what filesystem device this directory is on: df --output=source /home/dgoodwin/openshift/test $ df --output=source /home/dgoodwin/openshift/test Filesystem /dev/mapper/test-openshift--dev--vol--dir Now use this device in the xfs_quota report command instead of the directory.
Hi, I think this is because no grpquota-enabled device is mounted to the node. The EBS volume is mounted to the node as "noquota". So we need to change the config first on the online env
Chao Ops indicates that the environment should be rebuilt with the correct mount options now.
1. Create a pod using emptydir apiVersion: v1 kind: Pod metadata: annotations: kubernetes.io/limit-ranger: 'LimitRanger plugin set: cpu, memory request for container hello-openshift; cpu, memory limit for container hello-openshift' openshift.io/scc: restricted creationTimestamp: 2016-03-25T01:29:37Z labels: name: hello-openshift name: t1 namespace: chao resourceVersion: "95882" selfLink: /api/v1/namespaces/chao/pods/t1 uid: 09d29f99-f229-11e5-9454-0af496bbcb03 spec: containers: - image: aosqe/hello-openshift imagePullPolicy: IfNotPresent name: hello-openshift ports: - containerPort: 8080 protocol: TCP resources: limits: cpu: "1" memory: 512Mi requests: cpu: 60m memory: 322122547200m securityContext: capabilities: drop: - KILL - MKNOD - SETGID - SETUID - SYS_CHROOT privileged: false runAsUser: 1000060000 seLinuxOptions: level: s0:c8,c2 terminationMessagePath: /dev/termination-log volumeMounts: - mountPath: /tmp name: tmp - mountPath: /var/run/secrets/kubernetes.io/serviceaccount name: default-token-sxmwa readOnly: true dnsPolicy: ClusterFirst host: ip-172-31-6-100.ec2.internal imagePullSecrets: - name: default-dockercfg-g4afv nodeName: ip-172-31-6-100.ec2.internal nodeSelector: type: compute restartPolicy: Always securityContext: fsGroup: 1000060000 seLinuxOptions: level: s0:c8,c2 serviceAccount: default serviceAccountName: default terminationGracePeriodSeconds: 30 volumes: - emptyDir: {} name: tmp - name: default-token-sxmwa secret: secretName: default-token-sxmwa status: conditions: - lastProbeTime: null lastTransitionTime: 2016-03-25T01:29:44Z status: "True" type: Ready containerStatuses: - containerID: docker://5f18e2bff585df20bb3134a9b16ca0479d2bf1911ae68449b82ab3326c321211 image: aosqe/hello-openshift imageID: docker://cddcd4ab363acd31256ed7880d4b669fa45227e49eec41429f80a4f252dfb0da lastState: {} name: hello-openshift ready: true restartCount: 0 state: running: startedAt: 2016-03-25T01:29:43Z hostIP: 172.31.6.100 phase: Running podIP: 10.1.0.3 startTime: 
2016-03-25T01:29:37Z 2. [root@dev-preview-int-node-compute-4f437 ~]# xfs_quota -x -c 'report -n -L 1000060000 -U 1000070000' /var Group quota on /var (/dev/xvda3) Blocks Group ID Used Soft Hard Warn/Grace ---------- -------------------------------------------------- #1000060000 0 262144 262144 00 [--------] [root@dev-preview-int-node-compute-4f437 ~]# So the online server has grpquota enabled