Bug 1480658 - ceph: OSP12 using ceph-ansible broke the storage nodes setup: Running command: /usr/sbin/mkfs -t xfs -f -i size=2048 -f -- /dev/vdb1", "/dev/vdb1: No such file or directory [NEEDINFO]
Status: POST
Product: Red Hat Ceph Storage
Classification: Red Hat
Component: Ceph-Disk
Version: 2.4
Hardware: Unspecified
OS: Unspecified
Priority: unspecified
Severity: unspecified
Target Milestone: rc
Target Release: 2.5
Assigned To: Loic Dachary
QA Contact: Warren
Depends On:
Blocks: 1484447
Reported: 2017-08-11 11:26 EDT by Alexander Chuzhoy
Modified: 2017-10-04 15:47 EDT
CC: 11 users

See Also:
Fixed In Version:
Doc Type: If docs needed, set a value
Doc Text:
Story Points: ---
Clone Of:
Environment:
Last Closed:
Type: Bug
Regression: ---
Mount Type: ---
Documentation: ---
CRM:
Verified Versions:
Flags: johfulto: needinfo? (wusui)




External Trackers:
  Tracker: Ceph Project Bug Tracker, ID: 19428, Priority: None, Status: None, Summary: None, Last Updated: 2017-08-11 14:03 EDT

Description Alexander Chuzhoy 2017-08-11 11:26:52 EDT
ceph: OSP12 using ceph-ansible broke the storage nodes setup: Running command: /usr/sbin/mkfs -t xfs -f -i size=2048 -f -- /dev/vdb1", "/dev/vdb1: No such file or directory


Environment:
python-cephfs-10.2.7-28.el7cp.x86_64
ceph-mon-10.2.7-28.el7cp.x86_64
ceph-mds-10.2.7-28.el7cp.x86_64
libcephfs1-10.2.7-28.el7cp.x86_64
puppet-ceph-2.3.1-0.20170805094345.868e6d6.el7ost.noarch
ceph-common-10.2.7-28.el7cp.x86_64
ceph-osd-10.2.7-28.el7cp.x86_64
ceph-selinux-10.2.7-28.el7cp.x86_64
ceph-base-10.2.7-28.el7cp.x86_64
ceph-radosgw-10.2.7-28.el7cp.x86_64
openstack-tripleo-heat-templates-7.0.0-0.20170805163045.el7ost.noarch
instack-undercloud-7.2.1-0.20170729010705.el7ost.noarch
openstack-puppet-modules-10.0.0-0.20170315222135.0333c73.el7.1.noarch

Steps to reproduce:
Try to deploy the overcloud (OC) with Ceph nodes.

Result:
Deployment fails.

f0d55f77-09a2-44c9-92d6-7012744167d5
"Failed to run action [action_ex_id=8caa3119-83df-42ff-848f-9f666127a562, action_cls='<class 'mistral.actions.action_factory.AnsiblePlaybookAction'>', attributes='{}', params='{u'remote_user': u'tripleo-admin', u'ssh_extra_args': u'-o UserKnownHostsFile=/dev/null', u'inventory': {u'mdss': {u'hosts': {}}, u'rgws': {u'hosts': {}}, u'mons': {u'hosts': {u'192.168.24.8': {}, u'192.168.24.13': {}, u'192.168.24.14': {}}}, u'clients': {u'hosts': {u'192.168.24.6': {}, u'192.168.24.16': {}}}, u'osds': {u'hosts': {u'192.168.24.11': {}, u'192.168.24.20': {}, u'192.168.24.10': {}}}}, u'become_user': u'root', u'extra_vars': {u'monitor_secret': u'AQDAxIxZAAAAABAADX6y419VkBUeTlP2cvQKLw==', u'ceph_conf_overrides': {u'global': {u'osd_pool_default_pg_num': 32, u'osd_pool_default_size': 3}}, u'fetch_directory': u'/tmp/file-mistral-actionh8nqFY', u'user_config': True, u'ceph_docker_image_tag': u'latest', u'containerized_deployment': True, u'public_network': u'172.17.3.0/24', u'generate_fsid': False, u'monitor_address_block': u'172.17.3.0/24', u'monitor_interface': u'br_ex', u'admin_secret': u'AQDAxIxZAAAAABAA4Xv7qlxhIIg0YANzrPfeWA==', u'keys': [{u'mon_cap': u'allow r', u'osd_cap': u'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=backups, allow rwx pool=vms, allow rwx pool=images, allow rwx pool=metrics', u'name': u'client.openstack', u'key': u'AQDAxIxZAAAAABAAvfk0xx/BVtEgaOKbB4chjQ==', u'mode': u'0644'}], u'openstack_keys': [{u'mon_cap': u'allow r', u'osd_cap': u'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=backups, allow rwx pool=vms, allow rwx pool=images, allow rwx pool=metrics', u'name': u'client.openstack', u'key': u'AQDAxIxZAAAAABAAvfk0xx/BVtEgaOKbB4chjQ==', u'mode': u'0644'}], u'journal_collocation': True, u'ntp_service_enabled': False, u'ceph_docker_image': u'rhceph-2-rhel7', u'docker': True, u'fsid': u'279922ae-7e0c-11e7-bf80-525400127838', u'journal_size': 512, u'openstack_config': True, u'ceph_docker_registry': u'192.168.24.1:8787/ceph', u'ceph_stable': True, u'devices': [u'/dev/vdb'], u'ceph_origin': u'distro', u'openstack_pools': [{u'rule_name': u'', u'pg_num': 32, u'name': u'volumes'}, {u'rule_name': u'', u'pg_num': 32, u'name': u'backups'}, {u'rule_name': u'', u'pg_num': 32, u'name': u'vms'}, {u'rule_name': u'', u'pg_num': 32, u'name': u'images'}, {u'rule_name': u'', u'pg_num': 32, u'name': u'metrics'}], u'pools': [], u'cluster_network': u'172.17.4.0/24', u'ip_version': u'ipv4'}, u'verbosity': 0, u'extra_env_variables': {u'ANSIBLE_ACTION_PLUGINS': u'/usr/share/ceph-ansible/plugins/actions/', u'ANSIBLE_LIBRARY': u'/usr/share/ceph-ansible/library', u'ANSIBLE_CONFIG': u'/usr/share/ceph-ansible/ansible.cfg'}, u'skip_tags': u'package-install,with_pkg', u'ssh_private_key': u'-----BEGIN RSA PRIVATE KEY-----\
MIIEpAIBAAKCAQEA1ZcKa9D25pwxQSi42DQ3Akzv+14eRqIm3IRn/T9NSBFc5Mim\
n6jn5Wv9q69HRqMTV29WgymE53IjSzDTUkNh2Rp6tLH7AiLIB5lgZ/TiMp0Z/Ywx\
H0eAMQAmjP51y09hjH8GrDBpF4pGyzdxr+qyZ6s10I71zkJO7Ll4GfRuLBPBEcP6\
ZOw5qdUexnZdAV/ZWkTKO0GEAsYFGeS/P7MVqTtOi+v9b9DETdv9JfEl9ZGub8cX\
pzAcA3dqpEu0S99AHtd6scJfhkd/CjU4SnPEi096MepTwkN8YwJL3BBDA/WlFUjt\
B3U2dfOqfgEQbJNbDh0Mpty5LxLXSLLYWVgJoQIDAQABAoIBADLj9QCxXlKd/oJW\
8IvbDQxIRpmu2CneMsIx7hECByjXsjl0sXazKOWZ8Gv6rFHhQxd/GK7Rsq/umwgs\
Tv13vpWa4IezLGpa3SwqWDUMvORwWCP4Ri/8ukdykLzhWDGj7UsrbOyT8EZClRGQ\
WU8f7XgT7BOQLHMDi4iNvFb6zVFEyCPaE2djXIVAfPnMlT2zOc2U7vZLydjrob93\
t5LHEhIvpMeF+9fjTyTURk8aDT1mA1Jk0NvrLSSs/kfVMxAPgKltgq+oJ7LmHyjF\
col4Irf9/dDom76Pm/MZX1rPj30xQWpVO/hEr9Cjw8aYZlQqdlYiFyhq237miKmh\
gYtfF20CgYEA81xiEA3RGWVCVPDhxRlSizddQRzlbGztLrqY3pdw4sHfF6fJic2x\
RwYr0+BxiC8CTBapv3aD4p5m/9zbH3T3uUtsXppzT2g+qb3I70H3lW6je0nvMXw6\
APAT8GAjuPuKopKg7Dy5zP/Lx/Hopiv2DcXIPYiYEfsb1Ofj6oHaVpsCgYEA4K7W\
aRQW0NfHhpiEqEN1wz7P6yuYHCoHa2m3Ni3hKY3/nm+fb/JWuM5g4AwpOma2uBnc\
x2HGD5dcNaW5jhSnNzLuDPj7TDPGy5QOlfDqSU7s/JRtfbxVeo1SKNrZd6zTf5MG\
Edmwgh0PAfg20/zTn//YH8mGWOgoANFnJiiqhnMCgYEAluJLZnq6jw1KEBH9WM+3\
bttXtTwuPohWkeddSn0In37mS+tauFbQuCFqYQZCPdYvjW9rtWrMoF/nqnGLZjiG\
MttBiHIoVi5i06j4gOcckwiy0YjohXzpFZXENv/UhidnMB7LKU0JQcHC3eftf8G3\
35gl64jaCCPxgJJ5q/bwoUsCgYAGgcm/3U8T2Uvz//LHlLySXP1UiDythCIW4Ebo\
EMgWlgAzLHRnn5MXYxK3KmL/R/4+srEMLS65QN7wxs3q6Xo2RQlT4toUSOOW/cC2\
YvXGDb0bS4koPQ4UsIcSm9P0fVLavP1OAwnzmrfWjMO5u/a9CYHO2d1EMeUGsQnV\
txPJNQKBgQCEo2U5funOfg2NAuZ8+ePMt09vmB/Nw4rEpKarH/sPQzjJOXoh09PM\
SHzGo+YWeDf2ly+Id0TrHONXN2MLo/0LtD1Murt/mW/0TYNMy2OBms6firhnXLgQ\
gFCjSoa385BtoqZuE10P5emhGTSuLcWBuOHbajkQNvPwSIKsNDfNow==\
-----END RSA PRIVATE KEY-----\
', u'become': True, u'forks': 8, u'ssh_common_args': u'-o StrictHostKeyChecking=no', u'playbook': u'/usr/share/ceph-ansible/site-docker.yml.sample'}']
 Unexpected error while running command.
Command: ansible-playbook /usr/share/ceph-ansible/site-docker.yml.sample --user tripleo-admin --become --become-user root --extra-vars {"monitor_secret": "***", "ceph_conf_overrides": {"global": {"osd_pool_default_pg_num": 32, "osd_pool_default_size": 3}}, "fetch_directory": "/tmp/file-mistral-actionh8nqFY", "user_config": true, "ceph_docker_image_tag": "latest", "containerized_deployment": true, "public_network": "172.17.3.0/24", "generate_fsid": false, "monitor_address_block": "172.17.3.0/24", "monitor_interface": "br_ex", "admin_secret": "***", "keys": [{"mon_cap": "allow r", "osd_cap": "allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=backups, allow rwx pool=vms, allow rwx pool=images, allow rwx pool=metrics", "name": "client.openstack", "key": "AQDAxIxZAAAAABAAvfk0xx/BVtEgaOKbB4chjQ==", "mode": "0644"}], "openstack_keys": [{"mon_cap": "allow r", "osd_cap": "allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=backups, allow rwx pool=vms, allow rwx pool=images, allow rwx pool=metrics", "name": "client.openstack", "key": "AQDAxIxZAAAAABAAvfk0xx/BVtEgaOKbB4chjQ==", "mode": "0644"}], "journal_collocation": true, "ntp_service_enabled": false, "ceph_docker_image": "rhceph-2-rhel7", "docker": true, "fsid": "279922ae-7e0c-11e7-bf80-525400127838", "journal_size": 512, "openstack_config": true, "ceph_docker_registry": "192.168.24.1:8787/ceph", "ceph_stable": true, "devices": ["/dev/vdb"], "ceph_origin": "distro", "openstack_pools": [{"rule_name": "", "pg_num": 32, "name": "volumes"}, {"rule_name": "", "pg_num": 32, "name": "backups"}, {"rule_name": "", "pg_num": 32, "name": "vms"}, {"rule_name": "", "pg_num": 32, "name": "images"}, {"rule_name": "", "pg_num": 32, "name": "metrics"}], "pools": [], "cluster_network": "172.17.4.0/24", "ip_version": "ipv4"} --forks 8 --ssh-common-args "-o StrictHostKeyChecking=no" --ssh-extra-args "-o UserKnownHostsFile=/dev/null" --inventory-file /tmp/ansible-mistral-actionSmVs7s/inventory.yaml --private-key /tmp/ansible-mistral-actionSmVs7s/ssh_private_key --skip-tags package-install,with_pkg
Exit code: 2
Stdout: u'\
Comment 1 Alexander Chuzhoy 2017-08-11 11:31:30 EDT
TASK [ceph-osd : prepare ceph osd disk] ****************************************\
failed: [192.168.24.20] (item=[u\\'/dev/vdb\\', {\\'_ansible_parsed\\': True, \\'stderr_lines\\': [], \\'_ansible_item_result\\': True, u\\'end\\': u\\'2017-08-10 21:34:32.563220\\', \\'_ansible_no_log\\': False, u\\'stdout\\': u\\'\\', u\\'cmd\\': u"lsblk -o PARTLABEL /dev/vdb | grep -sq \\'ceph\\'", u\\'rc\\': 1, \\'item\\': u\\'/dev/vdb\\', u\\'delta\\': u\\'0:00:00.005968\\', u\\'stderr\\': u\\'\\', u\\'changed\\': True, u\\'invocation\\': {u\\'module_args\\': {u\\'warn\\': True, u\\'executable\\': None, u\\'_uses_shell\\': True, u\\'_raw_params\\': u"lsblk -o PARTLABEL /dev/vdb | grep -sq \\'ceph\\'", u\\'removes\\': None, u\\'creates\\': None, u\\'chdir\\': None}}, \\'stdout_lines\\': [], \\'failed_when_result\\': False, u\\'start\\': u\\'2017-08-10 21:34:32.557252\\', \\'failed\\': False}]) => {"changed": true, "cmd": "docker run --net=host --pid=host --privileged=true --name=\\\\"ceph-osd-prepare-overcloud-cephstorage-1-devdevvdb\\\\" -v /etc/ceph:/etc/ceph -v /var/lib/ceph/:/var/lib/ceph/ -v /dev:/dev -v /etc/localtime:/etc/localtime:ro -e \\\\"OSD_DEVICE=/dev/vdb\\\\" -e CEPH_DAEMON=OSD_CEPH_DISK_PREPARE -e CLUSTER=ceph -e OSD_JOURNAL_SIZE=512 -e OSD_FORCE_ZAP=1 \\\\"192.168.24.1:8787/ceph/rhceph-2-rhel7:latest\\\\"", "delta": "0:00:05.897068", "end": "2017-08-10 21:34:38.837396", "failed": true, "item": ["/dev/vdb", {"_ansible_item_result": true, "_ansible_no_log": false, "_ansible_parsed": true, "changed": true, "cmd": "lsblk -o PARTLABEL /dev/vdb | grep -sq \\'ceph\\'", "delta": "0:00:00.005968", "end": "2017-08-10 21:34:32.563220", "failed": false, "failed_when_result": false, "invocation": {"module_args": {"_raw_params": "lsblk -o PARTLABEL /dev/vdb | grep -sq \\'ceph\\'", "_uses_shell": true, "chdir": null, "creates": null, "executable": null, "removes": null, "warn": true}}, "item": "/dev/vdb", "rc": 1, "start": "2017-08-10 21:34:32.557252", "stderr": "", "stderr_lines": [], "stdout": "", "stdout_lines": []}], "rc": 1, "start": "2017-08-10 21:34:32.940328", "stderr": "get_dm_uuid: get_dm_uuid /dev/vdb uuid path is /sys/dev/block/252:16/dm/uuid\\\
zap: Zapping partition table on /dev/vdb\\\
command_check_call: Running command: /usr/sbin/sgdisk --zap-all -- /dev/vdb\\\
command_check_call: Running command: /usr/sbin/sgdisk --clear --mbrtogpt -- /dev/vdb\\\
update_partition: Calling partprobe on zapped device /dev/vdb\\\
command_check_call: Running command: /usr/bin/udevadm settle --timeout=600\\\
command: Running command: /usr/bin/flock -s /dev/vdb /usr/sbin/partprobe /dev/vdb\\\
command_check_call: Running command: /usr/bin/udevadm settle --timeout=600\\\
command: Running command: /usr/bin/ceph-osd --cluster=ceph --show-config-value=fsid\\\
command: Running command: /usr/bin/ceph-osd --check-allows-journal -i 0 --log-file $run_dir/$cluster-osd-check.log --cluster ceph --setuser ceph --setgroup ceph\\\
command: Running command: /usr/bin/ceph-osd --check-wants-journal -i 0 --log-file $run_dir/$cluster-osd-check.log --cluster ceph --setuser ceph --setgroup ceph\\\
command: Running command: /usr/bin/ceph-osd --check-needs-journal -i 0 --log-file $run_dir/$cluster-osd-check.log --cluster ceph --setuser ceph --setgroup ceph\\\
get_dm_uuid: get_dm_uuid /dev/vdb uuid path is /sys/dev/block/252:16/dm/uuid\\\
set_type: Will colocate journal with data on /dev/vdb\\\
command: Running command: /usr/bin/ceph-osd --cluster=ceph --show-config-value=osd_journal_size\\\
get_dm_uuid: get_dm_uuid /dev/vdb uuid path is /sys/dev/block/252:16/dm/uuid\\\
get_dm_uuid: get_dm_uuid /dev/vdb uuid path is /sys/dev/block/252:16/dm/uuid\\\
get_dm_uuid: get_dm_uuid /dev/vdb uuid path is /sys/dev/block/252:16/dm/uuid\\\
command: Running command: /usr/bin/ceph-conf --cluster=ceph --name=osd. --lookup osd_mkfs_type\\\
command: Running command: /usr/bin/ceph-conf --cluster=ceph --name=osd. --lookup osd_mkfs_options_xfs\\\
command: Running command: /usr/bin/ceph-conf --cluster=ceph --name=osd. --lookup osd_mount_options_xfs\\\
get_dm_uuid: get_dm_uuid /dev/vdb uuid path is /sys/dev/block/252:16/dm/uuid\\\
get_dm_uuid: get_dm_uuid /dev/vdb uuid path is /sys/dev/block/252:16/dm/uuid\\\
ptype_tobe_for_name: name = journal\\\
get_dm_uuid: get_dm_uuid /dev/vdb uuid path is /sys/dev/block/252:16/dm/uuid\\\
create_partition: Creating journal partition num 2 size 512 on /dev/vdb\\\
command_check_call: Running command: /usr/sbin/sgdisk --new=2:0:+512M --change-name=2:ceph journal --partition-guid=2:cd623927-69db-4fc4-b49b-5b3cace42b63 --typecode=2:45b0969e-9b03-4f30-b4c6-b4b80ceff106 --mbrtogpt -- /dev/vdb\\\
update_partition: Calling partprobe on created device /dev/vdb\\\
command_check_call: Running command: /usr/bin/udevadm settle --timeout=600\\\
command: Running command: /usr/bin/flock -s /dev/vdb /usr/sbin/partprobe /dev/vdb\\\
command_check_call: Running command: /usr/bin/udevadm settle --timeout=600\\\
get_dm_uuid: get_dm_uuid /dev/vdb uuid path is /sys/dev/block/252:16/dm/uuid\\\
get_dm_uuid: get_dm_uuid /dev/vdb uuid path is /sys/dev/block/252:16/dm/uuid\\\
get_dm_uuid: get_dm_uuid /dev/vdb2 uuid path is /sys/dev/block/252:18/dm/uuid\\\
prepare_device: Journal is GPT partition /dev/disk/by-partuuid/cd623927-69db-4fc4-b49b-5b3cace42b63\\\
prepare_device: Journal is GPT partition /dev/disk/by-partuuid/cd623927-69db-4fc4-b49b-5b3cace42b63\\\
get_dm_uuid: get_dm_uuid /dev/vdb uuid path is /sys/dev/block/252:16/dm/uuid\\\
set_data_partition: Creating osd partition on /dev/vdb\\\
get_dm_uuid: get_dm_uuid /dev/vdb uuid path is /sys/dev/block/252:16/dm/uuid\\\
ptype_tobe_for_name: name = data\\\
get_dm_uuid: get_dm_uuid /dev/vdb uuid path is /sys/dev/block/252:16/dm/uuid\\\
create_partition: Creating data partition num 1 size 0 on /dev/vdb\\\
command_check_call: Running command: /usr/sbin/sgdisk --largest-new=1 --change-name=1:ceph data --partition-guid=1:84c3e31e-033b-484d-a2b0-2c826f7d16c3 --typecode=1:89c57f98-2fe5-4dc0-89c1-f3ad0ceff2be --mbrtogpt -- /dev/vdb\\\
update_partition: Calling partprobe on created device /dev/vdb\\\
command_check_call: Running command: /usr/bin/udevadm settle --timeout=600\\\
command: Running command: /usr/bin/flock -s /dev/vdb /usr/sbin/partprobe /dev/vdb\\\
command_check_call: Running command: /usr/bin/udevadm settle --timeout=600\\\
get_dm_uuid: get_dm_uuid /dev/vdb uuid path is /sys/dev/block/252:16/dm/uuid\\\
get_dm_uuid: get_dm_uuid /dev/vdb uuid path is /sys/dev/block/252:16/dm/uuid\\\
get_dm_uuid: get_dm_uuid /dev/vdb1 uuid path is /sys/dev/block/252:17/dm/uuid\\\
populate_data_path_device: Creating xfs fs on /dev/vdb1\\\
command_check_call: Running command: /usr/sbin/mkfs -t xfs -f -i size=2048 -f -- /dev/vdb1\\\
/dev/vdb1: No such file or directory\\\
Usage: mkfs.xfs\\\
/* blocksize */\\\\t\\\\t[-b log=n|size=num]\\\
/* metadata */\\\\t\\\\t[-m crc=0|1,finobt=0|1,uuid=xxx]\\\
/* data subvol */\\\\t[-d agcount=n,agsize=n,file,name=xxx,size=num,\\\
\\\\t\\\\t\\\\t    (sunit=value,swidth=value|su=num,sw=num|noalign),\\\
\\\\t\\\\t\\\\t    sectlog=n|sectsize=num\\\
/* force overwrite */\\\\t[-f]\\\
/* inode size */\\\\t[-i log=n|perblock=n|size=num,maxpct=n,attr=0|1|2,\\\
\\\\t\\\\t\\\\t    projid32bit=0|1]\\\
/* no discard */\\\\t[-K]\\\
/* log subvol */\\\\t[-l agnum=n,internal,size=num,logdev=xxx,version=n\\\
\\\\t\\\\t\\\\t    sunit=value|su=num,sectlog=n|sectsize=num,\\\
\\\\t\\\\t\\\\t    lazy-count=0|1]\\\
/* label */\\\\t\\\\t[-L label (maximum 12 characters)]\\\
/* naming */\\\\t\\\\t[-n log=n|size=num,version=2|ci,ftype=0|1]\\\
/* no-op info only */\\\\t[-N]\\\
/* prototype file */\\\\t[-p fname]\\\
/* quiet */\\\\t\\\\t[-q]\\\
/* realtime subvol */\\\\t[-r extsize=num,size=num,rtdev=xxx]\\\
/* sectorsize */\\\\t[-s log=n|size=num]\\\
/* version */\\\\t\\\\t[-V]\\\
\\\\t\\\\t\\\\tdevicename\\\
<devicename> is required unless -d name=xxx is given.\\\
<num> is xxx (bytes), xxxs (sectors), xxxb (fs blocks), xxxk (xxx KiB),\\\
      xxxm (xxx MiB), xxxg (xxx GiB), xxxt (xxx TiB) or xxxp (xxx PiB).\\\
<value> is xxx (512 byte blocks).\\\
Traceback (most recent call last):\\\
  File \\\\"/usr/sbin/ceph-disk\\\\", line 9, in <module>\\\
    load_entry_point(\\'ceph-disk==1.0.0\\', \\'console_scripts\\', \\'ceph-disk\\')()\\\
  File \\\\"/usr/lib/python2.7/site-packages/ceph_disk/main.py\\\\", line 5326, in run\\\
    main(sys.argv[1:])\\\
  File \\\\"/usr/lib/python2.7/site-packages/ceph_disk/main.py\\\\", line 5277, in main\\\
    args.func(args)\\\
  File \\\\"/usr/lib/python2.7/site-packages/ceph_disk/main.py\\\\", line 1879, in main\\\
    Prepare.factory(args).prepare()\\\
  File \\\\"/usr/lib/python2.7/site-packages/ceph_disk/main.py\\\\", line 1868, in prepare\\\
    self.prepare_locked()\\\
  File \\\\"/usr/lib/python2.7/site-packages/ceph_disk/main.py\\\\", line 1899, in prepare_locked\\\
    self.data.prepare(self.journal)\\\
  File \\\\"/usr/lib/python2.7/site-packages/ceph_disk/main.py\\\\", line 2566, in prepare\\\
    self.prepare_device(*to_prepare_list)\\\
  File \\\\"/usr/lib/python2.7/site-packages/ceph_disk/main.py\\\\", line 2730, in prepare_device\\\
    self.populate_data_path_device(*to_prepare_list)\\\
  File \\\\"/usr/lib/python2.7/site-packages/ceph_disk/main.py\\\\", line 2685, in populate_data_path_device\\\
    raise Error(e)\\\
ceph_disk.main.Error: Error: Command \\'[\\'/usr/sbin/mkfs\\', \\'-t\\', u\\'xfs\\', u\\'-f\\', u\\'-i\\', u\\'size=2048\\', \\'-f\\', \\'--\\', \\'/dev/vdb1\\']\\' returned non-zero exit status 1", "stderr_lines": ["get_dm_uuid: get_dm_uuid /dev/vdb uuid path is /sys/dev/block/252:16/dm/uuid", "zap: Zapping partition table on /dev/vdb", "command_check_call: Running command: /usr/sbin/sgdisk --zap-all -- /dev/vdb", "command_check_call: Running command: /usr/sbin/sgdisk --clear --mbrtogpt -- /dev/vdb", "update_partition: Calling partprobe on zapped device /dev/vdb", "command_check_call: Running command: /usr/bin/udevadm settle --timeout=600", "command: Running command: /usr/bin/flock -s /dev/vdb /usr/sbin/partprobe /dev/vdb", "command_check_call: Running command: /usr/bin/udevadm settle --timeout=600", "command: Running command: /usr/bin/ceph-osd --cluster=ceph --show-config-value=fsid", "command: Running command: /usr/bin/ceph-osd --check-allows-journal -i 0 --log-file $run_dir/$cluster-osd-check.log --cluster ceph --setuser ceph --setgroup ceph", "command: Running command: /usr/bin/ceph-osd --check-wants-journal -i 0 --log-file $run_dir/$cluster-osd-check.log --cluster ceph --setuser ceph --setgroup ceph", "command: Running command: /usr/bin/ceph-osd --check-needs-journal -i 0 --log-file $run_dir/$cluster-osd-check.log --cluster ceph --setuser ceph --setgroup ceph", "get_dm_uuid: get_dm_uuid /dev/vdb uuid path is /sys/dev/block/252:16/dm/uuid", "set_type: Will colocate journal with data on /dev/vdb", "command: Running command: /usr/bin/ceph-osd --cluster=ceph --show-config-value=osd_journal_size", "get_dm_uuid: get_dm_uuid /dev/vdb uuid path is /sys/dev/block/252:16/dm/uuid", "get_dm_uuid: get_dm_uuid /dev/vdb uuid path is /sys/dev/block/252:16/dm/uuid", "get_dm_uuid: get_dm_uuid /dev/vdb uuid path is /sys/dev/block/252:16/dm/uuid", "command: Running command: /usr/bin/ceph-conf --cluster=ceph --name=osd. --lookup osd_mkfs_type", "command: Running command: /usr/bin/ceph-conf --cluster=ceph --name=osd. --lookup osd_mkfs_options_xfs", "command: Running command: /usr/bin/ceph-conf --cluster=ceph --name=osd. 
--lookup osd_mount_options_xfs", "get_dm_uuid: get_dm_uuid /dev/vdb uuid path is /sys/dev/block/252:16/dm/uuid", "get_dm_uuid: get_dm_uuid /dev/vdb uuid path is /sys/dev/block/252:16/dm/uuid", "ptype_tobe_for_name: name = journal", "get_dm_uuid: get_dm_uuid /dev/vdb uuid path is /sys/dev/block/252:16/dm/uuid", "create_partition: Creating journal partition num 2 size 512 on /dev/vdb", "command_check_call: Running command: /usr/sbin/sgdisk --new=2:0:+512M --change-name=2:ceph journal --partition-guid=2:cd623927-69db-4fc4-b49b-5b3cace42b63 --typecode=2:45b0969e-9b03-4f30-b4c6-b4b80ceff106 --mbrtogpt -- /dev/vdb", "update_partition: Calling partprobe on created device /dev/vdb", "command_check_call: Running command: /usr/bin/udevadm settle --timeout=600", "command: Running command: /usr/bin/flock -s /dev/vdb /usr/sbin/partprobe /dev/vdb", "command_check_call: Running command: /usr/bin/udevadm settle --timeout=600", "get_dm_uuid: get_dm_uuid /dev/vdb uuid path is /sys/dev/block/252:16/dm/uuid", "get_dm_uuid: get_dm_uuid /dev/vdb uuid path is /sys/dev/block/252:16/dm/uuid", "get_dm_uuid: get_dm_uuid /dev/vdb2 uuid path is /sys/dev/block/252:18/dm/uuid", "prepare_device: Journal is GPT partition /dev/disk/by-partuuid/cd623927-69db-4fc4-b49b-5b3cace42b63", "prepare_device: Journal is GPT partition /dev/disk/by-partuuid/cd623927-69db-4fc4-b49b-5b3cace42b63", "get_dm_uuid: get_dm_uuid /dev/vdb uuid path is /sys/dev/block/252:16/dm/uuid", "set_data_partition: Creating osd partition on /dev/vdb", "get_dm_uuid: get_dm_uuid /dev/vdb uuid path is /sys/dev/block/252:16/dm/uuid", "ptype_tobe_for_name: name = data", "get_dm_uuid: get_dm_uuid /dev/vdb uuid path is /sys/dev/block/252:16/dm/uuid", "create_partition: Creating data partition num 1 size 0 on /dev/vdb", "command_check_call: Running command: /usr/sbin/sgdisk --largest-new=1 --change-name=1:ceph data --partition-guid=1:84c3e31e-033b-484d-a2b0-2c826f7d16c3 --typecode=1:89c57f98-2fe5-4dc0-89c1-f3ad0ceff2be --mbrtogpt -- /dev/vdb", "update_partition: Calling partprobe on created device /dev/vdb", "command_check_call: Running command: /usr/bin/udevadm settle --timeout=600", "command: Running command: /usr/bin/flock -s /dev/vdb /usr/sbin/partprobe /dev/vdb", "command_check_call: Running command: /usr/bin/udevadm settle --timeout=600", "get_dm_uuid: get_dm_uuid /dev/vdb uuid path is /sys/dev/block/252:16/dm/uuid", "get_dm_uuid: get_dm_uuid /dev/vdb uuid path is /sys/dev/block/252:16/dm/uuid", "get_dm_uuid: get_dm_uuid /dev/vdb1 uuid path is /sys/dev/block/252:17/dm/uuid", "populate_data_path_device: Creating xfs fs on /dev/vdb1", "command_check_call: Running command: /usr/sbin/mkfs -t xfs -f -i size=2048 -f -- /dev/vdb1", "/dev/vdb1: No such file or directory", "Usage: mkfs.xfs", "/* blocksize */\\\\t\\\\t[-b log=n|size=num]", "/* metadata */\\\\t\\\\t[-m crc=0|1,finobt=0|1,uuid=xxx]", "/* data subvol */\\\\t[-d agcount=n,agsize=n,file,name=xxx,size=num,", "\\\\t\\\\t\\\\t    (sunit=value,swidth=value|su=num,sw=num|noalign),", "\\\\t\\\\t\\\\t    sectlog=n|sectsize=num", "/* force overwrite */\\\\t[-f]", "/* inode size */\\\\t[-i log=n|perblock=n|size=num,maxpct=n,attr=0|1|2,", "\\\\t\\\\t\\\\t    projid32bit=0|1]", "/* no discard */\\\\t[-K]", "/* log subvol */\\\\t[-l agnum=n,internal,size=num,logdev=xxx,version=n", "\\\\t\\\\t\\\\t    sunit=value|su=num,sectlog=n|sectsize=num,", "\\\\t\\\\t\\\\t    lazy-count=0|1]", "/* label */\\\\t\\\\t[-L label (maximum 12 characters)]", "/* naming */\\\\t\\\\t[-n log=n|size=num,version=2|ci,ftype=0|1]", "/* no-op 
info only */\\\\t[-N]", "/* prototype file */\\\\t[-p fname]", "/* quiet */\\\\t\\\\t[-q]", "/* realtime subvol */\\\\t[-r extsize=num,size=num,rtdev=xxx]", "/* sectorsize */\\\\t[-s log=n|size=num]", "/* version */\\\\t\\\\t[-V]", "\\\\t\\\\t\\\\tdevicename", "<devicename> is required unless -d name=xxx is given.", "<num> is xxx (bytes), xxxs (sectors), xxxb (fs blocks), xxxk (xxx KiB),", "      xxxm (xxx MiB), xxxg (xxx GiB), xxxt (xxx TiB) or xxxp (xxx PiB).", "<value> is xxx (512 byte blocks).", "Traceback (most recent call last):", "  File \\\\"/usr/sbin/ceph-disk\\\\", line 9, in <module>", "    load_entry_point(\\'ceph-disk==1.0.0\\', \\'console_scripts\\', \\'ceph-disk\\')()", "  File \\\\"/usr/lib/python2.7/site-packages/ceph_disk/main.py\\\\", line 5326, in run", "    main(sys.argv[1:])", "  File \\\\"/usr/lib/python2.7/site-packages/ceph_disk/main.py\\\\", line 5277, in main", "    args.func(args)", "  File \\\\"/usr/lib/python2.7/site-packages/ceph_disk/main.py\\\\", line 1879, in main", "    Prepare.factory(args).prepare()", "  File \\\\"/usr/lib/python2.7/site-packages/ceph_disk/main.py\\\\", line 1868, in prepare", "    self.prepare_locked()", "  File \\\\"/usr/lib/python2.7/site-packages/ceph_disk/main.py\\\\", line 1899, in prepare_locked", "    self.data.prepare(self.journal)", "  File \\\\"/usr/lib/python2.7/site-packages/ceph_disk/main.py\\\\", line 2566, in prepare", "    self.prepare_device(*to_prepare_list)", "  File \\\\"/usr/lib/python2.7/site-packages/ceph_disk/main.py\\\\", line 2730, in prepare_device", "    self.populate_data_path_device(*to_prepare_list)", "  File \\\\"/usr/lib/python2.7/site-packages/ceph_disk/main.py\\\\", line 2685, in populate_data_path_device", "    raise Error(e)", "ceph_disk.main.Error: Error: Command \\'[\\'/usr/sbin/mkfs\\', \\'-t\\', u\\'xfs\\', u\\'-f\\', u\\'-i\\', u\\'size=2048\\', \\'-f\\', \\'--\\', \\'/dev/vdb1\\']\\' returned non-zero exit status 1"], "stdout": "2017-08-10 21:34:33  /entrypoint.sh: static: does not generate config\\\
HEALTH_ERR no osds\\\
2017-08-10 21:34:33  /entrypoint.sh: It looks like /dev/vdb isn\\'t consistent, however OSD_FORCE_ZAP is enabled so we are zapping the device anyway\\\
Creating new GPT entries.\\\
GPT data structures destroyed! You may now partition the disk using fdisk or\\\
other utilities.\\\
Creating new GPT entries.\\\
The operation has completed successfully.\\\
The operation has completed successfully.\\\
The operation has completed successfully.", "stdout_lines": ["2017-08-10 21:34:33  /entrypoint.sh: static: does not generate config", "HEALTH_ERR no osds", "2017-08-10 21:34:33  /entrypoint.sh: It looks like /dev/vdb isn\\'t consistent, however OSD_FORCE_ZAP is enabled so we are zapping the device anyway", "Creating new GPT entries.", "GPT data structures destroyed! You may now partition the disk using fdisk or", "other utilities.", "Creating new GPT entries.", "The operation has completed successfully.", "The operation has completed successfully.", "The operation has completed successfully."]}\
changed: [192.168.24.11] => (item=[u\\'/dev/vdb\\', {\\'_ansible_parsed\\': True, \\'stderr_lines\\': [], \\'_ansible_item_result\\': True, u\\'end\\': u\\'2017-08-10 21:34:32.543649\\', \\'_ansible_no_log\\': False, u\\'stdout\\': u\\'\\', u\\'cmd\\': u"lsblk -o PARTLABEL /dev/vdb | grep -sq \\'ceph\\'", u\\'rc\\': 1, \\'item\\': u\\'/dev/vdb\\', u\\'delta\\': u\\'0:00:00.005656\\', u\\'stderr\\': u\\'\\', u\\'changed\\': True, u\\'invocation\\': {u\\'module_args\\': {u\\'warn\\': True, u\\'executable\\': None, u\\'_uses_shell\\': True, u\\'_raw_params\\': u"lsblk -o PARTLABEL /dev/vdb | grep -sq \\'ceph\\'", u\\'removes\\': None, u\\'creates\\': None, u\\'chdir\\': None}}, \\'stdout_lines\\': [], \\'failed_when_result\\': False, u\\'start\\': u\\'2017-08-10 21:34:32.537993\\', \\'failed\\': False}])\
changed: [192.168.24.10] => (item=[u\\'/dev/vdb\\', {\\'_ansible_parsed\\': True, \\'stderr_lines\\': [], \\'_ansible_item_result\\': True, u\\'end\\': u\\'2017-08-10 21:34:32.536827\\', \\'_ansible_no_log\\': False, u\\'stdout\\': u\\'\\', u\\'cmd\\': u"lsblk -o PARTLABEL /dev/vdb | grep -sq \\'ceph\\'", u\\'rc\\': 1, \\'item\\': u\\'/dev/vdb\\', u\\'delta\\': u\\'0:00:00.005597\\', u\\'stderr\\': u\\'\\', u\\'changed\\': True, u\\'invocation\\': {u\\'module_args\\': {u\\'warn\\': True, u\\'executable\\': None, u\\'_uses_shell\\': True, u\\'_raw_params\\': u"lsblk -o PARTLABEL /dev/vdb | grep -sq \\'ceph\\'", u\\'removes\\': None, u\\'creates\\': None, u\\'chdir\\': None}}, \\'stdout_lines\\': [], \\'failed_when_result\\': False, u\\'start\\': u\\'2017-08-10 21:34:32.531230\\', \\'failed\\': False}])\
\
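The failure pattern in the trace above: inside the OSD prepare container, ceph-disk creates the data partition with sgdisk, calls partprobe and udevadm settle, and then immediately runs /usr/sbin/mkfs on /dev/vdb1, which fails with "No such file or directory", apparently because the /dev/vdb1 device node has not yet shown up at that point (a race between partition creation and udev). A minimal sketch of the wait-and-retry pattern that avoids such a race is below; it is an illustration only, not the ceph-disk code (the actual upstream retry loop is quoted in comment 8), and wait_for_partition is a hypothetical helper:

    import os
    import subprocess
    import time

    def wait_for_partition(dev, timeout=10.0, interval=0.2):
        # Poll until the partition device node (e.g. /dev/vdb1) exists,
        # or give up after `timeout` seconds.
        deadline = time.time() + timeout
        while time.time() < deadline:
            if os.path.exists(dev):
                return True
            time.sleep(interval)
        return False

    part = "/dev/vdb1"  # the partition from the trace above
    if wait_for_partition(part):
        # Same mkfs invocation as in the failing log line.
        subprocess.check_call(
            ["/usr/sbin/mkfs", "-t", "xfs", "-f", "-i", "size=2048", "--", part])
    else:
        raise RuntimeError("%s did not appear after partitioning" % part)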
Comment 4 Alexander Chuzhoy 2017-08-11 12:25:48 EDT
Manually applying this patch to the OC image seems to resolve disk activation:
https://github.com/ceph/ceph/pull/14275/files
Comment 8 John Fulton 2017-10-03 15:19:44 EDT
I see the fix [1] in the following docker image, which contains the following RPM:

[root@overcloud-controller-0 heat-config-ansible]# docker images | grep ceph 
docker-registry.engineering.redhat.com/ceph/rhceph-2-rhel7                                  pending             e0d7162b5287        5 days ago          491.6 MB
[root@overcloud-controller-0 heat-config-ansible]# docker ps | grep ceph 
0ddcc287fb89        docker-registry.engineering.redhat.com/ceph/rhceph-2-rhel7:pending                                       "/entrypoint.sh"         20 hours ago        Up 20 hours                                        ceph-mds-overcloud-controller-0
0bb4f7268175        docker-registry.engineering.redhat.com/ceph/rhceph-2-rhel7:pending                                       "/entrypoint.sh"         20 hours ago        Up 20 hours                                        ceph-mon-overcloud-controller-0
[root@overcloud-controller-0 heat-config-ansible]# docker exec -ti 0bb4f7268175 /bin/bash
[root@overcloud-controller-0 /]# rpm -q ceph-osd
ceph-osd-10.2.7-39.el7cp.x86_64
[root@overcloud-controller-0 /]# grep -B 25 -A 5 time.sleep\(.2\) /usr/lib/python2.7/site-packages/ceph_disk/main.py 
    max_retry = 10
    for retry in range(0, max_retry + 1):
        partname = None
        error_msg = ""
        if is_mpath(dev):
            partname = get_partition_mpath(dev, pnum)
        else:
            name = get_dev_name(os.path.realpath(dev))
            sys_entry = os.path.join('/sys/block', name)
            error_msg = " in %s" % sys_entry
            for f in os.listdir(sys_entry):
                if f.startswith(name) and f.endswith(str(pnum)):
                    # we want the shortest name that starts with the base name
                    # and ends with the partition number
                    if not partname or len(f) < len(partname):
                        partname = f
        if partname:
            if retry:
                LOG.info('Found partition %d for %s after %d tries' %
                         (pnum, dev, retry))
            return get_dev_path(partname)
        else:
            if retry < max_retry:
                LOG.info('Try %d/%d : partition %d for %s does not exist%s' %
                         (retry + 1, max_retry, pnum, dev, error_msg))
                time.sleep(.2)
                continue
            else:
                raise Error('partition %d for %s does not appear to exist%s' %
                            (pnum, dev, error_msg))

[root@overcloud-controller-0 /]# 

[1] https://github.com/ceph/ceph/commit/702edb5519e67bc5f8c5b65c6f63c9635cd758cf
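For reference, with max_retry = 10 and a 0.2 second sleep between attempts, the loop above waits up to roughly two seconds (10 x 0.2 s) for the partition node to appear before raising an error, rather than assuming the node already exists, which is the race that produced the immediate mkfs failure in comment 1.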
