Login
[x]
Log in using an account from:
Fedora Account System
Red Hat Associate
Red Hat Customer
Or log in using a Red Hat Bugzilla account
Forgot Password
Login:
Hide Forgot
Create an Account
Red Hat Bugzilla – Attachment 1452942 Details for
Bug 1592556
FFU: openstack overcloud upgrade run --roles Controller fails when hostnames include capital letters in HostnameMap
[?]
New
Simple Search
Advanced Search
My Links
Browse
Requests
Reports
Current State
Search
Tabular reports
Graphical reports
Duplicates
Other Reports
User Changes
Plotly Reports
Bug Status
Bug Severity
Non-Defaults
|
Product Dashboard
Help
Page Help!
Bug Writing Guidelines
What's new
Browser Support Policy
5.0.4.rh83 Release notes
FAQ
Guides index
User guide
Web Services
Contact
Legal
This site requires JavaScript to be enabled to function correctly; please enable it.
inventory.yaml
inventory.yaml (text/plain), 409.67 KB, created by
Marius Cornea
on 2018-06-19 12:54:09 UTC
(
hide
)
Description:
inventory.yaml
Filename:
MIME Type:
Creator:
Marius Cornea
Created:
2018-06-19 12:54:09 UTC
Size:
409.67 KB
patch
obsolete
>undercloud: > hosts: > localhost: {} > vars: > ansible_connection: local > auth_url: https://192.168.0.2:13000/ > cacert: null > os_auth_token: gAAAAABbKArGNGFCC3A9u32xKyu0bgZUVQckbFYWqGPpQQXzKvxoqmbsbvUZpodUYBnNG3qRdmzdu2D8n0jLAVsWtXQhn6rsqA6TiUuFQ9rvJ4E8gUVHgmm4gjw8HKuvSlj2qlXH4T9IYJJsW-Dh3rvIfH6jt1BVkoKdfh_VEHRA-7Eh4UKu7lY > overcloud_admin_password: aVP2a7sFnMyx7wW7XeAH9CKQ8 > overcloud_horizon_url: http://172.16.18.34:80/dashboard > overcloud_keystone_url: http://172.16.18.34:5000/ > plan: overcloud > project_name: admin > undercloud_service_list: [openstack-nova-compute, openstack-heat-engine, openstack-ironic-conductor, > openstack-swift-container, openstack-swift-object, openstack-mistral-engine] > undercloud_swift_url: https://192.168.0.2:13808/v1/AUTH_62cf9ec8d9f449e99bf696a5f3425050 > username: admin >CONTROLLER-R00-00: > hosts: > 192.168.0.13: {} > vars: > ctlplane_ip: 192.168.0.13 > deploy_server_id: 9b2e7b63-8bb8-4e4b-a3b8-df5dab131de3 > enabled_networks: [management, storage, ctlplane, external, internal_api, storage_mgmt, > tenant] > external_ip: 172.16.18.26 > internal_api_ip: 10.0.0.10 > management_ip: 172.16.17.211 > storage_ip: 10.0.0.149 > storage_mgmt_ip: 10.0.1.20 > tenant_ip: 10.0.1.142 >Controller: > children: > CONTROLLER-R00-00: {} > vars: > ansible_ssh_user: heat-admin > bootstrap_server_id: 9b2e7b63-8bb8-4e4b-a3b8-df5dab131de3 > role_data_cellv2_discovery: false > role_data_config_settings: {} > role_data_deploy_steps_tasks: [] > role_data_docker_config: > step_1: > cinder_volume_image_tag: > command: [/bin/bash, -c, '/usr/bin/docker tag ''rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-cinder-volume:2018-06-15.2'' > ''rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-cinder-volume:pcmklatest'''] > detach: false > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-cinder-volume:2018-06-15.2 > net: host > start_order: 1 > user: root > volumes: ['/etc/hosts:/etc/hosts:ro', 
'/etc/localtime:/etc/localtime:ro', > '/dev/shm:/dev/shm:rw', '/etc/sysconfig/docker:/etc/sysconfig/docker:ro', > '/usr/bin:/usr/bin:ro', '/var/run/docker.sock:/var/run/docker.sock:rw'] > haproxy_image_tag: > command: [/bin/bash, -c, '/usr/bin/docker tag ''rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-haproxy:2018-06-15.2'' > ''rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-haproxy:pcmklatest'''] > detach: false > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-haproxy:2018-06-15.2 > net: host > start_order: 1 > user: root > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/dev/shm:/dev/shm:rw', '/etc/sysconfig/docker:/etc/sysconfig/docker:ro', > '/usr/bin:/usr/bin:ro', '/var/run/docker.sock:/var/run/docker.sock:rw'] > memcached: > command: [/bin/bash, -c, 'source /etc/sysconfig/memcached; /usr/bin/memcached > -p ${PORT} -u ${USER} -m ${CACHESIZE} -c ${MAXCONN} $OPTIONS >> /var/log/memcached.log > 2>&1'] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-memcached:2018-06-15.2 > net: host > privileged: false > restart: always > start_order: 1 > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/lib/config-data/memcached/etc/sysconfig/memcached:/etc/sysconfig/memcached:ro', > '/var/log/containers/memcached:/var/log/'] > memcached_init_logs: > command: [/bin/bash, -c, 'source /etc/sysconfig/memcached; touch /var/log/memcached.log > && chown ${USER} /var/log/memcached.log'] > detach: false > image: 
rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-memcached:2018-06-15.2 > privileged: false > start_order: 0 > user: root > volumes: ['/var/lib/config-data/memcached/etc/sysconfig/memcached:/etc/sysconfig/memcached:ro', > '/var/log/containers/memcached:/var/log/'] > mysql_bootstrap: > command: [bash, -ec, 'if [ -e /var/lib/mysql/mysql ]; then exit 0; fi > > echo -e "\n[mysqld]\nwsrep_provider=none" >> /etc/my.cnf > > kolla_set_configs > > sudo -u mysql -E kolla_extend_start > > mysqld_safe --skip-networking --wsrep-on=OFF & > > timeout ${DB_MAX_TIMEOUT} /bin/bash -c ''until mysqladmin -uroot -p"${DB_ROOT_PASSWORD}" > ping 2>/dev/null; do sleep 1; done'' > > mysql -uroot -p"${DB_ROOT_PASSWORD}" -e "CREATE USER ''clustercheck''@''localhost'' > IDENTIFIED BY ''${DB_CLUSTERCHECK_PASSWORD}'';" > > mysql -uroot -p"${DB_ROOT_PASSWORD}" -e "GRANT PROCESS ON *.* TO ''clustercheck''@''localhost'' > WITH GRANT OPTION;" > > timeout ${DB_MAX_TIMEOUT} mysqladmin -uroot -p"${DB_ROOT_PASSWORD}" > shutdown'] > detach: false > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS, KOLLA_BOOTSTRAP=True, DB_MAX_TIMEOUT=60, > DB_CLUSTERCHECK_PASSWORD=EFTVYffcauTEVP9KMzDYeabXt, DB_ROOT_PASSWORD=rVbzhalk5t] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-mariadb:2018-06-15.2 > net: host > start_order: 1 > user: root > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/lib/kolla/config_files/mysql.json:/var/lib/kolla/config_files/config.json', > '/var/lib/config-data/puppet-generated/mysql/:/var/lib/kolla/config_files/src:ro', > 
'/var/lib/mysql:/var/lib/mysql'] > mysql_data_ownership: > command: [chown, -R, 'mysql:', /var/lib/mysql] > detach: false > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-mariadb:2018-06-15.2 > net: host > start_order: 0 > user: root > volumes: ['/var/lib/mysql:/var/lib/mysql'] > mysql_image_tag: > command: [/bin/bash, -c, '/usr/bin/docker tag ''rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-mariadb:2018-06-15.2'' > ''rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-mariadb:pcmklatest'''] > detach: false > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-mariadb:2018-06-15.2 > net: host > start_order: 2 > user: root > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/dev/shm:/dev/shm:rw', '/etc/sysconfig/docker:/etc/sysconfig/docker:ro', > '/usr/bin:/usr/bin:ro', '/var/run/docker.sock:/var/run/docker.sock:rw'] > rabbitmq_bootstrap: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS, KOLLA_BOOTSTRAP=True, RABBITMQ_CLUSTER_COOKIE=CgymAHzuhrGljFWjqwam] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-rabbitmq:2018-06-15.2 > net: host > privileged: false > start_order: 0 > volumes: ['/var/lib/kolla/config_files/rabbitmq.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/rabbitmq/:/var/lib/kolla/config_files/src:ro', > '/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', '/var/lib/rabbitmq:/var/lib/rabbitmq'] > rabbitmq_image_tag: > command: [/bin/bash, -c, '/usr/bin/docker tag ''rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-rabbitmq:2018-06-15.2'' > ''rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-rabbitmq:pcmklatest'''] > detach: false > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-rabbitmq:2018-06-15.2 > net: host > start_order: 1 > user: root > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/dev/shm:/dev/shm:rw', 
'/etc/sysconfig/docker:/etc/sysconfig/docker:ro', > '/usr/bin:/usr/bin:ro', '/var/run/docker.sock:/var/run/docker.sock:rw'] > redis_image_tag: > command: [/bin/bash, -c, '/usr/bin/docker tag ''rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-redis:2018-06-15.2'' > ''rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-redis:pcmklatest'''] > detach: false > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-redis:2018-06-15.2 > net: host > start_order: 1 > user: root > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/dev/shm:/dev/shm:rw', '/etc/sysconfig/docker:/etc/sysconfig/docker:ro', > '/usr/bin:/usr/bin:ro', '/var/run/docker.sock:/var/run/docker.sock:rw'] > step_2: > aodh_init_log: > command: [/bin/bash, -c, 'chown -R aodh:aodh /var/log/aodh'] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-aodh-api:2018-06-15.2 > user: root > volumes: ['/var/log/containers/aodh:/var/log/aodh', '/var/log/containers/httpd/aodh-api:/var/log/httpd'] > cinder_api_init_logs: > command: [/bin/bash, -c, 'chown -R cinder:cinder /var/log/cinder'] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-cinder-api:2018-06-15.2 > privileged: false > user: root > volumes: ['/var/log/containers/cinder:/var/log/cinder', '/var/log/containers/httpd/cinder-api:/var/log/httpd'] > cinder_scheduler_init_logs: > command: [/bin/bash, -c, 'chown -R cinder:cinder /var/log/cinder'] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-cinder-scheduler:2018-06-15.2 > privileged: false > user: root > volumes: ['/var/log/containers/cinder:/var/log/cinder'] > clustercheck: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-mariadb:2018-06-15.2 > net: host > restart: always > start_order: 1 > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > 
'/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/lib/kolla/config_files/clustercheck.json:/var/lib/kolla/config_files/config.json', > '/var/lib/config-data/puppet-generated/clustercheck/:/var/lib/kolla/config_files/src:ro', > '/var/lib/mysql:/var/lib/mysql'] > create_dnsmasq_wrapper: > command: [/docker_puppet_apply.sh, '4', file, 'include ::tripleo::profile::base::neutron::dhcp_agent_wrappers'] > detach: false > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-neutron-dhcp-agent:2018-06-15.2 > net: host > pid: host > start_order: 1 > user: root > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/var/lib/docker-config-scripts/docker_puppet_apply.sh:/docker_puppet_apply.sh:ro', > '/etc/puppet:/tmp/puppet-etc:ro', '/usr/share/openstack-puppet/modules:/usr/share/openstack-puppet/modules:ro', > '/run/openvswitch:/run/openvswitch', '/var/lib/neutron:/var/lib/neutron'] > create_keepalived_wrapper: > command: [/docker_puppet_apply.sh, '4', file, 'include ::tripleo::profile::base::neutron::l3_agent_wrappers'] > detach: false > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-neutron-l3-agent:2018-06-15.2 > net: host > pid: host > start_order: 1 > user: root > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > 
'/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/var/lib/docker-config-scripts/docker_puppet_apply.sh:/docker_puppet_apply.sh:ro', > '/etc/puppet:/tmp/puppet-etc:ro', '/usr/share/openstack-puppet/modules:/usr/share/openstack-puppet/modules:ro', > '/run/openvswitch:/run/openvswitch', '/var/lib/neutron:/var/lib/neutron'] > glance_init_logs: > command: [/bin/bash, -c, 'chown -R glance:glance /var/log/glance'] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-glance-api:2018-06-15.2 > privileged: false > user: root > volumes: ['/var/log/containers/glance:/var/log/glance'] > gnocchi_init_lib: > command: [/bin/bash, -c, 'chown -R gnocchi:gnocchi /var/lib/gnocchi'] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-gnocchi-api:2018-06-15.2 > user: root > volumes: ['/var/lib/gnocchi:/var/lib/gnocchi:rw'] > gnocchi_init_log: > command: [/bin/bash, -c, 'chown -R gnocchi:gnocchi /var/log/gnocchi'] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-gnocchi-api:2018-06-15.2 > user: root > volumes: ['/var/log/containers/gnocchi:/var/log/gnocchi', '/var/log/containers/httpd/gnocchi-api:/var/log/httpd'] > haproxy_init_bundle: > command: [/docker_puppet_apply.sh, '2', 'file,file_line,concat,augeas,tripleo::firewall::rule,pacemaker::resource::bundle,pacemaker::property,pacemaker::resource::ip,pacemaker::resource::ocf,pacemaker::constraint::order,pacemaker::constraint::colocation', > 'include ::tripleo::profile::base::pacemaker; include ::tripleo::profile::pacemaker::haproxy_bundle', > ''] > detach: false > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-haproxy:2018-06-15.2 > net: host > privileged: true > start_order: 3 > user: root > volumes: 
['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/var/lib/docker-config-scripts/docker_puppet_apply.sh:/docker_puppet_apply.sh:ro', > '/etc/puppet:/tmp/puppet-etc:ro', '/usr/share/openstack-puppet/modules:/usr/share/openstack-puppet/modules:ro', > '/etc/ipa/ca.crt:/etc/ipa/ca.crt:ro', '/etc/pki/tls/private/haproxy:/etc/pki/tls/private/haproxy:ro', > '/etc/pki/tls/certs/haproxy:/etc/pki/tls/certs/haproxy:ro', '/etc/pki/tls/private/overcloud_endpoint.pem:/etc/pki/tls/private/overcloud_endpoint.pem:ro', > '/etc/sysconfig:/etc/sysconfig:rw', '/usr/libexec/iptables:/usr/libexec/iptables:ro', > '/usr/libexec/initscripts/legacy-actions:/usr/libexec/initscripts/legacy-actions:ro', > '/etc/corosync/corosync.conf:/etc/corosync/corosync.conf:ro', '/dev/shm:/dev/shm:rw'] > heat_init_log: > command: [/bin/bash, -c, 'chown -R heat:heat /var/log/heat'] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-heat-engine:2018-06-15.2 > user: root > volumes: ['/var/log/containers/heat:/var/log/heat'] > horizon_fix_perms: > command: [/bin/bash, -c, 'touch /var/log/horizon/horizon.log && chown -R > apache:apache /var/log/horizon && chmod -R a+rx /etc/openstack-dashboard'] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-horizon:2018-06-15.2 > user: root > volumes: ['/var/log/containers/horizon:/var/log/horizon', '/var/log/containers/httpd/horizon:/var/log/httpd', > '/var/lib/config-data/puppet-generated/horizon/etc/openstack-dashboard:/etc/openstack-dashboard'] > keystone_init_log: > command: [/bin/bash, -c, 'chown -R keystone:keystone /var/log/keystone'] > image: 
rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-keystone:2018-06-15.2 > start_order: 1 > user: root > volumes: ['/var/log/containers/keystone:/var/log/keystone', '/var/log/containers/httpd/keystone:/var/log/httpd'] > mysql_init_bundle: > command: [/docker_puppet_apply.sh, '2', 'file,file_line,concat,augeas,pacemaker::resource::bundle,pacemaker::property,pacemaker::resource::ocf,pacemaker::constraint::order,pacemaker::constraint::colocation,galera_ready,mysql_database,mysql_grant,mysql_user', > 'include ::tripleo::profile::base::pacemaker;include ::tripleo::profile::pacemaker::database::mysql_bundle', > ''] > detach: false > environment: [TRIPLEO_DEPLOY_IDENTIFIER=] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-mariadb:2018-06-15.2 > net: host > start_order: 1 > user: root > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/var/lib/docker-config-scripts/docker_puppet_apply.sh:/docker_puppet_apply.sh:ro', > '/etc/puppet:/tmp/puppet-etc:ro', '/usr/share/openstack-puppet/modules:/usr/share/openstack-puppet/modules:ro', > '/etc/corosync/corosync.conf:/etc/corosync/corosync.conf:ro', '/dev/shm:/dev/shm:rw', > '/var/lib/mysql:/var/lib/mysql:rw'] > neutron_init_logs: > command: [/bin/bash, -c, 'chown -R neutron:neutron /var/log/neutron'] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-neutron-server:2018-06-15.2 > privileged: false > user: root > volumes: ['/var/log/containers/neutron:/var/log/neutron', '/var/log/containers/httpd/neutron-api:/var/log/httpd'] > nova_api_init_logs: > command: [/bin/bash, -c, 'chown -R nova:nova /var/log/nova'] > image: 
rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-nova-api:2018-06-15.2 > privileged: false > user: root > volumes: ['/var/log/containers/nova:/var/log/nova', '/var/log/containers/httpd/nova-api:/var/log/httpd'] > nova_metadata_init_log: > command: [/bin/bash, -c, 'chown -R nova:nova /var/log/nova'] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-nova-api:2018-06-15.2 > privileged: false > user: root > volumes: ['/var/log/containers/nova:/var/log/nova'] > nova_placement_init_log: > command: [/bin/bash, -c, 'chown -R nova:nova /var/log/nova'] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-nova-placement-api:2018-06-15.2 > start_order: 1 > user: root > volumes: ['/var/log/containers/nova:/var/log/nova', '/var/log/containers/httpd/nova-placement:/var/log/httpd'] > panko_init_log: > command: [/bin/bash, -c, 'chown -R panko:panko /var/log/panko'] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-panko-api:2018-06-15.2 > user: root > volumes: ['/var/log/containers/panko:/var/log/panko', '/var/log/containers/httpd/panko-api:/var/log/httpd'] > rabbitmq_init_bundle: > command: [/docker_puppet_apply.sh, '2', 'file,file_line,concat,augeas,pacemaker::resource::bundle,pacemaker::property,pacemaker::resource::ocf,pacemaker::constraint::order,pacemaker::constraint::colocation,rabbitmq_policy,rabbitmq_user,rabbitmq_ready', > 'include ::tripleo::profile::base::pacemaker;include ::tripleo::profile::pacemaker::rabbitmq_bundle', > ''] > detach: false > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-rabbitmq:2018-06-15.2 > net: host > start_order: 0 > user: root > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > 
'/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/var/lib/docker-config-scripts/docker_puppet_apply.sh:/docker_puppet_apply.sh:ro', > '/etc/puppet:/tmp/puppet-etc:ro', '/usr/share/openstack-puppet/modules:/usr/share/openstack-puppet/modules:ro', > '/etc/corosync/corosync.conf:/etc/corosync/corosync.conf:ro', '/dev/shm:/dev/shm:rw', > '/bin/true:/bin/epmd'] > redis_init_bundle: > command: [/docker_puppet_apply.sh, '2', 'file,file_line,concat,augeas,pacemaker::resource::bundle,pacemaker::property,pacemaker::resource::ocf,pacemaker::constraint::order,pacemaker::constraint::colocation', > 'include ::tripleo::profile::base::pacemaker;include ::tripleo::profile::pacemaker::database::redis_bundle', > ''] > config_volume: redis_init_bundle > detach: false > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-redis:2018-06-15.2 > net: host > start_order: 2 > user: root > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/var/lib/docker-config-scripts/docker_puppet_apply.sh:/docker_puppet_apply.sh:ro', > '/etc/puppet:/tmp/puppet-etc:ro', '/usr/share/openstack-puppet/modules:/usr/share/openstack-puppet/modules:ro', > '/etc/corosync/corosync.conf:/etc/corosync/corosync.conf:ro', '/dev/shm:/dev/shm:rw'] > step_3: > aodh_db_sync: > command: /usr/bin/bootstrap_host_exec aodh_api su aodh -s /bin/bash -c /usr/bin/aodh-dbsync > detach: false > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-aodh-api:2018-06-15.2 > net: host > privileged: false > user: root > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', 
'/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/lib/config-data/aodh/etc/my.cnf.d/tripleo.cnf:/etc/my.cnf.d/tripleo.cnf:ro', > '/var/lib/config-data/aodh/etc/aodh/:/etc/aodh/:ro', '/var/log/containers/aodh:/var/log/aodh', > '/var/log/containers/httpd/aodh-api:/var/log/httpd'] > ceilometer_init_log: > command: [/bin/bash, -c, 'chown -R ceilometer:ceilometer /var/log/ceilometer'] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-ceilometer-notification:2018-06-15.2 > start_order: 0 > user: root > volumes: ['/var/log/containers/ceilometer:/var/log/ceilometer'] > cinder_api_db_sync: > command: [/usr/bin/bootstrap_host_exec, cinder_api, su cinder -s /bin/bash > -c 'cinder-manage db sync --bump-versions'] > detach: false > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-cinder-api:2018-06-15.2 > net: host > privileged: false > user: root > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/lib/config-data/cinder/etc/my.cnf.d/tripleo.cnf:/etc/my.cnf.d/tripleo.cnf:ro', > '/var/lib/config-data/cinder/etc/cinder/:/etc/cinder/:ro', '/var/log/containers/cinder:/var/log/cinder', > '/var/log/containers/httpd/cinder-api:/var/log/httpd'] > cinder_volume_init_logs: > command: [/bin/bash, -c, 'chown -R cinder:cinder /var/log/cinder'] > image: 
rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-cinder-volume:2018-06-15.2 > privileged: false > start_order: 0 > user: root > volumes: ['/var/log/containers/cinder:/var/log/cinder'] > glance_api_db_sync: > command: /usr/bin/bootstrap_host_exec glance_api su glance -s /bin/bash > -c '/usr/local/bin/kolla_start' > detach: false > environment: [KOLLA_BOOTSTRAP=True, KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-glance-api:2018-06-15.2 > net: host > privileged: false > user: root > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/log/containers/glance:/var/log/glance', '/var/lib/kolla/config_files/glance_api.json:/var/lib/kolla/config_files/config.json', > '/var/lib/config-data/puppet-generated/glance_api/:/var/lib/kolla/config_files/src:ro', > '/etc/ceph:/var/lib/kolla/config_files/src-ceph:ro', '', ''] > heat_engine_db_sync: > command: /usr/bin/bootstrap_host_exec heat_engine su heat -s /bin/bash -c > 'heat-manage db_sync' > detach: false > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-heat-engine:2018-06-15.2 > net: host > privileged: false > user: root > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > 
'/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/log/containers/heat:/var/log/heat', '/var/lib/config-data/heat/etc/my.cnf.d/tripleo.cnf:/etc/my.cnf.d/tripleo.cnf:ro', > '/var/lib/config-data/heat/etc/heat/:/etc/heat/:ro'] > horizon: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS, ENABLE_IRONIC=yes, ENABLE_MANILA=yes, > ENABLE_MISTRAL=yes, ENABLE_OCTAVIA=yes, ENABLE_SAHARA=yes, ENABLE_CLOUDKITTY=no, > ENABLE_FREEZER=no, ENABLE_FWAAS=no, ENABLE_KARBOR=no, ENABLE_DESIGNATE=no, > ENABLE_MAGNUM=no, ENABLE_MURANO=no, ENABLE_NEUTRON_LBAAS=no, ENABLE_SEARCHLIGHT=no, > ENABLE_SENLIN=no, ENABLE_SOLUM=no, ENABLE_TACKER=no, ENABLE_TROVE=no, > ENABLE_WATCHER=no, ENABLE_ZAQAR=no, ENABLE_ZUN=no] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-horizon:2018-06-15.2 > net: host > privileged: false > restart: always > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/lib/kolla/config_files/horizon.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/horizon/:/var/lib/kolla/config_files/src:ro', > '/var/log/containers/horizon:/var/log/horizon', '/var/log/containers/httpd/horizon:/var/log/httpd', > '', ''] > iscsid: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-iscsid:2018-06-15.2 > net: host > privileged: true > restart: always > start_order: 2 > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', 
'/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', > '/dev/:/dev/', '/run/:/run/', '/sys:/sys', '/lib/modules:/lib/modules:ro', > '/etc/iscsi:/var/lib/kolla/config_files/src-iscsid:ro'] > keystone: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > healthcheck: {test: /openstack/healthcheck} > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-keystone:2018-06-15.2 > net: host > privileged: false > restart: always > start_order: 2 > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/log/containers/keystone:/var/log/keystone', '/var/log/containers/httpd/keystone:/var/log/httpd', > '/var/lib/kolla/config_files/keystone.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/keystone/:/var/lib/kolla/config_files/src:ro', > '', ''] > keystone_bootstrap: > action: exec > command: [keystone, /usr/bin/bootstrap_host_exec, keystone, keystone-manage, > bootstrap, --bootstrap-password, aVP2a7sFnMyx7wW7XeAH9CKQ8] > start_order: 3 > user: root > keystone_cron: > command: [/bin/bash, -c, /usr/local/bin/kolla_set_configs && /usr/sbin/crond > -n] > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > image: 
rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-keystone:2018-06-15.2 > net: host > privileged: false > restart: always > start_order: 4 > user: root > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/log/containers/keystone:/var/log/keystone', '/var/log/containers/httpd/keystone:/var/log/httpd', > '/var/lib/kolla/config_files/keystone_cron.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/keystone/:/var/lib/kolla/config_files/src:ro'] > keystone_db_sync: > command: [/usr/bin/bootstrap_host_exec, keystone, /usr/local/bin/kolla_start] > detach: false > environment: [KOLLA_BOOTSTRAP=True, KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-keystone:2018-06-15.2 > net: host > privileged: false > user: root > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/log/containers/keystone:/var/log/keystone', '/var/log/containers/httpd/keystone:/var/log/httpd', > '/var/lib/kolla/config_files/keystone.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/keystone/:/var/lib/kolla/config_files/src:ro', > '', ''] > 
neutron_db_sync: > command: [/usr/bin/bootstrap_host_exec, neutron_api, neutron-db-manage, > upgrade, heads] > detach: false > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-neutron-server:2018-06-15.2 > net: host > privileged: false > user: root > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/log/containers/neutron:/var/log/neutron', '/var/log/containers/httpd/neutron-api:/var/log/httpd', > '/var/lib/config-data/neutron/etc/my.cnf.d/tripleo.cnf:/etc/my.cnf.d/tripleo.cnf:ro', > '/var/lib/config-data/neutron/etc/neutron:/etc/neutron:ro', '/var/lib/config-data/neutron/usr/share/neutron:/usr/share/neutron:ro'] > neutron_ovs_bridge: > command: [puppet, apply, --modulepath, '/etc/puppet/modules:/usr/share/openstack-puppet/modules', > --tags, 'file,file_line,concat,augeas,neutron::plugins::ovs::bridge,vs_config', > -v, -e, 'include neutron::agents::ml2::ovs'] > detach: false > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-neutron-server:2018-06-15.2 > net: host > pid: host > privileged: true > user: root > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > 
'/var/lib/kolla/config_files/neutron_ovs_agent.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/neutron/:/var/lib/kolla/config_files/src:ro', > '/lib/modules:/lib/modules:ro', '/run/openvswitch:/run/openvswitch', '/etc/puppet:/etc/puppet:ro', > '/usr/share/openstack-puppet/modules/:/usr/share/openstack-puppet/modules/:ro', > '/var/run/openvswitch/:/var/run/openvswitch/'] > nova_api_db_sync: > command: /usr/bin/bootstrap_host_exec nova_api su nova -s /bin/bash -c '/usr/bin/nova-manage > api_db sync' > detach: false > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-nova-api:2018-06-15.2 > net: host > start_order: 0 > user: root > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/log/containers/nova:/var/log/nova', '/var/log/containers/httpd/nova-api:/var/log/httpd', > '/var/lib/config-data/nova/etc/my.cnf.d/tripleo.cnf:/etc/my.cnf.d/tripleo.cnf:ro', > '/var/lib/config-data/nova/etc/nova/:/etc/nova/:ro'] > nova_api_ensure_default_cell: > command: /usr/bin/bootstrap_host_exec nova_api /nova_api_ensure_default_cell.sh > detach: false > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-nova-api:2018-06-15.2 > net: host > start_order: 2 > user: root > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > 
'/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/log/containers/nova:/var/log/nova', '/var/log/containers/httpd/nova-api:/var/log/httpd', > '/var/lib/config-data/nova/etc/my.cnf.d/tripleo.cnf:/etc/my.cnf.d/tripleo.cnf:ro', > '/var/lib/config-data/nova/etc/nova/:/etc/nova/:ro', '/var/lib/config-data/nova/etc/my.cnf.d/tripleo.cnf:/etc/my.cnf.d/tripleo.cnf:ro', > '/var/lib/config-data/nova/etc/nova/:/etc/nova/:ro', '/var/log/containers/nova:/var/log/nova', > '/var/lib/docker-config-scripts/nova_api_ensure_default_cell.sh:/nova_api_ensure_default_cell.sh:ro'] > nova_api_map_cell0: > command: /usr/bin/bootstrap_host_exec nova_api su nova -s /bin/bash -c '/usr/bin/nova-manage > cell_v2 map_cell0' > detach: false > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-nova-api:2018-06-15.2 > net: host > start_order: 1 > user: root > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/log/containers/nova:/var/log/nova', '/var/log/containers/httpd/nova-api:/var/log/httpd', > '/var/lib/config-data/nova/etc/my.cnf.d/tripleo.cnf:/etc/my.cnf.d/tripleo.cnf:ro', > '/var/lib/config-data/nova/etc/nova/:/etc/nova/:ro'] > nova_db_sync: > command: /usr/bin/bootstrap_host_exec nova_api su nova -s /bin/bash -c '/usr/bin/nova-manage > db sync' > detach: false > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-nova-api:2018-06-15.2 > net: host > start_order: 3 > user: root > volumes: ['/etc/hosts:/etc/hosts:ro', 
'/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/log/containers/nova:/var/log/nova', '/var/log/containers/httpd/nova-api:/var/log/httpd', > '/var/lib/config-data/nova/etc/my.cnf.d/tripleo.cnf:/etc/my.cnf.d/tripleo.cnf:ro', > '/var/lib/config-data/nova/etc/nova/:/etc/nova/:ro'] > nova_placement: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-nova-placement-api:2018-06-15.2 > net: host > restart: always > start_order: 1 > user: root > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/log/containers/nova:/var/log/nova', '/var/log/containers/httpd/nova-placement:/var/log/httpd', > '/var/lib/kolla/config_files/nova_placement.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/nova_placement/:/var/lib/kolla/config_files/src:ro', > '', ''] > panko_db_sync: > command: /usr/bin/bootstrap_host_exec panko_api su panko -s /bin/bash -c > '/usr/bin/panko-dbsync ' > detach: false > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-panko-api:2018-06-15.2 > net: host > privileged: false > user: root > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > 
'/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/log/containers/panko:/var/log/panko', '/var/log/containers/httpd/panko-api:/var/log/httpd', > '/var/lib/config-data/panko/etc/my.cnf.d/tripleo.cnf:/etc/my.cnf.d/tripleo.cnf:ro', > '/var/lib/config-data/panko/etc/panko:/etc/panko:ro'] > swift_copy_rings: > command: [/bin/bash, -c, cp -v -a -t /etc/swift /swift_ringbuilder/etc/swift/*.gz > /swift_ringbuilder/etc/swift/*.builder /swift_ringbuilder/etc/swift/backups] > detach: false > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-swift-proxy-server:2018-06-15.2 > user: root > volumes: ['/var/lib/config-data/puppet-generated/swift/etc/swift:/etc/swift:rw', > '/var/lib/config-data/swift_ringbuilder:/swift_ringbuilder:ro'] > swift_setup_srv: > command: [chown, -R, 'swift:', /srv/node] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-swift-account:2018-06-15.2 > user: root > volumes: ['/srv/node:/srv/node'] > step_4: > aodh_api: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-aodh-api:2018-06-15.2 > net: host > privileged: false > restart: always > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > 
'/var/lib/kolla/config_files/aodh_api.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/aodh/:/var/lib/kolla/config_files/src:ro', > '/var/log/containers/aodh:/var/log/aodh', '/var/log/containers/httpd/aodh-api:/var/log/httpd', > '', ''] > aodh_evaluator: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > healthcheck: {test: /openstack/healthcheck} > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-aodh-evaluator:2018-06-15.2 > net: host > privileged: false > restart: always > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/lib/kolla/config_files/aodh_evaluator.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/aodh/:/var/lib/kolla/config_files/src:ro', > '/var/log/containers/aodh:/var/log/aodh'] > aodh_listener: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > healthcheck: {test: /openstack/healthcheck} > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-aodh-listener:2018-06-15.2 > net: host > privileged: false > restart: always > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > 
'/var/lib/kolla/config_files/aodh_listener.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/aodh/:/var/lib/kolla/config_files/src:ro', > '/var/log/containers/aodh:/var/log/aodh'] > aodh_notifier: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > healthcheck: {test: /openstack/healthcheck} > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-aodh-notifier:2018-06-15.2 > net: host > privileged: false > restart: always > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/lib/kolla/config_files/aodh_notifier.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/aodh/:/var/lib/kolla/config_files/src:ro', > '/var/log/containers/aodh:/var/log/aodh'] > ceilometer_agent_central: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > healthcheck: {test: /openstack/healthcheck} > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-ceilometer-central:2018-06-15.2 > net: host > privileged: false > restart: always > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > 
'/var/lib/kolla/config_files/ceilometer_agent_central.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/ceilometer/:/var/lib/kolla/config_files/src:ro', > '/var/log/containers/ceilometer:/var/log/ceilometer'] > ceilometer_agent_notification: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > healthcheck: {test: /openstack/healthcheck} > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-ceilometer-notification:2018-06-15.2 > net: host > privileged: false > restart: always > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/lib/kolla/config_files/ceilometer_agent_notification.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/ceilometer/:/var/lib/kolla/config_files/src:ro', > '/var/lib/config-data/puppet-generated/panko/:/var/lib/kolla/config_files/src-panko:ro', > '/var/log/containers/ceilometer:/var/log/ceilometer'] > cinder_api: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-cinder-api:2018-06-15.2 > net: host > privileged: false > restart: always > user: root > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', 
'/etc/puppet:/etc/puppet:ro', > '/var/lib/kolla/config_files/cinder_api.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/cinder/:/var/lib/kolla/config_files/src:ro', > '/var/log/containers/cinder:/var/log/cinder', '/var/log/containers/httpd/cinder-api:/var/log/httpd', > '', ''] > cinder_api_cron: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-cinder-api:2018-06-15.2 > net: host > privileged: false > restart: always > user: root > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/lib/kolla/config_files/cinder_api_cron.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/cinder/:/var/lib/kolla/config_files/src:ro', > '/var/log/containers/cinder:/var/log/cinder', '/var/log/containers/httpd/cinder-api:/var/log/httpd'] > cinder_scheduler: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > healthcheck: {test: /openstack/healthcheck} > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-cinder-scheduler:2018-06-15.2 > net: host > privileged: false > restart: always > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', 
'/etc/puppet:/etc/puppet:ro', > '/var/lib/kolla/config_files/cinder_scheduler.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/cinder/:/var/lib/kolla/config_files/src:ro', > '/var/log/containers/cinder:/var/log/cinder'] > glance_api: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > healthcheck: {test: /openstack/healthcheck} > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-glance-api:2018-06-15.2 > net: host > privileged: false > restart: always > start_order: 2 > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/log/containers/glance:/var/log/glance', '/var/lib/kolla/config_files/glance_api.json:/var/lib/kolla/config_files/config.json', > '/var/lib/config-data/puppet-generated/glance_api/:/var/lib/kolla/config_files/src:ro', > '/etc/ceph:/var/lib/kolla/config_files/src-ceph:ro', '', ''] > gnocchi_db_sync: > detach: false > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-gnocchi-api:2018-06-15.2 > net: host > privileged: false > user: root > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > 
'/var/lib/kolla/config_files/gnocchi_db_sync.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/gnocchi/:/var/lib/kolla/config_files/src:ro', > '/var/lib/gnocchi:/var/lib/gnocchi:rw', '/var/log/containers/gnocchi:/var/log/gnocchi', > '/var/log/containers/httpd/gnocchi-api:/var/log/httpd', '/etc/ceph:/var/lib/kolla/config_files/src-ceph:ro'] > heat_api: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > healthcheck: {test: /openstack/healthcheck} > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-heat-api:2018-06-15.2 > net: host > privileged: false > restart: always > user: root > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/log/containers/heat:/var/log/heat', '/var/log/containers/httpd/heat-api:/var/log/httpd', > '/var/lib/kolla/config_files/heat_api.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/heat_api/:/var/lib/kolla/config_files/src:ro', > '', ''] > heat_api_cfn: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > healthcheck: {test: /openstack/healthcheck} > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-heat-api-cfn:2018-06-15.2 > net: host > privileged: false > restart: always > user: root > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > 
'/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/log/containers/heat:/var/log/heat', '/var/log/containers/httpd/heat-api-cfn:/var/log/httpd', > '/var/lib/kolla/config_files/heat_api_cfn.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/heat_api_cfn/:/var/lib/kolla/config_files/src:ro', > '', ''] > heat_api_cron: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-heat-api:2018-06-15.2 > net: host > privileged: false > restart: always > user: root > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/log/containers/heat:/var/log/heat', '/var/log/containers/httpd/heat-api:/var/log/httpd', > '/var/lib/kolla/config_files/heat_api_cron.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/heat_api/:/var/lib/kolla/config_files/src:ro'] > heat_engine: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > healthcheck: {test: /openstack/healthcheck} > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-heat-engine:2018-06-15.2 > net: host > privileged: false > restart: always > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > 
'/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/log/containers/heat:/var/log/heat', '/var/lib/kolla/config_files/heat_engine.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/heat/:/var/lib/kolla/config_files/src:ro'] > keystone_refresh: > action: exec > command: [keystone, pkill, --signal, USR1, httpd] > start_order: 1 > user: root > logrotate_crond: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-cron:2018-06-15.2 > net: none > pid: host > privileged: true > restart: always > user: root > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/lib/kolla/config_files/logrotate-crond.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/crond/:/var/lib/kolla/config_files/src:ro', > '/var/log/containers:/var/log/containers'] > neutron_api: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > healthcheck: {test: /openstack/healthcheck} > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-neutron-server:2018-06-15.2 > net: host > privileged: false > restart: always > start_order: 0 > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > 
'/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/log/containers/neutron:/var/log/neutron', '/var/log/containers/httpd/neutron-api:/var/log/httpd', > '/var/lib/kolla/config_files/neutron_api.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/neutron/:/var/lib/kolla/config_files/src:ro'] > neutron_dhcp: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > healthcheck: {test: /openstack/healthcheck} > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-neutron-dhcp-agent:2018-06-15.2 > net: host > pid: host > privileged: true > restart: always > start_order: 10 > ulimit: [nofile=1024] > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/log/containers/neutron:/var/log/neutron', '/var/lib/kolla/config_files/neutron_dhcp.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/neutron/:/var/lib/kolla/config_files/src:ro', > '/lib/modules:/lib/modules:ro', '/run/openvswitch:/run/openvswitch', '/var/lib/neutron:/var/lib/neutron', > '/run/netns:/run/netns:shared', '/var/lib/openstack:/var/lib/openstack', > '/var/lib/neutron/dnsmasq_wrapper:/usr/local/bin/dnsmasq:ro', '/var/lib/neutron/dhcp_haproxy_wrapper:/usr/local/bin/haproxy:ro'] > neutron_l3_agent: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > healthcheck: {test: /openstack/healthcheck} > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-neutron-l3-agent:2018-06-15.2 > net: host > 
pid: host > privileged: true > restart: always > start_order: 10 > ulimit: [nofile=1024] > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/log/containers/neutron:/var/log/neutron', '/var/lib/kolla/config_files/neutron_l3_agent.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/neutron/:/var/lib/kolla/config_files/src:ro', > '/lib/modules:/lib/modules:ro', '/run/openvswitch:/run/openvswitch', '/var/lib/neutron:/var/lib/neutron', > '/run/netns:/run/netns:shared', '/var/lib/openstack:/var/lib/openstack', > '/var/lib/neutron/keepalived_wrapper:/usr/local/bin/keepalived:ro', '/var/lib/neutron/l3_haproxy_wrapper:/usr/local/bin/haproxy:ro', > '/var/lib/neutron/dibbler_wrapper:/usr/local/bin/dibbler_client:ro'] > neutron_metadata_agent: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > healthcheck: {test: /openstack/healthcheck} > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-neutron-metadata-agent:2018-06-15.2 > net: host > pid: host > privileged: true > restart: always > start_order: 10 > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/log/containers/neutron:/var/log/neutron', 
'/var/lib/kolla/config_files/neutron_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/neutron/:/var/lib/kolla/config_files/src:ro', > '/lib/modules:/lib/modules:ro', '/var/lib/neutron:/var/lib/neutron'] > neutron_ovs_agent: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > healthcheck: {test: /openstack/healthcheck} > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-neutron-openvswitch-agent:2018-06-15.2 > net: host > pid: host > privileged: true > restart: always > start_order: 10 > ulimit: [nofile=1024] > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/log/containers/neutron:/var/log/neutron', '/var/lib/kolla/config_files/neutron_ovs_agent.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/neutron/:/var/lib/kolla/config_files/src:ro', > '/var/lib/docker-config-scripts/neutron_ovs_agent_launcher.sh:/neutron_ovs_agent_launcher.sh:ro', > '/lib/modules:/lib/modules:ro', '/run/openvswitch:/run/openvswitch'] > nova_api: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > healthcheck: {test: /openstack/healthcheck} > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-nova-api:2018-06-15.2 > net: host > privileged: true > restart: always > start_order: 2 > user: root > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > 
'/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/log/containers/nova:/var/log/nova', '/var/log/containers/httpd/nova-api:/var/log/httpd', > '/var/lib/kolla/config_files/nova_api.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/nova/:/var/lib/kolla/config_files/src:ro', > '', ''] > nova_api_cron: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-nova-api:2018-06-15.2 > net: host > privileged: false > restart: always > user: root > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/log/containers/nova:/var/log/nova', '/var/log/containers/httpd/nova-api:/var/log/httpd', > '/var/lib/kolla/config_files/nova_api_cron.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/nova/:/var/lib/kolla/config_files/src:ro'] > nova_conductor: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > healthcheck: {test: /openstack/healthcheck} > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-nova-conductor:2018-06-15.2 > net: host > privileged: false > restart: always > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > 
'/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/log/containers/nova:/var/log/nova', '/var/lib/kolla/config_files/nova_conductor.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/nova/:/var/lib/kolla/config_files/src:ro'] > nova_consoleauth: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > healthcheck: {test: /openstack/healthcheck} > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-nova-consoleauth:2018-06-15.2 > net: host > privileged: false > restart: always > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/log/containers/nova:/var/log/nova', '/var/lib/kolla/config_files/nova_consoleauth.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/nova/:/var/lib/kolla/config_files/src:ro'] > nova_metadata: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-nova-api:2018-06-15.2 > net: host > privileged: true > restart: always > start_order: 2 > user: nova > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > 
'/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/log/containers/nova:/var/log/nova', '/var/lib/kolla/config_files/nova_metadata.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/nova/:/var/lib/kolla/config_files/src:ro'] > nova_scheduler: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > healthcheck: {test: /openstack/healthcheck} > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-nova-scheduler:2018-06-15.2 > net: host > privileged: false > restart: always > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/log/containers/nova:/var/log/nova', '/var/lib/kolla/config_files/nova_scheduler.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/nova/:/var/lib/kolla/config_files/src:ro', > '/run:/run'] > nova_vnc_proxy: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > healthcheck: {test: /openstack/healthcheck} > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-nova-novncproxy:2018-06-15.2 > net: host > privileged: false > restart: always > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > 
'/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/log/containers/nova:/var/log/nova', '/var/lib/kolla/config_files/nova_vnc_proxy.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/nova/:/var/lib/kolla/config_files/src:ro'] > panko_api: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > healthcheck: {test: /openstack/healthcheck} > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-panko-api:2018-06-15.2 > net: host > privileged: false > restart: always > start_order: 2 > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/log/containers/panko:/var/log/panko', '/var/log/containers/httpd/panko-api:/var/log/httpd', > '/var/lib/kolla/config_files/panko_api.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/panko/:/var/lib/kolla/config_files/src:ro', > '', ''] > swift_account_auditor: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-swift-account:2018-06-15.2 > net: host > restart: always > user: swift > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', 
> '/var/lib/kolla/config_files/swift_account_auditor.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/swift/:/var/lib/kolla/config_files/src:ro', > '/srv/node:/srv/node', '/dev:/dev', '/var/cache/swift:/var/cache/swift'] > swift_account_reaper: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-swift-account:2018-06-15.2 > net: host > restart: always > user: swift > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/lib/kolla/config_files/swift_account_reaper.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/swift/:/var/lib/kolla/config_files/src:ro', > '/srv/node:/srv/node', '/dev:/dev', '/var/cache/swift:/var/cache/swift'] > swift_account_replicator: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-swift-account:2018-06-15.2 > net: host > restart: always > user: swift > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/lib/kolla/config_files/swift_account_replicator.json:/var/lib/kolla/config_files/config.json:ro', > 
'/var/lib/config-data/puppet-generated/swift/:/var/lib/kolla/config_files/src:ro', > '/srv/node:/srv/node', '/dev:/dev', '/var/cache/swift:/var/cache/swift'] > swift_account_server: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > healthcheck: {test: /openstack/healthcheck} > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-swift-account:2018-06-15.2 > net: host > restart: always > user: swift > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/lib/kolla/config_files/swift_account_server.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/swift/:/var/lib/kolla/config_files/src:ro', > '/srv/node:/srv/node', '/dev:/dev', '/var/cache/swift:/var/cache/swift'] > swift_container_auditor: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-swift-container:2018-06-15.2 > net: host > restart: always > user: swift > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/lib/kolla/config_files/swift_container_auditor.json:/var/lib/kolla/config_files/config.json:ro', > 
'/var/lib/config-data/puppet-generated/swift/:/var/lib/kolla/config_files/src:ro', > '/srv/node:/srv/node', '/dev:/dev', '/var/cache/swift:/var/cache/swift'] > swift_container_replicator: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-swift-container:2018-06-15.2 > net: host > restart: always > user: swift > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/lib/kolla/config_files/swift_container_replicator.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/swift/:/var/lib/kolla/config_files/src:ro', > '/srv/node:/srv/node', '/dev:/dev', '/var/cache/swift:/var/cache/swift'] > swift_container_server: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > healthcheck: {test: /openstack/healthcheck} > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-swift-container:2018-06-15.2 > net: host > restart: always > user: swift > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/lib/kolla/config_files/swift_container_server.json:/var/lib/kolla/config_files/config.json:ro', > 
'/var/lib/config-data/puppet-generated/swift/:/var/lib/kolla/config_files/src:ro', > '/srv/node:/srv/node', '/dev:/dev', '/var/cache/swift:/var/cache/swift'] > swift_container_updater: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-swift-container:2018-06-15.2 > net: host > restart: always > user: swift > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/lib/kolla/config_files/swift_container_updater.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/swift/:/var/lib/kolla/config_files/src:ro', > '/srv/node:/srv/node', '/dev:/dev', '/var/cache/swift:/var/cache/swift'] > swift_object_auditor: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-swift-object:2018-06-15.2 > net: host > restart: always > user: swift > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/lib/kolla/config_files/swift_object_auditor.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/swift/:/var/lib/kolla/config_files/src:ro', > '/srv/node:/srv/node', 
'/dev:/dev', '/var/cache/swift:/var/cache/swift'] > swift_object_expirer: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-swift-proxy-server:2018-06-15.2 > net: host > restart: always > user: swift > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/lib/kolla/config_files/swift_object_expirer.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/swift/:/var/lib/kolla/config_files/src:ro', > '/srv/node:/srv/node', '/dev:/dev', '/var/cache/swift:/var/cache/swift'] > swift_object_replicator: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-swift-object:2018-06-15.2 > net: host > restart: always > user: swift > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/lib/kolla/config_files/swift_object_replicator.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/swift/:/var/lib/kolla/config_files/src:ro', > '/srv/node:/srv/node', '/dev:/dev', '/var/cache/swift:/var/cache/swift'] > swift_object_server: > environment: 
[KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > healthcheck: {test: /openstack/healthcheck} > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-swift-object:2018-06-15.2 > net: host > restart: always > user: swift > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/lib/kolla/config_files/swift_object_server.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/swift/:/var/lib/kolla/config_files/src:ro', > '/srv/node:/srv/node', '/dev:/dev', '/var/cache/swift:/var/cache/swift'] > swift_object_updater: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-swift-object:2018-06-15.2 > net: host > restart: always > user: swift > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/lib/kolla/config_files/swift_object_updater.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/swift/:/var/lib/kolla/config_files/src:ro', > '/srv/node:/srv/node', '/dev:/dev', '/var/cache/swift:/var/cache/swift'] > swift_proxy: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > healthcheck: {test: /openstack/healthcheck} > image: 
rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-swift-proxy-server:2018-06-15.2 > net: host > restart: always > start_order: 2 > user: swift > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/lib/kolla/config_files/swift_proxy.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/swift/:/var/lib/kolla/config_files/src:ro', > '/run:/run', '/srv/node:/srv/node', '/dev:/dev'] > swift_rsync: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-swift-object:2018-06-15.2 > net: host > privileged: true > restart: always > user: root > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/lib/kolla/config_files/swift_rsync.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/swift/:/var/lib/kolla/config_files/src:ro', > '/srv/node:/srv/node', '/dev:/dev'] > step_5: > ceilometer_gnocchi_upgrade: > command: [/usr/bin/bootstrap_host_exec, ceilometer_agent_central, 'su ceilometer > -s /bin/bash -c ''for n in {1..10}; do /usr/bin/ceilometer-upgrade --skip-metering-database > && exit 0 || sleep 5; done; exit 1'''] 
> detach: false > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-ceilometer-central:2018-06-15.2 > net: host > privileged: false > start_order: 1 > user: root > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/lib/config-data/ceilometer/etc/ceilometer/:/etc/ceilometer/:ro', > '/var/log/containers/ceilometer:/var/log/ceilometer'] > cinder_volume_init_bundle: > command: [/docker_puppet_apply.sh, '5', 'file,file_line,concat,augeas,pacemaker::resource::bundle,pacemaker::property,pacemaker::constraint::location', > 'include ::tripleo::profile::base::pacemaker;include ::tripleo::profile::pacemaker::cinder::volume_bundle', > ''] > detach: false > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-cinder-volume:2018-06-15.2 > net: host > start_order: 0 > user: root > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/var/lib/docker-config-scripts/docker_puppet_apply.sh:/docker_puppet_apply.sh:ro', > '/etc/puppet:/tmp/puppet-etc:ro', '/usr/share/openstack-puppet/modules:/usr/share/openstack-puppet/modules:ro', > '/etc/corosync/corosync.conf:/etc/corosync/corosync.conf:ro', '/dev/shm:/dev/shm:rw'] > gnocchi_api: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > image: 
rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-gnocchi-api:2018-06-15.2 > net: host > privileged: false > restart: always > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/lib/gnocchi:/var/lib/gnocchi:rw', '/var/lib/kolla/config_files/gnocchi_api.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/gnocchi/:/var/lib/kolla/config_files/src:ro', > '/var/log/containers/gnocchi:/var/log/gnocchi', '/var/log/containers/httpd/gnocchi-api:/var/log/httpd', > '/etc/ceph:/var/lib/kolla/config_files/src-ceph:ro', '', ''] > gnocchi_metricd: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-gnocchi-metricd:2018-06-15.2 > net: host > privileged: false > restart: always > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/lib/kolla/config_files/gnocchi_metricd.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/gnocchi/:/var/lib/kolla/config_files/src:ro', > '/var/lib/gnocchi:/var/lib/gnocchi:rw', '/var/log/containers/gnocchi:/var/log/gnocchi', > '/etc/ceph:/var/lib/kolla/config_files/src-ceph:ro'] > 
gnocchi_statsd: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-gnocchi-statsd:2018-06-15.2 > net: host > privileged: false > restart: always > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/lib/kolla/config_files/gnocchi_statsd.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/gnocchi/:/var/lib/kolla/config_files/src:ro', > '/var/log/containers/gnocchi:/var/log/gnocchi', '/var/lib/gnocchi:/var/lib/gnocchi:rw', > '/etc/ceph:/var/lib/kolla/config_files/src-ceph:ro'] > nova_api_discover_hosts: > command: /usr/bin/bootstrap_host_exec nova_api /nova_api_discover_hosts.sh > detach: false > environment: [TRIPLEO_DEPLOY_IDENTIFIER=] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-nova-api:2018-06-15.2 > net: host > start_order: 1 > user: root > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/log/containers/nova:/var/log/nova', '/var/log/containers/httpd/nova-api:/var/log/httpd', > '/var/lib/config-data/nova/etc/my.cnf.d/tripleo.cnf:/etc/my.cnf.d/tripleo.cnf:ro', > '/var/lib/config-data/nova/etc/nova/:/etc/nova/:ro', 
'/var/lib/config-data/nova/etc/my.cnf.d/tripleo.cnf:/etc/my.cnf.d/tripleo.cnf:ro', > '/var/lib/config-data/nova/etc/nova/:/etc/nova/:ro', '/var/log/containers/nova:/var/log/nova', > '/var/lib/docker-config-scripts/nova_api_discover_hosts.sh:/nova_api_discover_hosts.sh:ro'] > role_data_docker_config_scripts: > create_swift_secret.sh: {content: "#!/bin/bash\nexport OS_PROJECT_DOMAIN_ID=$(crudini\ > \ --get /etc/swift/keymaster.conf kms_keymaster project_domain_id)\nexport\ > \ OS_USER_DOMAIN_ID=$(crudini --get /etc/swift/keymaster.conf kms_keymaster\ > \ user_domain_id)\nexport OS_PROJECT_NAME=$(crudini --get /etc/swift/keymaster.conf\ > \ kms_keymaster project_name)\nexport OS_USERNAME=$(crudini --get /etc/swift/keymaster.conf\ > \ kms_keymaster username)\nexport OS_PASSWORD=$(crudini --get /etc/swift/keymaster.conf\ > \ kms_keymaster password)\nexport OS_AUTH_URL=$(crudini --get /etc/swift/keymaster.conf\ > \ kms_keymaster auth_endpoint)\nexport OS_AUTH_TYPE=password\nexport OS_IDENTITY_API_VERSION=3\n\ > \necho \"Check if secret already exists\"\nsecret_href=$(openstack secret\ > \ list --name swift_root_secret_uuid)\nrc=$?\nif [[ $rc != 0 ]]; then\n\ > \ echo \"Failed to check secrets, check if Barbican in enabled and responding\ > \ properly\"\n exit $rc;\nfi\nif [ -z \"$secret_href\" ]; then\n echo\ > \ \"Create new secret\"\n order_href=$(openstack secret order create --name\ > \ swift_root_secret_uuid --payload-content-type=\"application/octet-stream\"\ > \ --algorithm aes --bit-length 256 --mode ctr key -f value -c \"Order href\"\ > )\nfi\n", mode: '0700'} > docker_puppet_apply.sh: {content: "#!/bin/bash\nset -eux\nSTEP=$1\nTAGS=$2\n\ > CONFIG=$3\nEXTRA_ARGS=${4:-''}\nif [ -d /tmp/puppet-etc ]; then\n # ignore\ > \ copy failures as these may be the same file depending on docker mounts\n\ > \ cp -a /tmp/puppet-etc/* /etc/puppet || true\nfi\necho \"{\\\"step\\\"\ > : ${STEP}}\" > /etc/puppet/hieradata/docker.json\nexport FACTER_uuid=docker\n\ > set +e\npuppet 
apply $EXTRA_ARGS \\\n --verbose \\\n --detailed-exitcodes\ > \ \\\n --summarize \\\n --color=false \\\n --modulepath /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules\ > \ \\\n --tags $TAGS \\\n -e \"${CONFIG}\"\nrc=$?\nset -e\nset +ux\n\ > if [ $rc -eq 2 -o $rc -eq 0 ]; then\n exit 0\nfi\nexit $rc\n", mode: '0700'} > neutron_ovs_agent_launcher.sh: {content: '#!/bin/bash > > set -xe > > /usr/bin/python -m neutron.cmd.destroy_patch_ports --config-file /usr/share/neutron/neutron-dist.conf > --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/openvswitch_agent.ini > --config-dir /etc/neutron/conf.d/common --config-dir /etc/neutron/conf.d/neutron-openvswitch-agent > > /usr/bin/neutron-openvswitch-agent --config-file /usr/share/neutron/neutron-dist.conf > --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/openvswitch_agent.ini > --config-file /etc/neutron/plugins/ml2/ml2_conf.ini --config-dir /etc/neutron/conf.d/common > --log-file=/var/log/neutron/openvswitch-agent.log > > ', mode: '0755'} > nova_api_discover_hosts.sh: {content: "#!/bin/bash\nexport OS_PROJECT_DOMAIN_NAME=$(crudini\ > \ --get /etc/nova/nova.conf keystone_authtoken project_domain_name)\nexport\ > \ OS_USER_DOMAIN_NAME=$(crudini --get /etc/nova/nova.conf keystone_authtoken\ > \ user_domain_name)\nexport OS_PROJECT_NAME=$(crudini --get /etc/nova/nova.conf\ > \ keystone_authtoken project_name)\nexport OS_USERNAME=$(crudini --get /etc/nova/nova.conf\ > \ keystone_authtoken username)\nexport OS_PASSWORD=$(crudini --get /etc/nova/nova.conf\ > \ keystone_authtoken password)\nexport OS_AUTH_URL=$(crudini --get /etc/nova/nova.conf\ > \ keystone_authtoken auth_url)\nexport OS_AUTH_TYPE=password\nexport OS_IDENTITY_API_VERSION=3\n\ > \necho \"(cellv2) Running cell_v2 host discovery\"\ntimeout=600\nloop_wait=30\n\ > declare -A discoverable_hosts\nfor host in $(hiera -c /etc/puppet/hiera.yaml\ > \ cellv2_discovery_hosts | sed -e 
'/^nil$/d' | tr \",\" \" \"); do discoverable_hosts[$host]=1;\ > \ done\ntimeout_at=$(( $(date +\"%s\") + ${timeout} ))\necho \"(cellv2)\ > \ Waiting ${timeout} seconds for hosts to register\"\nfinished=0\nwhile\ > \ : ; do\n for host in $(openstack -q compute service list -c 'Host' -c\ > \ 'Zone' -f value | awk '$2 != \"internal\" { print $1 }'); do\n if ((\ > \ discoverable_hosts[$host] == 1 )); then\n echo \"(cellv2) compute\ > \ node $host has registered\"\n unset discoverable_hosts[$host]\n \ > \ fi\n done\n finished=1\n for host in \"${!discoverable_hosts[@]}\"\ > ; do\n if (( ${discoverable_hosts[$host]} == 1 )); then\n echo \"\ > (cellv2) compute node $host has not registered\"\n finished=0\n \ > \ fi\n done\n remaining=$(( $timeout_at - $(date +\"%s\") ))\n if ((\ > \ $finished == 1 )); then\n echo \"(cellv2) All nodes registered\"\n\ > \ break\n elif (( $remaining <= 0 )); then\n echo \"(cellv2) WARNING:\ > \ timeout waiting for nodes to register, running host discovery regardless\"\ > \n echo \"(cellv2) Expected host list:\" $(hiera -c /etc/puppet/hiera.yaml\ > \ cellv2_discovery_hosts | sed -e '/^nil$/d' | sort -u | tr ',' ' ')\n\ > \ echo \"(cellv2) Detected host list:\" $(openstack -q compute service\ > \ list -c 'Host' -c 'Zone' -f value | awk '$2 != \"internal\" { print $1\ > \ }' | sort -u | tr '\\n', ' ')\n break\n else\n echo \"(cellv2)\ > \ Waiting ${remaining} seconds for hosts to register\"\n sleep $loop_wait\n\ > \ fi\ndone\necho \"(cellv2) Running host discovery...\"\nsu nova -s /bin/bash\ > \ -c \"/usr/bin/nova-manage cell_v2 discover_hosts --by-service --verbose\"\ > \n", mode: '0700'} > nova_api_ensure_default_cell.sh: {content: "#!/bin/bash\nDEFID=$(nova-manage\ > \ cell_v2 list_cells | sed -e '1,3d' -e '$d' | awk -F ' *| *' '$2 == \"\ > default\" {print $4}')\nif [ \"$DEFID\" ]; then\n echo \"(cellv2) Updating\ > \ default cell_v2 cell $DEFID\"\n su nova -s /bin/bash -c \"/usr/bin/nova-manage\ > \ cell_v2 update_cell --cell_uuid $DEFID 
--name=default\"\nelse\n echo\ > \ \"(cellv2) Creating default cell_v2 cell\"\n su nova -s /bin/bash -c\ > \ \"/usr/bin/nova-manage cell_v2 create_cell --name=default\"\nfi\n", mode: '0700'} > set_swift_keymaster_key_id.sh: {content: "#!/bin/bash\nexport OS_PROJECT_DOMAIN_ID=$(crudini\ > \ --get /etc/swift/keymaster.conf kms_keymaster project_domain_id)\nexport\ > \ OS_USER_DOMAIN_ID=$(crudini --get /etc/swift/keymaster.conf kms_keymaster\ > \ user_domain_id)\nexport OS_PROJECT_NAME=$(crudini --get /etc/swift/keymaster.conf\ > \ kms_keymaster project_name)\nexport OS_USERNAME=$(crudini --get /etc/swift/keymaster.conf\ > \ kms_keymaster username)\nexport OS_PASSWORD=$(crudini --get /etc/swift/keymaster.conf\ > \ kms_keymaster password)\nexport OS_AUTH_URL=$(crudini --get /etc/swift/keymaster.conf\ > \ kms_keymaster auth_endpoint)\nexport OS_AUTH_TYPE=password\nexport OS_IDENTITY_API_VERSION=3\n\ > echo \"retrieve key_id\"\nloop_wait=2\nfor i in {0..5}; do\n #TODO update\ > \ uuid from mistral here too\n secret_href=$(openstack secret list --name\ > \ swift_root_secret_uuid)\n if [ \"$secret_href\" ]; then\n echo \"\ > set key_id in keymaster.conf\"\n secret_href=$(openstack secret list\ > \ --name swift_root_secret_uuid -f value -c \"Secret href\")\n crudini\ > \ --set /etc/swift/keymaster.conf kms_keymaster key_id ${secret_href##*/}\n\ > \ exit 0\n else\n echo \"no key, wait for $loop_wait and check again\"\ > \n sleep $loop_wait\n ((loop_wait++))\n fi\ndone\necho \"Failed to\ > \ set secret in keymaster.conf, check if Barbican is enabled and responding\ > \ properly\"\nexit 1\n", mode: '0700'} > role_data_docker_puppet_tasks: > step_3: > - {config_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-keystone:2018-06-15.2', > config_volume: keystone_init_tasks, puppet_tags: 
'keystone_config,keystone_domain_config,keystone_endpoint,keystone_identity_provider,keystone_paste_ini,keystone_role,keystone_service,keystone_tenant,keystone_user,keystone_user_role,keystone_domain', > step_config: 'include ::tripleo::profile::base::keystone'} > role_data_external_deploy_tasks: [] > role_data_external_post_deploy_tasks: [] > role_data_fast_forward_post_upgrade_tasks: > - name: Register repo type and args > set_fact: > fast_forward_repo_args: > tripleo_repos: {ocata: -b ocata current, pike: -b pike current, queens: -b > queens current} > fast_forward_repo_type: custom-script > - debug: {msg: 'fast_forward_repo_type: {{ fast_forward_repo_type }} fast_forward_repo_args: > {{ fast_forward_repo_args }}'} > - block: > - git: {dest: /home/stack/tripleo-repos/, repo: 'https://github.com/openstack/tripleo-repos.git'} > name: clone tripleo-repos > - args: {chdir: /home/stack/tripleo-repos/} > command: python setup.py install > name: install tripleo-repos > - {command: 'tripleo-repos {{ fast_forward_repo_args.tripleo_repos[release] > }}', name: Enable tripleo-repos} > when: [ffu_packages_apply|bool, fast_forward_repo_type == 'tripleo-repos'] > - block: > - copy: {content: "set -e\nMIRROR=tlv\nyum localinstall -y http://download.lab.bos.redhat.com/rcm-guest/puddles/OpenStack/rhos-release/rhos-release-latest.noarch.rpm\n\ > rhos-release -x\ncase $1 in\n ocata)\n rhos-release 11\n ;;\n \ > \ pike)\n rhos-release 12\n ;;\n queens)\n rhos-release 13\n\ > \ rm -rf /etc/yum.repos.d/rhos-release-ceph*\n ;;\n *)\n echo\ > \ \"unknown release $1\" >&2\n exit 1\nesac\nif [[ $MIRROR != default_repo_mirror\ > \ ]]; then\n sed -i \"s/download.*\\.lab.*\\.redhat\\.com/rhos-qe-mirror-${MIRROR}.usersys.redhat.com/\"\ > \ /etc/yum.repos.d/*.repo\n sed -i \"s/download\\(-node.*\\)\\?\\.eng.*\\\ > .redhat\\.com/rhos-qe-mirror-${MIRROR}.usersys.redhat.com/\" /etc/yum.repos.d/*.repo\n\ > \ sed -i \"s/rhos-release.*\\.redhat\\.com/rhos-qe-mirror-${MIRROR}.usersys.redhat.com\\\ > 
/rhos-release/\" /etc/yum.repos.d/*.repo\n sed -r -i \"s/ayanami.*\\\ > .redhat.com/rhos-qe-mirror-${MIRROR}.usersys.redhat.com\\/ayanami/\" /etc/yum.repos.d/*.repo\n\ > \ sed -i \"s/pulp.*\\.redhat\\.com/rhos-qe-mirror-${MIRROR}.usersys.redhat.com\\\ > /pulp/\" /etc/yum.repos.d/*.repo\nfi\nyum clean all\n", dest: /root/ffu_update_repo.sh, > mode: 448} > name: Create custom Script for upgrading repo. > - {name: Execute custom script for upgrading repo., shell: '/root/ffu_update_repo.sh > {{release}}'} > when: [ffu_packages_apply|bool, fast_forward_repo_type == 'custom-script'] > role_data_fast_forward_upgrade_tasks: > - ignore_errors: true > name: Check for aodh running under apache > register: aodh_httpd_enabled_result > shell: httpd -t -D DUMP_VHOSTS | grep -q aodh_wsgi > tags: common > when: [step|int == 0, release == 'ocata'] > - name: Set fact aodh_httpd_enabled > set_fact: {aodh_httpd_enabled: '{{ aodh_httpd_enabled_result.rc == 0 }}'} > when: [step|int == 0, release == 'ocata'] > - command: systemctl is-active --quiet httpd > ignore_errors: true > name: Check if httpd is running > register: httpd_running_result > when: [step|int == 0, release == 'ocata', httpd_running is undefined] > - name: Set fact httpd_running if undefined > set_fact: {httpd_running: '{{ httpd_running_result.rc == 0 }}'} > when: [step|int == 0, release == 'ocata', httpd_running is undefined] > - name: Stop and disable aodh (under httpd) > service: name=httpd state=stopped enabled=no > when: [step|int == 1, release == 'ocata', aodh_httpd_enabled|bool, httpd_running|bool] > - name: Aodh package update > shell: yum -y update openstack-aodh* > when: [step|int == 6, is_bootstrap_node|bool, aodh_httpd_enabled|bool] > - command: aodh-dbsync > name: aodh db sync > when: [step|int == 8, is_bootstrap_node|bool, aodh_httpd_enabled|bool] > - command: systemctl is-enabled --quiet openstack-aodh-evaluator > ignore_errors: true > name: FFU check if openstack-aodh-evaluator is deployed > register: 
aodh_evaluator_enabled_result > when: [step|int == 0, release == 'ocata'] > - name: Set fact aodh_evaluator_enabled > set_fact: {aodh_evaluator_enabled: '{{ aodh_evaluator_enabled_result.rc == 0 > }}'} > when: [step|int == 0, release == 'ocata'] > - name: FFU stop and disable openstack-aodh-evaluator service > service: name=openstack-aodh-evaluator state=stopped enabled=no > when: [step|int == 1, release == 'ocata', aodh_evaluator_enabled|bool] > - command: systemctl is-enabled --quiet openstack-aodh-listener > ignore_errors: true > name: FFU check if openstack-aodh-listener is deployed > register: aodh_listener_enabled_result > when: [step|int == 0, release == 'ocata'] > - name: Set fact aodh_listener_enabled > set_fact: {aodh_listener_enabled: '{{ aodh_listener_enabled_result.rc == 0 }}'} > when: [step|int == 0, release == 'ocata'] > - name: FFU stop and disable openstack-aodh-listener service > service: name=openstack-aodh-listener state=stopped enabled=no > when: [step|int == 1, release == 'ocata', aodh_listener_enabled|bool] > - command: systemctl is-enabled --quiet openstack-aodh-notifier > ignore_errors: true > name: FFU check if openstack-aodh-notifier is deployed > register: aodh_notifier_enabled_result > when: [step|int == 0, release == 'ocata'] > - name: Set fact aodh_notifier_enabled > set_fact: {aodh_notifier_enabled: '{{ aodh_notifier_enabled_result.rc == 0 }}'} > when: [step|int == 0, release == 'ocata'] > - name: FFU stop and disable openstack-aodh-notifier service > service: name=openstack-aodh-notifier state=stopped enabled=no > when: [step|int == 1, release == 'ocata', aodh_notifier_enabled|bool] > - file: path=/etc/httpd/conf.d/10-ceilometer_wsgi.conf state=absent > name: Purge Ceilometer apache config files > when: [step|int == 1, release == 'ocata'] > - lineinfile: dest=/etc/httpd/conf/ports.conf state=absent regexp="8777$" > name: Clean up ceilometer port from ports.conf > when: [step|int == 1, release == 'ocata'] > - command: systemctl 
is-enabled --quiet openstack-ceilometer-collector > ignore_errors: true > name: FFU check if openstack-ceilometer-collector is deployed > register: ceilometer_agent_collector_enabled_result > when: [step|int == 0, release == 'ocata'] > - name: Set fact ceilometer_agent_collector_enabled > set_fact: {ceilometer_agent_collector_enabled: '{{ ceilometer_agent_collector_enabled_result.rc > == 0 }}'} > when: [step|int == 0, release == 'ocata'] > - name: Stop and disable ceilometer_collector service on upgrade > service: name=openstack-ceilometer-collector state=stopped enabled=no > when: [step|int == 1, release == 'ocata', ceilometer_agent_collector_enabled|bool] > - changed_when: [step|int == 1, release == 'ocata', remove_ceilometer_expirer_crontab.stderr > != "no crontab for ceilometer"] > failed_when: [step|int == 1, release == 'ocata', remove_ceilometer_expirer_crontab.rc > != 0, remove_ceilometer_expirer_crontab.stderr != "no crontab for ceilometer"] > name: Remove ceilometer expirer cron tab on upgrade > register: remove_ceilometer_expirer_crontab > shell: /usr/bin/crontab -u ceilometer -r > - command: systemctl is-enabled --quiet openstack-ceilometer-central > ignore_errors: true > name: FFU check if openstack-ceilometer-central is deployed > register: ceilometer_agent_central_enabled_result > when: [step|int == 0, release == 'ocata'] > - name: Set fact ceilometer_agent_central_enabled > set_fact: {ceilometer_agent_central_enabled: '{{ ceilometer_agent_central_enabled_result.rc > == 0 }}'} > when: [step|int == 0, release == 'ocata'] > - name: FFU stop and disable openstack-ceilometer-central service > service: name=openstack-ceilometer-central state=stopped enabled=no > when: [step|int == 1, release == 'ocata', ceilometer_agent_central_enabled|bool] > - command: systemctl is-enabled openstack-ceilometer-notification > ignore_errors: true > name: FFU check if openstack-ceilometer-notification is deployed > register: ceilometer_agent_notification_enabled_result > 
when: [step|int == 0, release == 'ocata'] > - name: Set fact ceilometer_agent_notification_enabled > set_fact: {ceilometer_agent_notification_enabled: '{{ ceilometer_agent_notification_enabled_result.rc > == 0 }}'} > when: [step|int == 0, release == 'ocata'] > - name: FFU stop and diable openstack-ceilometer-notification service > service: name=openstack-ceilometer-notification state=stopped enabled=no > when: [step|int == 1, release == 'ocata', ceilometer_agent_notification_enabled|bool] > - command: systemctl is-enabled --quiet openstack-cinder-api > ignore_errors: true > name: Check is cinder_api is deployed > register: cinder_api_enabled_result > when: [step|int == 0, release == 'ocata'] > - name: Set fact cinder_api_enabled > set_fact: {cinder_api_enabled: '{{ cinder_api_enabled_result.rc == 0 }}'} > when: [step|int == 0, release == 'ocata'] > - name: Stop openstack-cinder-api > service: name=openstack-cinder-api state=stopped > when: [step|int == 1, release == 'ocata', cinder_api_enabled|bool] > - name: Extra removal of services for cinder > shell: 'cinder-manage service list |\ > > grep -v Binary | tr ''@'' '' '' |\ > > awk ''{print $1 " " $2}'' |\ > > while read i ; do cinder-manage service remove $i ; done > > ' > when: [step|int == 5, release == 'pike', is_bootstrap_node|bool] > - command: cinder-manage db online_data_migrations > name: Extra migration for cinder > when: [step|int == 5, release == 'pike', is_bootstrap_node|bool] > - name: Cinder package update > shell: yum -y update openstack-cinder* > when: [step|int == 6, is_bootstrap_node|bool] > - command: cinder-manage db sync > name: Cinder db sync > when: [step|int == 8, is_bootstrap_node|bool] > - command: systemctl is-enabled --quiet openstack-cinder-scheduler > ignore_errors: true > name: Check if cinder_scheduler is deployed > register: cinder_scheduler_enabled_result > when: [step|int == 0, release == 'ocata'] > - name: Set fact cinder_scheduler_enabled > set_fact: {cinder_scheduler_enabled: 
'{{ cinder_scheduler_enabled_result.rc > == 0 }}'} > when: [step|int == 0, release == 'ocata'] > - name: Stop openstack-cinder-scheduler > service: name=openstack-cinder-scheduler state=stopped enabled=no > when: [step|int == 1, release == 'ocata', cinder_scheduler_enabled|bool] > - ignore_errors: true > name: Check cluster resource status > pacemaker_resource: {check_mode: false, resource: openstack-cinder-volume, state: show} > register: cinder_volume_res_result > when: [step|int == 0, release == 'ocata', is_bootstrap_node|bool] > - name: Set fact cinder_volume_res > set_fact: {cinder_volume_res: '{{ cinder_volume_res_result.rc == 0 }}'} > when: [step|int == 0, release == 'ocata', is_bootstrap_node|bool] > - name: Disable the openstack-cinder-volume cluster resource > pacemaker_resource: {resource: openstack-cinder-volume, state: disable, wait_for_resource: true} > register: cinder_volume_output > retries: 5 > until: cinder_volume_output.rc == 0 > when: [step|int == 2, release == 'ocata', is_bootstrap_node|bool, cinder_volume_res|bool] > - command: systemctl is-enabled --quiet openstack-glance-api > ignore_errors: true > name: Check if glance_api is deployed > register: glance_api_enabled_result > when: [step|int == 0, release == 'ocata'] > - name: Set fact glance_api_enabled > set_fact: {glance_api_enabled: '{{ glance_api_enabled_result.rc == 0 }}'} > when: [step|int == 0, release == 'ocata'] > - name: Stop openstack-glance-api > service: name=openstack-glance-api state=stopped enabled=no > when: [step|int == 1, release == 'ocata', glance_api_enabled|bool] > - name: glance package update > when: [step|int == 6, is_bootstrap_node|bool] > yum: name=openstack-glance state=latest > - command: glance-manage db_sync > name: glance db sync > when: [step|int == 8, is_bootstrap_node|bool] > - command: systemctl is-active --quiet httpd > ignore_errors: true > name: Check if httpd service is running > register: httpd_running_result > tags: common > when: [step|int == 0, 
release == 'ocata', httpd_running is undefined] > - name: Set fact httpd_running if unset > set_fact: {httpd_running: '{{ httpd_running_result.rc == 0 }}'} > when: [step|int == 0, release == 'ocata', httpd_running is undefined] > - command: systemctl is-enabled --quiet openstack-gnocchi-api > ignore_errors: true > name: Check if gnocchi_api is deployed > register: gnocchi_api_enabled_result > tags: common > when: [step|int == 0, release == 'ocata'] > - name: Set fact gnocchi_api_enabled > set_fact: {gnocchi_api_enabled: '{{ gnocchi_api_enabled_result.rc == 0 }}'} > when: [step|int == 0, release == 'ocata'] > - ignore_errors: true > name: Check for gnocchi_api running under apache > register: gnocchi_httpd_enabled_result > shell: httpd -t -D DUMP_VHOSTS | grep -q gnocchi > tags: common > when: [step|int == 0, release == 'ocata'] > - name: Set fact gnocchi_httpd_enabled > set_fact: {gnocchi_httpd_enabled: '{{ gnocchi_httpd_enabled_result.rc == 0 }}'} > when: [step|int == 0, release == 'ocata'] > - name: Stop and disable gnocchi_api service > service: name=openstack-gnocchi-api state=stopped enabled=no > when: [step|int == 1, release == 'ocata', gnocchi_api_enabled|bool] > - name: Stop and disable httpd service > service: name=httpd state=stopped enabled=no > when: [step|int == 1, release == 'ocata', gnocchi_httpd_enabled|bool, httpd_running|bool] > - name: Update gnocchi packages > when: [step|int == 6, is_bootstrap_node|bool] > with_items: [openstack-gnocchi*, numpy] > yum: name={{ item }} state=latest > - command: gnocchi-upgrade --skip-storage > name: Sync gnocchi DB > when: [step|int == 8, is_bootstrap_node|bool] > - command: systemctl is-enabled --quiet openstack-gnocchi-metricd > ignore_errors: true > name: FFU check if openstack-gnocchi-metricd is deployed > register: gnocchi_metricd_enabled_result > when: [step|int == 0, release == 'ocata'] > - name: Set fact gnocchi_metricd_enabled > set_fact: {gnocchi_metricd_enabled: '{{ gnocchi_metricd_enabled_result.rc 
== > 0 }}'} > when: [step|int == 0, release == 'ocata'] > - name: FFU stop and disable openstack-gnocchi-metricd service > service: name=openstack-gnocchi-metricd state=stopped enabled=no > when: [step|int == 1, release == 'ocata', gnocchi_metricd_enabled|bool] > - command: systemctl is-enabled --quiet openstack-gnocchi-statsd > ignore_errors: true > name: FFU check if openstack-gnocchi-statsd is deployed > register: gnocchi_statsd_enabled_result > when: [step|int == 0, release == 'ocata'] > - name: Set fact gnocchi_statsd_enabled > set_fact: {gnocchi_statsd_enabled: '{{ gnocchi_statsd_enabled_result.rc == 0 > }}'} > when: [step|int == 0, release == 'ocata'] > - name: FFU stop and disable openstack-gnocchi-statsd service > service: name=openstack-gnocchi-statsd state=stopped enabled=no > when: [step|int == 2, release == 'ocata', gnocchi_statsd_enabled|bool] > - command: systemctl is-enabled openstack-heat-api > ignore_errors: true > name: FFU check openstack-heat-api is enabled > register: heat_api_enabled_result > when: [step|int == 0, release == 'ocata'] > - name: Set fact heat_api_enabled > set_fact: {heat_api_enabled: '{{ heat_api_enabled_result.rc == 0 }}'} > when: [step|int == 0, release == 'ocata'] > - name: FFU stop and disable openstack-heat-api > service: name=openstack-heat-api state=stopped enabled=no > when: [step|int == 1, release == 'ocata', heat_api_enabled|bool] > - name: FFU Heat package update > shell: yum -y update openstack-heat* > when: [step|int == 6, is_bootstrap_node|bool] > - command: heat-manage db_sync > name: FFU Heat db-sync > when: [step|int == 8, is_bootstrap_node|bool] > - command: systemctl is-enabled openstack-heat-api-cloudwatch > ignore_errors: true > name: FFU check if heat_api_cloudwatch is deployed > register: heat_api_cloudwatch_enabled_result > when: [step|int == 0, release == 'ocata'] > - name: Set fact heat_api_cloudwatch_enabled > set_fact: {heat_api_cloudwatch_enabled: '{{ heat_api_cloudwatch_enabled_result.rc > == 0 
}}'} > when: [step|int == 0, release == 'ocata'] > - name: FFU stop and disable the heat-api-cloudwatch service. > service: name=openstack-heat-api-cloudwatch state=stopped enabled=no > when: [step|int == 1, release == 'ocata', heat_api_cloudwatch_enabled|bool] > - ignore_errors: true > name: Remove heat_api_cloudwatch package > when: [step|int == 2, release == 'ocata'] > yum: name=openstack-heat-api-cloudwatch state=removed > - command: systemctl is-enabled openstack-heat-api-cfn > ignore_errors: true > name: FFU check if openstack-heat-api-cfn service is enabled > register: heat_api_cfn_enabled_result > when: [step|int == 0, release == 'ocata'] > - name: Set fact heat_api_cfn_enabled > set_fact: {heat_api_cfn_enabled: '{{ heat_api_cfn_enabled_result.rc == 0 }}'} > when: [step|int == 0, release == 'ocata'] > - name: FFU stop and disable openstack-heat-api-cfn service > service: name=openstack-heat-api-cfn state=stopped enabled=no > when: [step|int == 1, release == 'ocata', heat_api_cfn_enabled|bool] > - command: systemctl is-enabled --quiet openstack-heat-engine > ignore_errors: true > name: FFU check if openstack-heat-engine is enabled > register: heat_engine_enabled_result > when: [step|int == 0, release == 'ocata'] > - name: Set fact heat_engine_enabled > set_fact: {heat_engine_enabled: '{{ heat_engine_enabled_result.rc == 0 }}'} > when: [step|int == 0, release == 'ocata'] > - name: FFU stop and disable openstack-heat-engine service > service: name=openstack-heat-engine state=stopped enabled=no > when: [step|int == 1, release == 'ocata', heat_engine_enabled|bool] > - ignore_errors: true > name: Check for keystone running under apache > register: keystone_httpd_enabled_result > shell: httpd -t -D DUMP_VHOSTS | grep -q keystone_wsgi > tags: common > when: [step|int == 0, release == 'ocata'] > - name: Set fact keystone_httpd_enabled > set_fact: {keystone_httpd_enabled: '{{ keystone_httpd_enabled_result.rc == 0 > }}'} > when: [step|int == 0, release == 'ocata'] > - 
name: Stop and disable keystone (under httpd) > service: name=httpd state=stopped enabled=no > when: [step|int == 1, release == 'ocata', keystone_httpd_enabled|bool, httpd_running|bool] > - name: Keystone package update > shell: yum -y update openstack-keystone* > when: [step|int == 6, is_bootstrap_node|bool] > - command: keystone-manage db_sync > name: keystone db sync > when: [step|int == 8, is_bootstrap_node|bool] > - command: systemctl is-enabled --quiet memcached > ignore_errors: true > name: Check if memcached is deployed > register: memcached_enabled_result > tags: common > when: [step|int == 0, release == 'ocata'] > - name: memcached_enabled > set_fact: {memcached_enabled: '{{ memcached_enabled_result.rc == 0 }}'} > when: [step|int == 0, release == 'ocata'] > - name: Stop and disable memcached service > service: name=memcached state=stopped enabled=no > when: [step|int == 2, release == 'ocata', memcached_enabled|bool] > - command: systemctl is-enabled --quiet neutron-server > ignore_errors: true > name: Check if neutron_server is deployed > register: neutron_server_enabled_result > when: [step|int == 0, release == 'ocata'] > - name: Set fact neutron_server_enabled > set_fact: {neutron_server_enabled: '{{ neutron_server_enabled_result.rc == 0 > }}'} > when: [step|int == 0, release == 'ocata'] > - name: Stop neutron_server > service: name=neutron-server state=stopped enabled=no > when: [step|int == 1, release == 'ocata', neutron_server_enabled|bool] > - name: Neutron package update > shell: yum -y update openstack-neutron* > when: [step|int == 6, is_bootstrap_node|bool] > - name: Neutron package update workaround > when: [step|int == 6, is_bootstrap_node|bool] > yum: name=python-networking-odl state=latest > - command: neutron-db-manage upgrade head > name: Neutron db sync > when: [step|int == 8, is_bootstrap_node|bool] > - command: systemctl is-enabled --quiet neutron-dhcp-agent > ignore_errors: true > name: Check if neutron_dhcp_agent is deployed > 
register: neutron_dhcp_agent_enabled_result > when: [step|int == 0, release == 'ocata'] > - name: Set fact neutron_dhcp_agent_enabled > set_fact: {neutron_dhcp_agent_enabled: '{{ neutron_dhcp_agent_enabled_result.rc > == 0 }}'} > when: [step|int == 0, release == 'ocata'] > - name: Stop neutron_dhcp_agent > service: name=neutron-dhcp-agent state=stopped enabled=no > when: [step|int == 2, release == 'ocata', neutron_dhcp_agent_enabled|bool] > - command: systemctl is-enabled --quiet neutron-l3-agent > ignore_errors: true > name: Check if neutron_l3_agent is deployed > register: neutron_l3_agent_enabled_result > when: [step|int == 0, release == 'ocata'] > - name: Set fact neutron_l3_agent_enabled > set_fact: {neutron_l3_agent_enabled: '{{ neutron_l3_agent_enabled_result.rc > == 0 }}'} > when: [step|int == 0, release == 'ocata'] > - name: Stop neutron_l3_agent > service: name=neutron-l3-agent state=stopped enabled=no > when: [step|int == 1, release == 'ocata', neutron_l3_agent_enabled|bool] > - command: systemctl is-enabled --quiet neutron-metadata-agent > ignore_errors: true > name: Check if neutron_metadata_agent is deployed > register: neutron_metadata_agent_enabled_result > when: [step|int == 0, release == 'ocata'] > - name: Set fact neutron_metadata_agent_enabled > set_fact: {neutron_metadata_agent_enabled: '{{ neutron_metadata_agent_enabled_result.rc > == 0 }}'} > when: [step|int == 0, release == 'ocata'] > - name: Stop neutron_metadata_agent > service: name=neutron-metadata-agent state=stopped enabled=no > when: [step|int == 1, release == 'ocata', neutron_metadata_agent_enabled|bool] > - command: systemctl is-enabled --quiet neutron-openvswitch-agent > ignore_errors: true > name: Check if neutron_ovs_agent is deployed > register: neutron_ovs_agent_enabled_result > when: [step|int == 0, release == 'ocata'] > - name: Set fact neutron_ovs_agent_enabled > set_fact: {neutron_ovs_agent_enabled: '{{ neutron_ovs_agent_enabled_result.rc > == 0 }}'} > when: [step|int == 0, 
release == 'ocata'] > - name: Stop neutron_openvswitch_agent > service: name=neutron-openvswitch-agent state=stopped enabled=no > when: [step|int == 1, release == 'ocata', neutron_ovs_agent_enabled|bool] > - command: systemctl is-enabled --quiet openstack-nova-api > ignore_errors: true > name: Check if nova-api is deployed > register: nova_api_enabled_result > when: [step|int == 0, release == 'ocata'] > - name: Set fact nova_api_enabled > set_fact: {nova_api_enabled: '{{ nova_api_enabled_result.rc == 0 }}'} > when: [step|int == 0, release == 'ocata'] > - name: Stop openstack-nova-api service > service: name=openstack-nova-api state=stopped > when: [step|int == 1, nova_api_enabled|bool, release == 'ocata'] > - command: nova-manage db online_data_migrations > name: Extra migration for nova tripleo/+bug/1656791 > when: [step|int == 5, release == 'ocata', is_bootstrap_node|bool] > - command: yum update -y *nova* > name: Update nova packages > when: [step|int == 6, is_bootstrap_node|bool] > - block: > - mysql_db: {name: nova_cell0, state: present} > name: Create cell0 db > - mysql_user: {host_all: true, name: nova, priv: '*.*:ALL', state: present} > name: Grant access to cell0 db > - copy: {content: "$transport_url = os_transport_url({\n 'transport' => hiera('messaging_service_name',\ > \ 'rabbit'),\n 'hosts' => any2array(hiera('rabbitmq_node_names',\ > \ undef)),\n 'port' => sprintf('%s',hiera('nova::rabbit_port', '5672')\ > \ ),\n 'username' => hiera('nova::rabbit_userid', 'guest'),\n 'password'\ > \ => hiera('nova::rabbit_password'),\n 'ssl' => sprintf('%s',\ > \ bool2num(str2bool(hiera('nova::rabbit_use_ssl', '0'))))\n}) oslo::messaging::default\ > \ { 'nova_config':\n transport_url => $transport_url\n}\n", dest: /root/nova-api_upgrade_manifest.pp, > mode: 384} > name: Create puppet manifest to set transport_url in nova.conf > - {changed_when: puppet_apply_nova_api_upgrade.rc == 2, command: 'puppet apply > --modulepath 
/etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules > --detailed-exitcodes /root/nova-api_upgrade_manifest.pp', failed_when: 'puppet_apply_nova_api_upgrade.rc > not in [0,2]', name: Run puppet apply to set tranport_url in nova.conf, > register: puppet_apply_nova_api_upgrade} > - {name: Setup cell_v2 (map cell0), shell: 'nova-manage cell_v2 map_cell0 --database_connection=mysql+pymysql://nova:8MMttgFpZ4fKUe99GWMpEwNqC@10.0.0.12/nova_cell0'} > - {changed_when: nova_api_create_cell.rc == 0, failed_when: 'nova_api_create_cell.rc > not in [0,2]', name: Setup cell_v2 (create default cell), register: nova_api_create_cell, > shell: 'nova-manage cell_v2 create_cell --name=''default'' --database_connection=$(hiera > nova::database_connection)'} > - {async: 300, command: nova-manage db sync, name: Setup cell_v2 (sync nova/cell > DB), poll: 10} > - {name: Setup cell_v2 (get cell uuid), register: nova_api_cell_uuid, shell: 'nova-manage > cell_v2 list_cells | sed -e ''1,3d'' -e ''$d'' | awk -F '' *| *'' ''$2 == > "default" {print $4}'''} > - {command: 'nova-manage cell_v2 discover_hosts --cell_uuid {{nova_api_cell_uuid.stdout}} > --verbose', name: Setup cell_v2 (migrate hosts)} > - {command: 'nova-manage cell_v2 map_instances --cell_uuid {{nova_api_cell_uuid.stdout}}', > name: Setup cell_v2 (migrate instances)} > when: [step|int == 7, release == 'ocata', is_bootstrap_node|bool] > - command: nova-manage api_db sync > name: Sync nova_api DB > when: [step|int == 8, is_bootstrap_node|bool] > - command: nova-manage db online_data_migrations > name: Online data migration for nova > when: [step|int == 8, is_bootstrap_node|bool] > - command: systemctl is-enabled --quiet openstack-nova-conductor > ignore_errors: true > name: Check if nova_conductor is deployed > register: nova_conductor_enabled_result > when: [step|int == 0, release == 'ocata'] > - name: Set fact nova_conductor_enabled > set_fact: {nova_conductor_enabled: '{{ nova_conductor_enabled_result.rc 
== 0 > }}'} > when: [step|int == 0, release == 'ocata'] > - name: Stop and disable nova_conductor service > service: name=openstack-nova-conductor state=stopped > when: [step|int == 1, release == 'ocata', nova_conductor_enabled|bool] > - command: systemctl is-active --quiet openstack-nova-consoleauth > ignore_errors: true > name: Check if nova_consoleauth is deployed > register: nova_consoleauth_enabled_result > when: [step|int == 0, release == 'ocata'] > - name: Set fact nova_consoleauth_enabled > set_fact: {nova_consoleauth_enabled: '{{ nova_consoleauth_enabled_result.rc > == 0 }}'} > when: [step|int == 0, release == 'ocata'] > - name: Stop and disable nova-consoleauth service > service: name=openstack-nova-consoleauth state=stopped > when: [step|int == 1, release == 'ocata', nova_consoleauth_enabled|bool] > - command: systemctl is-enabled --quiet openstack-nova-api > ignore_errors: true > name: Check if nova_api_metadata is deployed > register: nova_metadata_enabled_result > tags: common > when: [step|int == 0, release == 'ocata'] > - name: Set fact nova_metadata_enabled > set_fact: {nova_metadata_enabled: '{{ nova_metadata_enabled_result.rc == 0 }}'} > when: [step|int == 0, release == 'ocata'] > - name: Stop and disable nova_api service > service: name=openstack-nova-api state=stopped enabled=no > when: [step|int == 1, release == 'ocata', nova_metadata_enabled|bool] > - command: systemctl is-enabled --quiet openstack-nova-scheduler > ignore_errors: true > name: Check if nova_scheduler is deployed > register: nova_scheduler_enabled_result > when: [step|int == 0, release == 'ocata'] > - name: Set fact nova_scheduler_enabled > set_fact: {nova_scheduler_enabled: '{{ nova_scheduler_enabled_result.rc == 0 > }}'} > when: [step|int == 0, release == 'ocata'] > - name: Stop and disable nova-scheduler service > service: name=openstack-nova-scheduler state=stopped > when: [step|int == 1, release == 'ocata', nova_scheduler_enabled|bool] > - command: systemctl is-enabled 
--quiet openstack-nova-novncproxy > ignore_errors: true > name: Check if nova vncproxy is deployed > register: nova_vncproxy_enabled_result > when: [step|int == 0, release == 'ocata'] > - name: Set fact nova_vncproxy_enabled > set_fact: {nova_vncproxy_enabled: '{{ nova_vncproxy_enabled_result.rc == 0 }}'} > when: [step|int == 0, release == 'ocata'] > - name: Stop and disable nova-novncproxy service > service: name=openstack-nova-novncproxy state=stopped > when: [step|int == 1, release == 'ocata', nova_vncproxy_enabled|bool] > - ignore_errors: true > name: Check cluster resource status of rabbitmq > pacemaker_resource: {check_mode: false, resource: rabbitmq, state: show} > register: rabbitmq_res_result > when: [step|int == 0, release == 'ocata', is_bootstrap_node|bool] > - name: Set fact rabbitmq_res > set_fact: {rabbitmq_res: '{{ rabbitmq_res_result.rc == 0 }}'} > when: [step|int == 0, release == 'ocata', is_bootstrap_node|bool] > - name: Disable the rabitmq cluster resource > pacemaker_resource: {resource: rabbitmq, state: disable, wait_for_resource: true} > register: rabbitmq_output > retries: 5 > until: rabbitmq_output.rc == 0 > when: [step|int == 2, release == 'ocata', is_bootstrap_node|bool, rabbitmq_res|bool] > - ignore_errors: true > name: Check cluster resource status of redis > pacemaker_resource: {check_mode: false, resource: redis, state: show} > register: redis_res_result > when: [step|int == 0, release == 'ocata', is_bootstrap_node|bool] > - name: Set fact redis_res > set_fact: {redis_res: '{{ redis_res_result.rc == 0 }}'} > when: [step|int == 0, release == 'ocata', is_bootstrap_node|bool] > - name: Disable the redis cluster resource > pacemaker_resource: {resource: redis, state: disable, wait_for_resource: true} > register: redis_output > retries: 5 > until: redis_output.rc == 0 > when: [step|int == 2, release == 'ocata', is_bootstrap_node|bool, redis_res|bool] > - command: systemctl is-enabled --quiet "{{ item }}" > ignore_errors: true > name: Check 
if swift-proxy or swift-object-expirer are deployed > register: swift_proxy_services_enabled_result > when: [step|int == 0, release == 'ocata'] > with_items: [openstack-swift-proxy, openstack-swift-object-expirer] > - name: Set fact swift_proxy_services_enabled > set_fact: {swift_proxy_services_enabled: '{{ swift_proxy_services_enabled_result > }}'} > when: [step|int == 0, release == 'ocata'] > - name: Stop swift-proxy and swift-object-expirer services > service: name={{ item.item }} state=stopped enabled=no > when: [step|int == 1, release == 'ocata', item.rc == 0] > with_items: '{{ swift_proxy_services_enabled.results }}' > - command: systemctl is-enabled --quiet "{{ item }}" > ignore_errors: true > name: Check if swift storage services are deployed > register: swift_services_enabled_result > when: [step|int == 0, release == 'ocata'] > with_items: [openstack-swift-account-auditor, openstack-swift-account-reaper, > openstack-swift-account-replicator, openstack-swift-account, openstack-swift-container-auditor, > openstack-swift-container-replicator, openstack-swift-container-updater, openstack-swift-container, > openstack-swift-object-auditor, openstack-swift-object-replicator, openstack-swift-object-updater, > openstack-swift-object] > - name: Set fact swift_services_enabled > set_fact: {swift_services_enabled: '{{ swift_services_enabled_result }}'} > when: [step|int == 0, release == 'ocata'] > - name: Stop swift storage services > service: name={{ item.item }} state=stopped enabled=no > when: [step|int == 1, release == 'ocata', item.rc == 0] > with_items: '{{ swift_services_enabled.results }}' > - name: Register repo type and args > set_fact: > fast_forward_repo_args: > tripleo_repos: {ocata: -b ocata current, pike: -b pike current, queens: -b > queens current} > fast_forward_repo_type: custom-script > when: step|int == 3 > - debug: {msg: 'fast_forward_repo_type: {{ fast_forward_repo_type }} fast_forward_repo_args: > {{ fast_forward_repo_args }}'} > when: step|int 
== 3 > - block: > - git: {dest: /home/stack/tripleo-repos/, repo: 'https://github.com/openstack/tripleo-repos.git'} > name: clone tripleo-repos > - args: {chdir: /home/stack/tripleo-repos/} > command: python setup.py install > name: install tripleo-repos > - {command: 'tripleo-repos {{ fast_forward_repo_args.tripleo_repos[release] > }}', name: Enable tripleo-repos} > when: [step|int == 3, ffu_packages_apply|bool, fast_forward_repo_type == 'tripleo-repos'] > - block: > - copy: {content: "set -e\nMIRROR=tlv\nyum localinstall -y http://download.lab.bos.redhat.com/rcm-guest/puddles/OpenStack/rhos-release/rhos-release-latest.noarch.rpm\n\ > rhos-release -x\ncase $1 in\n ocata)\n rhos-release 11\n ;;\n \ > \ pike)\n rhos-release 12\n ;;\n queens)\n rhos-release 13\n\ > \ rm -rf /etc/yum.repos.d/rhos-release-ceph*\n ;;\n *)\n echo\ > \ \"unknown release $1\" >&2\n exit 1\nesac\nif [[ $MIRROR != default_repo_mirror\ > \ ]]; then\n sed -i \"s/download.*\\.lab.*\\.redhat\\.com/rhos-qe-mirror-${MIRROR}.usersys.redhat.com/\"\ > \ /etc/yum.repos.d/*.repo\n sed -i \"s/download\\(-node.*\\)\\?\\.eng.*\\\ > .redhat\\.com/rhos-qe-mirror-${MIRROR}.usersys.redhat.com/\" /etc/yum.repos.d/*.repo\n\ > \ sed -i \"s/rhos-release.*\\.redhat\\.com/rhos-qe-mirror-${MIRROR}.usersys.redhat.com\\\ > /rhos-release/\" /etc/yum.repos.d/*.repo\n sed -r -i \"s/ayanami.*\\\ > .redhat.com/rhos-qe-mirror-${MIRROR}.usersys.redhat.com\\/ayanami/\" /etc/yum.repos.d/*.repo\n\ > \ sed -i \"s/pulp.*\\.redhat\\.com/rhos-qe-mirror-${MIRROR}.usersys.redhat.com\\\ > /pulp/\" /etc/yum.repos.d/*.repo\nfi\nyum clean all\n", dest: /root/ffu_update_repo.sh, > mode: 448} > name: Create custom Script for upgrading repo. 
> - {name: Execute custom script for upgrading repo., shell: '/root/ffu_update_repo.sh > {{release}}'} > when: [step|int == 3, ffu_packages_apply|bool, fast_forward_repo_type == 'custom-script'] > role_data_global_config_settings: {} > role_data_host_prep_tasks: > - file: {path: '{{ item }}', state: directory} > name: create persistent logs directory > with_items: [/var/log/containers/aodh, /var/log/containers/httpd/aodh-api] > - copy: {content: 'Log files from aodh containers can be found under > > /var/log/containers/aodh and /var/log/containers/httpd/aodh-api. > > ', dest: /var/log/aodh/readme.txt} > ignore_errors: true > name: aodh logs readme > - file: {path: /var/log/containers/aodh, state: directory} > name: create persistent logs directory > - file: {path: /var/log/containers/ceilometer, state: directory} > name: create persistent logs directory > - copy: {content: 'Log files from ceilometer containers can be found under > > /var/log/containers/ceilometer. > > ', dest: /var/log/ceilometer/readme.txt} > ignore_errors: true > name: ceilometer logs readme > - file: {path: '{{ item }}', state: directory} > name: create persistent logs directory > with_items: [/var/log/containers/cinder, /var/log/containers/httpd/cinder-api] > - copy: {content: 'Log files from cinder containers can be found under > > /var/log/containers/cinder and /var/log/containers/httpd/cinder-api. 
> > ', dest: /var/log/cinder/readme.txt} > ignore_errors: true > name: cinder logs readme > - file: {path: '{{ item }}', state: directory} > name: create persistent directories > with_items: [/var/log/containers/cinder] > - file: {path: '{{ item }}', state: directory} > name: create persistent directories > with_items: [/var/log/containers/cinder, /var/lib/cinder] > - file: {path: /etc/ceph, state: directory} > name: ensure ceph configurations exist > - name: cinder_enable_iscsi_backend fact > set_fact: {cinder_enable_iscsi_backend: true} > - args: {creates: /var/lib/cinder/cinder-volumes} > command: dd if=/dev/zero of=/var/lib/cinder/cinder-volumes bs=1 count=0 seek=10280M > name: cinder create LVM volume group dd > when: cinder_enable_iscsi_backend > - args: {creates: /dev/loop2, executable: /bin/bash} > name: cinder create LVM volume group > shell: "if ! losetup /dev/loop2; then\n losetup /dev/loop2 /var/lib/cinder/cinder-volumes\n\ > fi\nif ! pvdisplay | grep cinder-volumes; then\n pvcreate /dev/loop2\nfi\n\ > if ! vgdisplay | grep cinder-volumes; then\n vgcreate cinder-volumes /dev/loop2\n\ > fi\n" > when: cinder_enable_iscsi_backend > - file: {path: '{{ item }}', state: directory} > name: create persistent logs directory > with_items: [/var/log/containers/glance] > - copy: {content: 'Log files from glance containers can be found under > > /var/log/containers/glance. 
> > ', dest: /var/log/glance/readme.txt} > ignore_errors: true > name: glance logs readme > - block: > - name: null > set_fact: {remote_file_path: /etc/glance/glance-metadata-file.conf} > - file: {path: '{{ remote_file_path }}', state: touch} > name: null > - {register: file_path, stat: 'path="{{ remote_file_path }}"'} > - copy: > content: {mount_point: /var/lib/glance/images, share_location: '{{item.NETAPP_SHARE}}', > type: nfs} > dest: '{{ remote_file_path }}' > when: [file_path.stat.exists == true] > with_items: > - {NETAPP_SHARE: ''} > - mount: name=/var/lib/glance/images src="{{item.NETAPP_SHARE}}" fstype=nfs4 > opts="{{item.NFS_OPTIONS}}" state=mounted > name: null > with_items: > - {NETAPP_SHARE: '', NFS_OPTIONS: '_netdev,bg,intr,context=system_u:object_r:glance_var_lib_t:s0'} > name: Mount Netapp NFS > vars: {netapp_nfs_backend_enable: false} > when: netapp_nfs_backend_enable > - mount: name=/var/lib/glance/images src="{{item.NFS_SHARE}}" fstype=nfs4 opts="{{item.NFS_OPTIONS}}" > state=mounted > name: Mount NFS on host > vars: {nfs_backend_enable: false} > when: [nfs_backend_enable] > with_items: > - {NFS_OPTIONS: '_netdev,bg,intr,context=system_u:object_r:glance_var_lib_t:s0', > NFS_SHARE: ''} > - file: {path: '{{ item }}', state: directory} > name: create persistent logs directory > with_items: [/var/log/containers/gnocchi, /var/log/containers/httpd/gnocchi-api] > - copy: {content: 'Log files from gnocchi containers can be found under > > /var/log/containers/gnocchi and /var/log/containers/httpd/gnocchi-api. 
> > ', dest: /var/log/gnocchi/readme.txt} > ignore_errors: true > name: gnocchi logs readme > - file: {path: /var/log/containers/gnocchi, state: directory} > name: create persistent logs directory > - file: {path: '{{ item }}', state: directory} > name: create persistent directories > with_items: [/var/lib/haproxy] > - file: {path: '{{ item }}', state: directory} > name: create persistent logs directory > with_items: [/var/log/containers/heat, /var/log/containers/httpd/heat-api] > - copy: {content: 'Log files from heat containers can be found under > > /var/log/containers/heat and /var/log/containers/httpd/heat-api*. > > ', dest: /var/log/heat/readme.txt} > ignore_errors: true > name: heat logs readme > - file: {path: '{{ item }}', state: directory} > name: create persistent logs directory > with_items: [/var/log/containers/heat, /var/log/containers/httpd/heat-api-cfn] > - file: {path: /var/log/containers/heat, state: directory} > name: create persistent logs directory > - file: {path: '{{ item }}', state: directory} > name: create persistent logs directory > with_items: [/var/log/containers/horizon, /var/log/containers/httpd/horizon] > - copy: {content: 'Log files from horizon containers can be found under > > /var/log/containers/horizon and /var/log/containers/httpd/horizon. > > ', dest: /var/log/horizon/readme.txt} > ignore_errors: true > name: horizon logs readme > - {name: stat /lib/systemd/system/iscsid.socket, register: stat_iscsid_socket, > stat: path=/lib/systemd/system/iscsid.socket} > - {name: Stop and disable iscsid.socket service, service: name=iscsid.socket state=stopped > enabled=no, when: stat_iscsid_socket.stat.exists} > - file: {path: '{{ item }}', state: directory} > name: create persistent logs directory > with_items: [/var/log/containers/keystone, /var/log/containers/httpd/keystone] > - copy: {content: 'Log files from keystone containers can be found under > > /var/log/containers/keystone and /var/log/containers/httpd/keystone. 
> > ', dest: /var/log/keystone/readme.txt} > ignore_errors: true > name: keystone logs readme > - file: {path: /var/log/containers/memcached, state: directory} > name: create persistent logs directory > - copy: {content: 'Log files from memcached containers can be found under > > /var/log/containers/memcached. > > ', dest: /var/log/memcached-readme.txt} > ignore_errors: true > name: memcached logs readme > - file: {path: '{{ item }}', state: directory} > name: create persistent directories > with_items: [/var/log/containers/mysql, /var/lib/mysql] > - copy: {content: 'Log files from mysql containers can be found under > > /var/log/containers/mysql. > > ', dest: /var/log/mariadb/readme.txt} > ignore_errors: true > name: mysql logs readme > - file: {path: '{{ item }}', state: directory} > name: create persistent logs directory > with_items: [/var/log/containers/neutron, /var/log/containers/httpd/neutron-api] > - copy: {content: 'Log files from neutron containers can be found under > > /var/log/containers/neutron and /var/log/containers/httpd/neutron-api. > > ', dest: /var/log/neutron/readme.txt} > ignore_errors: true > name: neutron logs readme > - file: {path: '{{ item }}', state: directory} > name: create persistent logs directory > with_items: [/var/log/containers/neutron] > - file: {path: /var/lib/neutron, state: directory} > name: create /var/lib/neutron > - file: {path: '{{ item }}', state: directory} > name: create persistent logs directory > with_items: [/var/log/containers/nova, /var/log/containers/httpd/nova-api] > - copy: {content: 'Log files from nova containers can be found under > > /var/log/containers/nova and /var/log/containers/httpd/nova-*. 
> > ', dest: /var/log/nova/readme.txt} > ignore_errors: true > name: nova logs readme > - file: {path: /var/log/containers/nova, state: directory} > name: create persistent logs directory > - file: {path: '{{ item }}', state: directory} > name: create persistent logs directory > with_items: [/var/log/containers/nova, /var/log/containers/httpd/nova-placement] > - file: {path: '{{ item }}', state: directory} > name: create persistent logs directory > with_items: [/var/log/containers/panko, /var/log/containers/httpd/panko-api] > - copy: {content: 'Log files from panko containers can be found under > > /var/log/containers/panko and /var/log/containers/httpd/panko-api. > > ', dest: /var/log/panko/readme.txt} > ignore_errors: true > name: panko logs readme > - file: {path: '{{ item }}', state: directory} > name: create persistent directories > with_items: [/var/lib/rabbitmq, /var/log/containers/rabbitmq] > - copy: {content: 'Log files from rabbitmq containers can be found under > > /var/log/containers/rabbitmq. > > ', dest: /var/log/rabbitmq/readme.txt} > ignore_errors: true > name: rabbitmq logs readme > - {name: stop the Erlang port mapper on the host and make sure it cannot bind > to the port used by container, shell: 'echo ''export ERL_EPMD_ADDRESS=127.0.0.1'' > > /etc/rabbitmq/rabbitmq-env.conf > > echo ''export ERL_EPMD_PORT=4370'' >> /etc/rabbitmq/rabbitmq-env.conf > > for pid in $(pgrep epmd --ns 1 --nslist pid); do kill $pid; done > > '} > - file: {path: '{{ item }}', state: directory} > name: create persistent directories > with_items: [/var/lib/redis, /var/log/containers/redis, /var/run/redis] > - copy: {content: 'Log files from redis containers can be found under > > /var/log/containers/redis. 
> > ', dest: /var/log/redis/readme.txt} > ignore_errors: true > name: redis logs readme > - file: {path: '{{ item }}', state: directory} > name: create persistent directories > with_items: [/srv/node, /var/log/swift] > - file: {dest: /var/log/containers/swift, src: /var/log/swift, state: link} > name: Create swift logging symlink > - file: {path: '{{ item }}', state: directory} > name: create persistent directories > with_items: [/srv/node, /var/log/swift, /var/log/containers] > - name: Set swift_use_local_disks fact > set_fact: {swift_use_local_disks: true} > - file: {path: /srv/node/d1, state: directory} > name: Create Swift d1 directory if needed > when: swift_use_local_disks > - copy: {content: 'Log files from swift containers can be found under > > /var/log/containers/swift and /var/log/containers/httpd/swift-*. > > ', dest: /var/log/swift/readme.txt} > ignore_errors: true > name: swift logs readme > - filesystem: {dev: '/dev/{{ item }}', fstype: xfs, opts: -f -i size=1024} > name: Format SwiftRawDisks > with_items: > - [] > - mount: {fstype: xfs, name: '/srv/node/{{ item }}', opts: noatime, src: '/dev/{{ > item }}', state: mounted} > name: Mount devices defined in SwiftRawDisks > with_items: > - [] > role_data_kolla_config: > /var/lib/kolla/config_files/aodh_api.json: > command: /usr/sbin/httpd -DFOREGROUND > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > permissions: > - {owner: 'aodh:aodh', path: /var/log/aodh, recurse: true} > /var/lib/kolla/config_files/aodh_evaluator.json: > command: /usr/bin/aodh-evaluator > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > permissions: > - {owner: 'aodh:aodh', path: /var/log/aodh, recurse: true} > /var/lib/kolla/config_files/aodh_listener.json: > command: /usr/bin/aodh-listener > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > 
permissions: > - {owner: 'aodh:aodh', path: /var/log/aodh, recurse: true} > /var/lib/kolla/config_files/aodh_notifier.json: > command: /usr/bin/aodh-notifier > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > permissions: > - {owner: 'aodh:aodh', path: /var/log/aodh, recurse: true} > /var/lib/kolla/config_files/ceilometer_agent_central.json: > command: /usr/bin/ceilometer-polling --polling-namespaces central --logfile > /var/log/ceilometer/central.log > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > /var/lib/kolla/config_files/ceilometer_agent_notification.json: > command: /usr/bin/ceilometer-agent-notification --logfile /var/log/ceilometer/agent-notification.log > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src-panko/*} > permissions: > - {owner: 'root:ceilometer', path: /etc/panko, recurse: true} > /var/lib/kolla/config_files/cinder_api.json: > command: /usr/sbin/httpd -DFOREGROUND > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > permissions: > - {owner: 'cinder:cinder', path: /var/log/cinder, recurse: true} > /var/lib/kolla/config_files/cinder_api_cron.json: > command: /usr/sbin/crond -n > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > permissions: > - {owner: 'cinder:cinder', path: /var/log/cinder, recurse: true} > /var/lib/kolla/config_files/cinder_scheduler.json: > command: /usr/bin/cinder-scheduler --config-file /usr/share/cinder/cinder-dist.conf > --config-file /etc/cinder/cinder.conf > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > permissions: > - {owner: 'cinder:cinder', path: 
/var/log/cinder, recurse: true} > /var/lib/kolla/config_files/cinder_volume.json: > command: /usr/bin/cinder-volume --config-file /usr/share/cinder/cinder-dist.conf > --config-file /etc/cinder/cinder.conf > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > - {dest: /etc/ceph/, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src-ceph/} > - {dest: /etc/iscsi/, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src-iscsid/*} > permissions: > - {owner: 'cinder:cinder', path: /var/log/cinder, recurse: true} > /var/lib/kolla/config_files/clustercheck.json: > command: /usr/sbin/xinetd -dontfork > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > /var/lib/kolla/config_files/glance_api.json: > command: /usr/bin/glance-api --config-file /usr/share/glance/glance-api-dist.conf > --config-file /etc/glance/glance-api.conf > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > - {dest: /etc/ceph/, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src-ceph/} > permissions: > - {owner: 'glance:glance', path: /var/lib/glance, recurse: true} > - {owner: 'glance:glance', path: /etc/ceph/ceph.client.openstack.keyring, > perm: '0600'} > /var/lib/kolla/config_files/glance_api_tls_proxy.json: > command: /usr/sbin/httpd -DFOREGROUND > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > /var/lib/kolla/config_files/gnocchi_api.json: > command: /usr/sbin/httpd -DFOREGROUND > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > - {dest: /etc/ceph/, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src-ceph/} > permissions: > - {owner: 'gnocchi:gnocchi', path: /var/log/gnocchi, recurse: 
true} > - {owner: 'gnocchi:gnocchi', path: /etc/ceph/ceph.client.openstack.keyring, > perm: '0600'} > /var/lib/kolla/config_files/gnocchi_db_sync.json: > command: /usr/bin/bootstrap_host_exec gnocchi_api /usr/bin/gnocchi-upgrade > --sacks-number=128 > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > - {dest: /etc/ceph/, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src-ceph/} > permissions: > - {owner: 'gnocchi:gnocchi', path: /var/log/gnocchi, recurse: true} > - {owner: 'gnocchi:gnocchi', path: /etc/ceph/ceph.client.openstack.keyring, > perm: '0600'} > /var/lib/kolla/config_files/gnocchi_metricd.json: > command: /usr/bin/gnocchi-metricd > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > - {dest: /etc/ceph/, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src-ceph/} > permissions: > - {owner: 'gnocchi:gnocchi', path: /var/log/gnocchi, recurse: true} > - {owner: 'gnocchi:gnocchi', path: /etc/ceph/ceph.client.openstack.keyring, > perm: '0600'} > /var/lib/kolla/config_files/gnocchi_statsd.json: > command: /usr/bin/gnocchi-statsd > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > - {dest: /etc/ceph/, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src-ceph/} > permissions: > - {owner: 'gnocchi:gnocchi', path: /var/log/gnocchi, recurse: true} > - {owner: 'gnocchi:gnocchi', path: /etc/ceph/ceph.client.openstack.keyring, > perm: '0600'} > /var/lib/kolla/config_files/haproxy.json: > command: /usr/sbin/haproxy-systemd-wrapper -f /etc/haproxy/haproxy.cfg > config_files: > - {dest: /, merge: true, optional: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > - {dest: /, merge: true, optional: true, preserve_properties: true, source: /var/lib/kolla/config_files/src-tls/*} > 
permissions: > - {owner: 'haproxy:haproxy', path: /var/lib/haproxy, recurse: true} > - {optional: true, owner: 'haproxy:haproxy', path: /etc/pki/tls/certs/haproxy/*, > perm: '0600'} > - {optional: true, owner: 'haproxy:haproxy', path: /etc/pki/tls/private/haproxy/*, > perm: '0600'} > /var/lib/kolla/config_files/heat_api.json: > command: /usr/sbin/httpd -DFOREGROUND > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > permissions: > - {owner: 'heat:heat', path: /var/log/heat, recurse: true} > /var/lib/kolla/config_files/heat_api_cfn.json: > command: /usr/sbin/httpd -DFOREGROUND > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > permissions: > - {owner: 'heat:heat', path: /var/log/heat, recurse: true} > /var/lib/kolla/config_files/heat_api_cron.json: > command: /usr/sbin/crond -n > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > permissions: > - {owner: 'heat:heat', path: /var/log/heat, recurse: true} > /var/lib/kolla/config_files/heat_engine.json: > command: '/usr/bin/heat-engine --config-file /usr/share/heat/heat-dist.conf > --config-file /etc/heat/heat.conf ' > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > permissions: > - {owner: 'heat:heat', path: /var/log/heat, recurse: true} > /var/lib/kolla/config_files/horizon.json: > command: /usr/sbin/httpd -DFOREGROUND > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > permissions: > - {owner: 'apache:apache', path: /var/log/horizon/, recurse: true} > - {owner: 'apache:apache', path: /etc/openstack-dashboard/, recurse: true} > - {owner: 'apache:apache', path: /usr/share/openstack-dashboard/openstack_dashboard/local/, > recurse: false} > - {owner: 'apache:apache', path: 
/usr/share/openstack-dashboard/openstack_dashboard/local/local_settings.d/, > recurse: false} > /var/lib/kolla/config_files/iscsid.json: > command: /usr/sbin/iscsid -f > config_files: > - {dest: /etc/iscsi/, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src-iscsid/*} > /var/lib/kolla/config_files/keystone.json: > command: /usr/sbin/httpd -DFOREGROUND > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > /var/lib/kolla/config_files/keystone_cron.json: > command: /usr/sbin/crond -n > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > permissions: > - {owner: 'keystone:keystone', path: /var/log/keystone, recurse: true} > /var/lib/kolla/config_files/logrotate-crond.json: > command: /usr/sbin/crond -s -n > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > /var/lib/kolla/config_files/mysql.json: > command: /usr/sbin/pacemaker_remoted > config_files: > - {dest: /etc/libqb/force-filesystem-sockets, owner: root, perm: '0644', source: /dev/null} > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > - {dest: /, merge: true, optional: true, preserve_properties: true, source: /var/lib/kolla/config_files/src-tls/*} > permissions: > - {owner: 'mysql:mysql', path: /var/log/mysql, recurse: true} > - {optional: true, owner: 'mysql:mysql', path: /etc/pki/tls/certs/mysql.crt, > perm: '0600'} > - {optional: true, owner: 'mysql:mysql', path: /etc/pki/tls/private/mysql.key, > perm: '0600'} > /var/lib/kolla/config_files/neutron_api.json: > command: /usr/bin/neutron-server --config-file /usr/share/neutron/neutron-dist.conf > --config-dir /usr/share/neutron/server --config-file /etc/neutron/neutron.conf > --config-file /etc/neutron/plugin.ini --config-dir /etc/neutron/conf.d/common > --config-dir /etc/neutron/conf.d/neutron-server 
--log-file=/var/log/neutron/server.log > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > permissions: > - {owner: 'neutron:neutron', path: /var/log/neutron, recurse: true} > /var/lib/kolla/config_files/neutron_dhcp.json: > command: /usr/bin/neutron-dhcp-agent --config-file /usr/share/neutron/neutron-dist.conf > --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/dhcp_agent.ini > --config-dir /etc/neutron/conf.d/common --config-dir /etc/neutron/conf.d/neutron-dhcp-agent > --log-file=/var/log/neutron/dhcp-agent.log > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > permissions: > - {owner: 'neutron:neutron', path: /var/log/neutron, recurse: true} > - {owner: 'neutron:neutron', path: /var/lib/neutron, recurse: true} > - {owner: 'neutron:neutron', path: /etc/pki/tls/certs/neutron.crt} > - {owner: 'neutron:neutron', path: /etc/pki/tls/private/neutron.key} > /var/lib/kolla/config_files/neutron_l3_agent.json: > command: /usr/bin/neutron-l3-agent --config-file /usr/share/neutron/neutron-dist.conf > --config-dir /usr/share/neutron/l3_agent --config-file /etc/neutron/neutron.conf > --config-file /etc/neutron/l3_agent.ini --config-dir /etc/neutron/conf.d/common > --config-dir /etc/neutron/conf.d/neutron-l3-agent --log-file=/var/log/neutron/l3-agent.log > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > permissions: > - {owner: 'neutron:neutron', path: /var/log/neutron, recurse: true} > - {owner: 'neutron:neutron', path: /var/lib/neutron, recurse: true} > /var/lib/kolla/config_files/neutron_metadata_agent.json: > command: /usr/bin/neutron-metadata-agent --config-file /usr/share/neutron/neutron-dist.conf > --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/metadata_agent.ini > --config-dir /etc/neutron/conf.d/common --config-dir 
/etc/neutron/conf.d/neutron-metadata-agent > --log-file=/var/log/neutron/metadata-agent.log > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > permissions: > - {owner: 'neutron:neutron', path: /var/log/neutron, recurse: true} > - {owner: 'neutron:neutron', path: /var/lib/neutron, recurse: true} > /var/lib/kolla/config_files/neutron_ovs_agent.json: > command: /neutron_ovs_agent_launcher.sh > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > permissions: > - {owner: 'neutron:neutron', path: /var/log/neutron, recurse: true} > /var/lib/kolla/config_files/neutron_server_tls_proxy.json: > command: /usr/sbin/httpd -DFOREGROUND > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > /var/lib/kolla/config_files/nova_api.json: > command: /usr/sbin/httpd -DFOREGROUND > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > permissions: > - {owner: 'nova:nova', path: /var/log/nova, recurse: true} > /var/lib/kolla/config_files/nova_api_cron.json: > command: /usr/sbin/crond -n > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > permissions: > - {owner: 'nova:nova', path: /var/log/nova, recurse: true} > /var/lib/kolla/config_files/nova_conductor.json: > command: '/usr/bin/nova-conductor ' > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > permissions: > - {owner: 'nova:nova', path: /var/log/nova, recurse: true} > /var/lib/kolla/config_files/nova_consoleauth.json: > command: '/usr/bin/nova-consoleauth ' > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > permissions: > - {owner: 'nova:nova', path: /var/log/nova, recurse: true} > 
/var/lib/kolla/config_files/nova_metadata.json: > command: '/usr/bin/nova-api-metadata ' > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > permissions: > - {owner: 'nova:nova', path: /var/log/nova, recurse: true} > /var/lib/kolla/config_files/nova_placement.json: > command: /usr/sbin/httpd -DFOREGROUND > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > permissions: > - {owner: 'nova:nova', path: /var/log/nova, recurse: true} > /var/lib/kolla/config_files/nova_scheduler.json: > command: '/usr/bin/nova-scheduler ' > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > permissions: > - {owner: 'nova:nova', path: /var/log/nova, recurse: true} > /var/lib/kolla/config_files/nova_vnc_proxy.json: > command: '/usr/bin/nova-novncproxy --web /usr/share/novnc/ ' > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > permissions: > - {owner: 'nova:nova', path: /var/log/nova, recurse: true} > /var/lib/kolla/config_files/panko_api.json: > command: /usr/sbin/httpd -DFOREGROUND > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > permissions: > - {owner: 'panko:panko', path: /var/log/panko, recurse: true} > /var/lib/kolla/config_files/rabbitmq.json: > command: /usr/sbin/pacemaker_remoted > config_files: > - {dest: /etc/libqb/force-filesystem-sockets, owner: root, perm: '0644', source: /dev/null} > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > - {dest: /, merge: true, optional: true, preserve_properties: true, source: /var/lib/kolla/config_files/src-tls/*} > permissions: > - {owner: 'rabbitmq:rabbitmq', path: /var/lib/rabbitmq, recurse: true} > - {owner: 'rabbitmq:rabbitmq', path: /var/log/rabbitmq, recurse: true} > - 
{optional: true, owner: 'rabbitmq:rabbitmq', path: /etc/pki/tls/certs/rabbitmq.crt, > perm: '0600'} > - {optional: true, owner: 'rabbitmq:rabbitmq', path: /etc/pki/tls/private/rabbitmq.key, > perm: '0600'} > /var/lib/kolla/config_files/redis.json: > command: /usr/sbin/pacemaker_remoted > config_files: > - {dest: /etc/libqb/force-filesystem-sockets, owner: root, perm: '0644', source: /dev/null} > - {dest: /, merge: true, optional: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > - {dest: /, merge: true, optional: true, preserve_properties: true, source: /var/lib/kolla/config_files/src-tls/*} > permissions: > - {owner: 'redis:redis', path: /var/run/redis, recurse: true} > - {owner: 'redis:redis', path: /var/lib/redis, recurse: true} > - {owner: 'redis:redis', path: /var/log/redis, recurse: true} > - {optional: true, owner: 'redis:redis', path: /etc/pki/tls/certs/redis.crt, > perm: '0600'} > - {optional: true, owner: 'redis:redis', path: /etc/pki/tls/private/redis.key, > perm: '0600'} > /var/lib/kolla/config_files/redis_tls_proxy.json: > command: stunnel /etc/stunnel/stunnel.conf > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > /var/lib/kolla/config_files/swift_account_auditor.json: > command: /usr/bin/swift-account-auditor /etc/swift/account-server.conf > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > /var/lib/kolla/config_files/swift_account_reaper.json: > command: /usr/bin/swift-account-reaper /etc/swift/account-server.conf > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > /var/lib/kolla/config_files/swift_account_replicator.json: > command: /usr/bin/swift-account-replicator /etc/swift/account-server.conf > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > 
/var/lib/kolla/config_files/swift_account_server.json: > command: /usr/bin/swift-account-server /etc/swift/account-server.conf > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > /var/lib/kolla/config_files/swift_container_auditor.json: > command: /usr/bin/swift-container-auditor /etc/swift/container-server.conf > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > /var/lib/kolla/config_files/swift_container_replicator.json: > command: /usr/bin/swift-container-replicator /etc/swift/container-server.conf > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > /var/lib/kolla/config_files/swift_container_server.json: > command: /usr/bin/swift-container-server /etc/swift/container-server.conf > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > /var/lib/kolla/config_files/swift_container_updater.json: > command: /usr/bin/swift-container-updater /etc/swift/container-server.conf > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > /var/lib/kolla/config_files/swift_object_auditor.json: > command: /usr/bin/swift-object-auditor /etc/swift/object-server.conf > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > /var/lib/kolla/config_files/swift_object_expirer.json: > command: /usr/bin/swift-object-expirer /etc/swift/object-expirer.conf > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > /var/lib/kolla/config_files/swift_object_replicator.json: > command: /usr/bin/swift-object-replicator /etc/swift/object-server.conf > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > 
/var/lib/kolla/config_files/swift_object_server.json: > command: /usr/bin/swift-object-server /etc/swift/object-server.conf > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > permissions: > - {owner: 'swift:swift', path: /var/cache/swift, recurse: true} > /var/lib/kolla/config_files/swift_object_updater.json: > command: /usr/bin/swift-object-updater /etc/swift/object-server.conf > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > /var/lib/kolla/config_files/swift_proxy.json: > command: /usr/bin/swift-proxy-server /etc/swift/proxy-server.conf > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > /var/lib/kolla/config_files/swift_proxy_tls_proxy.json: > command: /usr/sbin/httpd -DFOREGROUND > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > /var/lib/kolla/config_files/swift_rsync.json: > command: /usr/bin/rsync --daemon --no-detach --config=/etc/rsyncd.conf > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > role_data_logging_groups: [root] > role_data_logging_sources: [] > role_data_merged_config_settings: > aodh::api::enable_proxy_headers_parsing: true > aodh::api::gnocchi_external_project_owner: service > aodh::api::host: '%{hiera(''fqdn_internal_api'')}' > aodh::api::service_name: httpd > aodh::auth::auth_password: rBQBTbGmpqtac9ca4TZfZyvNh > aodh::auth::auth_region: regionOne > aodh::auth::auth_tenant_name: service > aodh::auth::auth_url: http://10.0.0.12:5000 > aodh::db::database_connection: mysql+pymysql://aodh:rBQBTbGmpqtac9ca4TZfZyvNh@10.0.0.12/aodh?read_default_group=tripleo&read_default_file=/etc/my.cnf.d/tripleo.cnf > aodh::db::mysql::allowed_hosts: ['%', '%{hiera(''mysql_bind_host'')}'] > aodh::db::mysql::dbname: aodh > aodh::db::mysql::host: 
10.0.0.12 > aodh::db::mysql::password: rBQBTbGmpqtac9ca4TZfZyvNh > aodh::db::mysql::user: aodh > aodh::debug: false > aodh::keystone::auth::admin_url: http://10.0.0.12:8042 > aodh::keystone::auth::internal_url: http://10.0.0.12:8042 > aodh::keystone::auth::password: rBQBTbGmpqtac9ca4TZfZyvNh > aodh::keystone::auth::public_url: http://172.16.18.34:8042 > aodh::keystone::auth::region: regionOne > aodh::keystone::auth::tenant: service > aodh::keystone::authtoken::auth_uri: http://10.0.0.12:5000 > aodh::keystone::authtoken::auth_url: http://10.0.0.12:5000 > aodh::keystone::authtoken::password: rBQBTbGmpqtac9ca4TZfZyvNh > aodh::keystone::authtoken::project_domain_name: Default > aodh::keystone::authtoken::project_name: service > aodh::keystone::authtoken::user_domain_name: Default > aodh::notification_driver: messagingv2 > aodh::policy::policies: {} > aodh::rabbit_password: ZsVg3E86aZ7ttpW8A4zdHVNkh > aodh::rabbit_port: 5672 > aodh::rabbit_use_ssl: 'False' > aodh::rabbit_userid: guest > aodh::wsgi::apache::bind_host: internal_api > aodh::wsgi::apache::servername: '%{hiera(''fqdn_internal_api'')}' > aodh::wsgi::apache::ssl: false > aodh::wsgi::apache::wsgi_process_display_name: aodh_wsgi > aodh_redis_password: qf9AHug3MR3sEvRDGxGNqbQuV > apache::default_vhost: false > apache::ip: internal_api > apache::mod::prefork::maxclients: 256 > apache::mod::prefork::serverlimit: 256 > apache::mod::remoteip::proxy_ips: ['%{hiera(''apache_remote_proxy_ips_network'')}'] > apache::server_signature: 'Off' > apache::server_tokens: Prod > apache_remote_proxy_ips_network: internal_api_subnet > ceilometer::agent::auth::auth_endpoint_type: internalURL > ceilometer::agent::auth::auth_password: C2xNfx9NxPBNTqpYrGMfYrUnn > ceilometer::agent::auth::auth_project_domain_name: Default > ceilometer::agent::auth::auth_region: regionOne > ceilometer::agent::auth::auth_tenant_name: service > ceilometer::agent::auth::auth_url: http://10.0.0.12:5000 > ceilometer::agent::auth::auth_user_domain_name: 
Default > ceilometer::agent::notification::event_pipeline_publishers: ['gnocchi://', 'panko://'] > ceilometer::agent::notification::manage_event_pipeline: true > ceilometer::agent::notification::manage_pipeline: false > ceilometer::agent::notification::pipeline_publishers: ['gnocchi://'] > ceilometer::agent::polling::manage_polling: false > ceilometer::db::mysql::allowed_hosts: ['%', '%{hiera(''mysql_bind_host'')}'] > ceilometer::db::mysql::dbname: ceilometer > ceilometer::db::mysql::host: 10.0.0.12 > ceilometer::db::mysql::password: C2xNfx9NxPBNTqpYrGMfYrUnn > ceilometer::db::mysql::user: ceilometer > ceilometer::debug: false > ceilometer::dispatcher::gnocchi::archive_policy: low > ceilometer::dispatcher::gnocchi::filter_project: service > ceilometer::dispatcher::gnocchi::resources_definition_file: gnocchi_resources.yaml > ceilometer::dispatcher::gnocchi::url: http://10.0.0.12:8041 > ceilometer::host: '%{::fqdn}' > ceilometer::keystone::auth::admin_url: http://10.0.0.12:8777 > ceilometer::keystone::auth::configure_endpoint: false > ceilometer::keystone::auth::internal_url: http://10.0.0.12:8777 > ceilometer::keystone::auth::password: C2xNfx9NxPBNTqpYrGMfYrUnn > ceilometer::keystone::auth::public_url: http://172.16.18.34:8777 > ceilometer::keystone::auth::region: regionOne > ceilometer::keystone::auth::tenant: service > ceilometer::keystone::authtoken::auth_uri: http://10.0.0.12:5000 > ceilometer::keystone::authtoken::auth_url: http://10.0.0.12:5000 > ceilometer::keystone::authtoken::password: C2xNfx9NxPBNTqpYrGMfYrUnn > ceilometer::keystone::authtoken::project_domain_name: Default > ceilometer::keystone::authtoken::project_name: service > ceilometer::keystone::authtoken::user_domain_name: Default > ceilometer::notification_driver: messagingv2 > ceilometer::rabbit_heartbeat_timeout_threshold: 60 > ceilometer::rabbit_password: ZsVg3E86aZ7ttpW8A4zdHVNkh > ceilometer::rabbit_port: 5672 > ceilometer::rabbit_use_ssl: 'False' > ceilometer::rabbit_userid: guest > 
ceilometer::snmpd_readonly_user_password: 034690b9f8c681c1a3a9d9cbe1f71477b30945e2 > ceilometer::snmpd_readonly_username: ro_snmp_user > ceilometer::telemetry_secret: 9MmHuzBPfvYK7YXbNWqDdDpzz > ceilometer_auth_enabled: true > ceilometer_redis_password: qf9AHug3MR3sEvRDGxGNqbQuV > central_namespace: true > cinder::api::bind_host: '%{hiera(''fqdn_internal_api'')}' > cinder::api::enable_proxy_headers_parsing: true > cinder::api::nova_catalog_admin_info: compute:nova:adminURL > cinder::api::nova_catalog_info: compute:nova:internalURL > cinder::api::service_name: httpd > cinder::backend_host: hostgroup > cinder::ceilometer::notification_driver: messagingv2 > cinder::config: > DEFAULT/swift_catalog_info: {value: 'object-store:swift:internalURL'} > cinder::cron::db_purge::age: '30' > cinder::cron::db_purge::destination: /var/log/cinder/cinder-rowsflush.log > cinder::cron::db_purge::hour: '0' > cinder::cron::db_purge::minute: '1' > cinder::cron::db_purge::month: '*' > cinder::cron::db_purge::monthday: '*' > cinder::cron::db_purge::user: cinder > cinder::cron::db_purge::weekday: '*' > cinder::database_connection: mysql+pymysql://cinder:R2FFnCq82Ut24UVBP4arPq2RN@10.0.0.12/cinder?read_default_group=tripleo&read_default_file=/etc/my.cnf.d/tripleo.cnf > cinder::db::database_db_max_retries: -1 > cinder::db::database_max_retries: -1 > cinder::db::mysql::allowed_hosts: ['%', '%{hiera(''mysql_bind_host'')}'] > cinder::db::mysql::dbname: cinder > cinder::db::mysql::host: 10.0.0.12 > cinder::db::mysql::password: R2FFnCq82Ut24UVBP4arPq2RN > cinder::db::mysql::user: cinder > cinder::debug: false > cinder::glance::glance_api_servers: http://10.0.0.12:9292 > cinder::keystone::auth::admin_url: http://10.0.0.12:8776/v1/%(tenant_id)s > cinder::keystone::auth::admin_url_v2: http://10.0.0.12:8776/v2/%(tenant_id)s > cinder::keystone::auth::admin_url_v3: http://10.0.0.12:8776/v3/%(tenant_id)s > cinder::keystone::auth::internal_url: http://10.0.0.12:8776/v1/%(tenant_id)s > 
cinder::keystone::auth::internal_url_v2: http://10.0.0.12:8776/v2/%(tenant_id)s > cinder::keystone::auth::internal_url_v3: http://10.0.0.12:8776/v3/%(tenant_id)s > cinder::keystone::auth::password: R2FFnCq82Ut24UVBP4arPq2RN > cinder::keystone::auth::public_url: http://172.16.18.34:8776/v1/%(tenant_id)s > cinder::keystone::auth::public_url_v2: http://172.16.18.34:8776/v2/%(tenant_id)s > cinder::keystone::auth::public_url_v3: http://172.16.18.34:8776/v3/%(tenant_id)s > cinder::keystone::auth::region: regionOne > cinder::keystone::auth::tenant: service > cinder::keystone::authtoken::auth_uri: http://10.0.0.12:5000 > cinder::keystone::authtoken::auth_url: http://10.0.0.12:5000 > cinder::keystone::authtoken::password: R2FFnCq82Ut24UVBP4arPq2RN > cinder::keystone::authtoken::project_domain_name: Default > cinder::keystone::authtoken::project_name: service > cinder::keystone::authtoken::user_domain_name: Default > cinder::policy::policies: {} > cinder::rabbit_heartbeat_timeout_threshold: 60 > cinder::rabbit_password: ZsVg3E86aZ7ttpW8A4zdHVNkh > cinder::rabbit_port: 5672 > cinder::rabbit_use_ssl: 'False' > cinder::rabbit_userid: guest > cinder::scheduler::scheduler_driver: cinder.scheduler.filter_scheduler.FilterScheduler > cinder::volume::enabled: false > cinder::volume::manage_service: false > cinder::wsgi::apache::bind_host: internal_api > cinder::wsgi::apache::servername: '%{hiera(''fqdn_internal_api'')}' > cinder::wsgi::apache::ssl: false > cinder::wsgi::apache::workers: '%{::os_workers}' > corosync_ipv6: false > corosync_token_timeout: 10000 > enable_fencing: false > enable_galera: true > enable_load_balancer: true > enable_panko_expirer: true > glance::api::authtoken::auth_uri: http://10.0.0.12:5000 > glance::api::authtoken::auth_url: http://10.0.0.12:5000 > glance::api::authtoken::password: Es7VyhA6bWnnDgux2jWmgxD9w > glance::api::authtoken::project_name: service > glance::api::bind_host: internal_api > glance::api::bind_port: '9292' > 
glance::api::database_connection: mysql+pymysql://glance:Es7VyhA6bWnnDgux2jWmgxD9w@10.0.0.12/glance?read_default_group=tripleo&read_default_file=/etc/my.cnf.d/tripleo.cnf > glance::api::debug: false > glance::api::enable_proxy_headers_parsing: true > glance::api::enable_v1_api: false > glance::api::enable_v2_api: true > glance::api::enabled_import_methods: [web-download] > glance::api::image_member_quota: 128 > glance::api::os_region_name: regionOne > glance::api::pipeline: keystone > glance::api::show_image_direct_url: true > glance::api::show_multiple_locations: false > glance::api::sync_db: false > glance::backend::rbd::rbd_store_ceph_conf: /etc/ceph/ceph.conf > glance::backend::rbd::rbd_store_pool: images > glance::backend::rbd::rbd_store_user: openstack > glance::backend::swift::swift_store_auth_address: http://10.0.0.12:5000/v3 > glance::backend::swift::swift_store_auth_version: 3 > glance::backend::swift::swift_store_create_container_on_put: true > glance::backend::swift::swift_store_key: Es7VyhA6bWnnDgux2jWmgxD9w > glance::backend::swift::swift_store_user: service:glance > glance::db::mysql::allowed_hosts: ['%', '%{hiera(''mysql_bind_host'')}'] > glance::db::mysql::dbname: glance > glance::db::mysql::host: 10.0.0.12 > glance::db::mysql::password: Es7VyhA6bWnnDgux2jWmgxD9w > glance::db::mysql::user: glance > glance::keystone::auth::admin_url: http://10.0.0.12:9292 > glance::keystone::auth::internal_url: http://10.0.0.12:9292 > glance::keystone::auth::password: Es7VyhA6bWnnDgux2jWmgxD9w > glance::keystone::auth::public_url: http://172.16.18.34:9292 > glance::keystone::auth::region: regionOne > glance::keystone::auth::tenant: service > glance::keystone::authtoken::project_domain_name: Default > glance::keystone::authtoken::user_domain_name: Default > glance::notify::rabbitmq::notification_driver: messagingv2 > glance::notify::rabbitmq::rabbit_password: ZsVg3E86aZ7ttpW8A4zdHVNkh > glance::notify::rabbitmq::rabbit_port: 5672 > 
glance::notify::rabbitmq::rabbit_use_ssl: 'False' > glance::notify::rabbitmq::rabbit_userid: guest > glance::policy::policies: {} > glance_backend: swift > glance_log_file: '' > glance_notifier_strategy: noop > gnocchi::api::enable_proxy_headers_parsing: true > gnocchi::api::enabled: true > gnocchi::api::service_name: httpd > gnocchi::db::database_connection: mysql+pymysql://gnocchi:tzWjyxga94u4cwxsk4EwqKP26@10.0.0.12/gnocchi?read_default_group=tripleo&read_default_file=/etc/my.cnf.d/tripleo.cnf > gnocchi::db::mysql::allowed_hosts: ['%', '%{hiera(''mysql_bind_host'')}'] > gnocchi::db::mysql::dbname: gnocchi > gnocchi::db::mysql::host: 10.0.0.12 > gnocchi::db::mysql::password: tzWjyxga94u4cwxsk4EwqKP26 > gnocchi::db::mysql::user: gnocchi > gnocchi::db::sync::extra_opts: ' --sacks-number 128' > gnocchi::debug: false > gnocchi::keystone::auth::admin_url: http://10.0.0.12:8041 > gnocchi::keystone::auth::internal_url: http://10.0.0.12:8041 > gnocchi::keystone::auth::password: tzWjyxga94u4cwxsk4EwqKP26 > gnocchi::keystone::auth::public_url: http://172.16.18.34:8041 > gnocchi::keystone::auth::region: regionOne > gnocchi::keystone::auth::tenant: service > gnocchi::keystone::authtoken::auth_uri: http://10.0.0.12:5000 > gnocchi::keystone::authtoken::auth_url: http://10.0.0.12:5000 > gnocchi::keystone::authtoken::password: tzWjyxga94u4cwxsk4EwqKP26 > gnocchi::keystone::authtoken::project_domain_name: Default > gnocchi::keystone::authtoken::project_name: service > gnocchi::keystone::authtoken::user_domain_name: Default > gnocchi::metricd::metric_processing_delay: 30 > gnocchi::metricd::workers: '%{::os_workers}' > gnocchi::policy::policies: {} > gnocchi::statsd::archive_policy_name: low > gnocchi::statsd::flush_delay: 10 > gnocchi::statsd::project_id: 6c38cd8d-099a-4cb2-aecf-17be688e8616 > gnocchi::statsd::resource_id: 0a8b55df-f90f-491c-8cb9-7cdecec6fc26 > gnocchi::statsd::user_id: 27c0d3f8-e7ee-42f0-8317-72237d1c5ae3 > gnocchi::storage::ceph::ceph_conffile: 
/etc/ceph/ceph.conf > gnocchi::storage::ceph::ceph_keyring: /etc/ceph/ceph.client.openstack.keyring > gnocchi::storage::ceph::ceph_pool: metrics > gnocchi::storage::ceph::ceph_username: openstack > gnocchi::storage::s3::s3_access_key_id: '' > gnocchi::storage::s3::s3_endpoint_url: '' > gnocchi::storage::s3::s3_region_name: '' > gnocchi::storage::s3::s3_secret_access_key: '' > gnocchi::storage::swift::swift_auth_version: 3 > gnocchi::storage::swift::swift_authurl: http://10.0.0.12:5000/v3 > gnocchi::storage::swift::swift_endpoint_type: internalURL > gnocchi::storage::swift::swift_key: tzWjyxga94u4cwxsk4EwqKP26 > gnocchi::storage::swift::swift_user: service:gnocchi > gnocchi::wsgi::apache::bind_host: internal_api > gnocchi::wsgi::apache::servername: '%{hiera(''fqdn_internal_api'')}' > gnocchi::wsgi::apache::ssl: false > gnocchi::wsgi::apache::wsgi_process_display_name: gnocchi_wsgi > gnocchi_redis_password: qf9AHug3MR3sEvRDGxGNqbQuV > hacluster_pwd: fk2Jcbpxd8Ux5lzB > haproxy_docker: true > heat::api::bind_host: internal_api > heat::api::service_name: httpd > heat::api_cfn::bind_host: internal_api > heat::api_cfn::service_name: httpd > heat::cron::purge_deleted::age: '30' > heat::cron::purge_deleted::age_type: days > heat::cron::purge_deleted::destination: /dev/null > heat::cron::purge_deleted::ensure: present > heat::cron::purge_deleted::hour: '0' > heat::cron::purge_deleted::maxdelay: '3600' > heat::cron::purge_deleted::minute: '1' > heat::cron::purge_deleted::month: '*' > heat::cron::purge_deleted::monthday: '*' > heat::cron::purge_deleted::user: heat > heat::cron::purge_deleted::weekday: '*' > heat::database_connection: mysql+pymysql://heat:tvckZYmCDPWP6ATTnPJA9s4Ar@10.0.0.12/heat?read_default_group=tripleo&read_default_file=/etc/my.cnf.d/tripleo.cnf > heat::db::database_db_max_retries: -1 > heat::db::database_max_retries: -1 > heat::db::mysql::allowed_hosts: ['%', '%{hiera(''mysql_bind_host'')}'] > heat::db::mysql::dbname: heat > heat::db::mysql::host: 10.0.0.12 
> heat::db::mysql::password: tvckZYmCDPWP6ATTnPJA9s4Ar > heat::db::mysql::user: heat > heat::debug: false > heat::enable_proxy_headers_parsing: true > heat::engine::auth_encryption_key: 4mNfnq3fAGWnJQpo7LxjLXxEskDbkIat > heat::engine::configure_delegated_roles: false > heat::engine::convergence_engine: true > heat::engine::heat_metadata_server_url: http://172.16.18.34:8000 > heat::engine::heat_waitcondition_server_url: http://172.16.18.34:8000/v1/waitcondition > heat::engine::max_nested_stack_depth: 6 > heat::engine::max_resources_per_stack: 1000 > heat::engine::plugin_dirs: [] > heat::engine::trusts_delegated_roles: [] > heat::heat_keystone_clients_url: http://172.16.18.34:5000 > heat::keystone::auth::admin_url: http://10.0.0.12:8004/v1/%(tenant_id)s > heat::keystone::auth::internal_url: http://10.0.0.12:8004/v1/%(tenant_id)s > heat::keystone::auth::password: tvckZYmCDPWP6ATTnPJA9s4Ar > heat::keystone::auth::public_url: http://172.16.18.34:8004/v1/%(tenant_id)s > heat::keystone::auth::region: regionOne > heat::keystone::auth::tenant: service > heat::keystone::auth_cfn::admin_url: http://10.0.0.12:8000/v1 > heat::keystone::auth_cfn::internal_url: http://10.0.0.12:8000/v1 > heat::keystone::auth_cfn::password: tvckZYmCDPWP6ATTnPJA9s4Ar > heat::keystone::auth_cfn::public_url: http://172.16.18.34:8000/v1 > heat::keystone::auth_cfn::region: regionOne > heat::keystone::auth_cfn::tenant: service > heat::keystone::authtoken::auth_uri: http://10.0.0.12:5000 > heat::keystone::authtoken::auth_url: http://10.0.0.12:5000 > heat::keystone::authtoken::password: tvckZYmCDPWP6ATTnPJA9s4Ar > heat::keystone::authtoken::project_domain_name: Default > heat::keystone::authtoken::project_name: service > heat::keystone::authtoken::user_domain_name: Default > heat::keystone::domain::domain_admin: heat_stack_domain_admin > heat::keystone::domain::domain_admin_email: heat_stack_domain_admin@localhost > heat::keystone::domain::domain_name: heat_stack > heat::keystone::domain::domain_password: 
P6QpQP4qGrguWgjTWPt2U7eKd > heat::keystone_ec2_uri: http://10.0.0.12:5000/v3/ec2tokens > heat::max_json_body_size: 4194304 > heat::notification_driver: messagingv2 > heat::policy::policies: {} > heat::rabbit_heartbeat_timeout_threshold: 60 > heat::rabbit_password: ZsVg3E86aZ7ttpW8A4zdHVNkh > heat::rabbit_port: 5672 > heat::rabbit_use_ssl: 'False' > heat::rabbit_userid: guest > heat::rpc_response_timeout: 600 > heat::wsgi::apache_api::bind_host: internal_api > heat::wsgi::apache_api::servername: '%{hiera(''fqdn_internal_api'')}' > heat::wsgi::apache_api::ssl: false > heat::wsgi::apache_api_cfn::bind_host: internal_api > heat::wsgi::apache_api_cfn::servername: '%{hiera(''fqdn_internal_api'')}' > heat::wsgi::apache_api_cfn::ssl: false > heat::yaql_limit_iterators: 1000 > heat::yaql_memory_quota: 100000 > horizon::allowed_hosts: ['*'] > horizon::bind_address: internal_api > horizon::cache_backend: django.core.cache.backends.memcached.MemcachedCache > horizon::customization_module: '' > horizon::disable_password_reveal: true > horizon::disallow_iframe_embed: true > horizon::django_debug: false > horizon::django_session_engine: django.contrib.sessions.backends.cache > horizon::enable_secure_proxy_ssl_header: true > horizon::enforce_password_check: true > horizon::horizon_ca: /etc/ipa/ca.crt > horizon::keystone_url: http://10.0.0.12:5000 > horizon::listen_ssl: false > horizon::password_validator: '' > horizon::password_validator_help: '' > horizon::secret_key: AMBcqXgPQW > horizon::secure_cookies: false > horizon::servername: '%{hiera(''fqdn_internal_api'')}' > horizon::vhost_extra_params: > access_log_format: '%a %l %u %t \"%r\" %>s %b \"%%{}{Referer}i\" \"%%{}{User-Agent}i\"' > add_listen: true > options: [FollowSymLinks, MultiViews] > priority: 10 > kernel_modules: > nf_conntrack: {} > nf_conntrack_proto_sctp: {} > keystone::admin_bind_host: '%{hiera(''fqdn_ctlplane'')}' > keystone::admin_password: aVP2a7sFnMyx7wW7XeAH9CKQ8 > keystone::admin_port: '35357' > 
keystone::admin_token: Nacq3WxQsphPQ6yvArubW2ExT > keystone::config::keystone_config: > ec2/driver: {value: keystone.contrib.ec2.backends.sql.Ec2} > keystone::credential_keys: > /etc/keystone/credential-keys/0: {content: eetghB0ibBbgeqWGvMuksk-XuXawltrNjSD2aw2FQis=} > /etc/keystone/credential-keys/1: {content: f1WPYXJEHM-7NVIgNUX-MlE2vy8WaLW6X3Il-_ng28Q=} > keystone::cron::token_flush::destination: /var/log/keystone/keystone-tokenflush.log > keystone::cron::token_flush::ensure: present > keystone::cron::token_flush::hour: ['*'] > keystone::cron::token_flush::maxdelay: 0 > keystone::cron::token_flush::minute: ['1'] > keystone::cron::token_flush::month: ['*'] > keystone::cron::token_flush::monthday: ['*'] > keystone::cron::token_flush::user: keystone > keystone::cron::token_flush::weekday: ['*'] > keystone::database_connection: mysql+pymysql://keystone:Nacq3WxQsphPQ6yvArubW2ExT@10.0.0.12/keystone?read_default_group=tripleo&read_default_file=/etc/my.cnf.d/tripleo.cnf > keystone::db::database_db_max_retries: -1 > keystone::db::database_max_retries: -1 > keystone::db::mysql::allowed_hosts: ['%', '%{hiera(''mysql_bind_host'')}'] > keystone::db::mysql::dbname: keystone > keystone::db::mysql::host: 10.0.0.12 > keystone::db::mysql::password: Nacq3WxQsphPQ6yvArubW2ExT > keystone::db::mysql::user: keystone > keystone::debug: false > keystone::enable_credential_setup: true > keystone::enable_fernet_setup: true > keystone::enable_proxy_headers_parsing: true > keystone::enable_ssl: false > keystone::endpoint::admin_url: http://192.168.0.19:35357 > keystone::endpoint::internal_url: http://10.0.0.12:5000 > keystone::endpoint::public_url: http://172.16.18.34:5000 > keystone::endpoint::region: regionOne > keystone::endpoint::version: '' > keystone::fernet_keys: > /etc/keystone/fernet-keys/0: {content: gUodMq82zl3ks22m0UFyTok88KEYREzSTSWMiE0L75Y=} > /etc/keystone/fernet-keys/1: {content: N9tQ20D1F7n5m3viAJhQ79wWaeY0ptLUXswQTSO7vIM=} > keystone::fernet_max_active_keys: 5 > 
keystone::fernet_replace_keys: true > keystone::notification_driver: messagingv2 > keystone::notification_format: basic > keystone::policy::policies: {} > keystone::public_bind_host: '%{hiera(''fqdn_internal_api'')}' > keystone::rabbit_heartbeat_timeout_threshold: 60 > keystone::rabbit_password: ZsVg3E86aZ7ttpW8A4zdHVNkh > keystone::rabbit_port: 5672 > keystone::rabbit_use_ssl: 'False' > keystone::rabbit_userid: guest > keystone::roles::admin::admin_tenant: admin > keystone::roles::admin::email: admin@example.com > keystone::roles::admin::password: aVP2a7sFnMyx7wW7XeAH9CKQ8 > keystone::roles::admin::service_tenant: service > keystone::service_name: httpd > keystone::token_provider: fernet > keystone::wsgi::apache::admin_bind_host: ctlplane > keystone::wsgi::apache::admin_port: '35357' > keystone::wsgi::apache::bind_host: internal_api > keystone::wsgi::apache::servername: '%{hiera(''fqdn_internal_api'')}' > keystone::wsgi::apache::servername_admin: '%{hiera(''fqdn_ctlplane'')}' > keystone::wsgi::apache::ssl: false > keystone::wsgi::apache::threads: 1 > keystone::wsgi::apache::workers: '%{::os_workers}' > keystone_enable_db_purge: true > keystone_enable_member: true > keystone_ssl_certificate: '' > keystone_ssl_certificate_key: '' > memcached::listen_ip: internal_api > memcached::max_memory: 50% > memcached::udp_port: 0 > memcached::verbosity: v > memcached_ipv6: false > memcached_network: internal_api_subnet > mysql::server::manage_config_file: true > mysql::server::package_name: mariadb-galera-server > mysql::server::root_password: rVbzhalk5t > mysql_bind_host: internal_api > mysql_clustercheck_password: EFTVYffcauTEVP9KMzDYeabXt > mysql_ipv6: false > mysql_max_connections: 4096 > neutron::agents::dhcp::debug: false > neutron::agents::dhcp::dnsmasq_dns_servers: [] > neutron::agents::dhcp::enable_force_metadata: false > neutron::agents::dhcp::enable_isolated_metadata: false > neutron::agents::dhcp::enable_metadata_network: false > 
neutron::agents::dhcp::interface_driver: neutron.agent.linux.interface.OVSInterfaceDriver > neutron::agents::l3::agent_mode: legacy > neutron::agents::l3::debug: false > neutron::agents::l3::external_network_bridge: '' > neutron::agents::metadata::auth_password: 8TNuQY4exAwRpGBdxAEh4R6zR > neutron::agents::metadata::auth_tenant: service > neutron::agents::metadata::auth_url: http://10.0.0.12:5000 > neutron::agents::metadata::debug: false > neutron::agents::metadata::metadata_host: '%{hiera(''cloud_name_internal_api'')}' > neutron::agents::metadata::metadata_ip: '%{hiera(''nova_metadata_vip'')}' > neutron::agents::metadata::metadata_protocol: http > neutron::agents::metadata::shared_secret: paAR3hw2Xy3NGsejNTpPrfJnZ > neutron::agents::ml2::ovs::arp_responder: false > neutron::agents::ml2::ovs::bridge_mappings: ['datacentre:br-ex'] > neutron::agents::ml2::ovs::enable_distributed_routing: false > neutron::agents::ml2::ovs::extensions: [qos] > neutron::agents::ml2::ovs::l2_population: 'False' > neutron::agents::ml2::ovs::local_ip: tenant > neutron::agents::ml2::ovs::tunnel_types: [vxlan] > neutron::allow_overlapping_ips: true > neutron::bind_host: internal_api > neutron::core_plugin: ml2 > neutron::db::database_db_max_retries: -1 > neutron::db::database_max_retries: -1 > neutron::db::mysql::allowed_hosts: ['%', '%{hiera(''mysql_bind_host'')}'] > neutron::db::mysql::dbname: ovs_neutron > neutron::db::mysql::host: 10.0.0.12 > neutron::db::mysql::password: 8TNuQY4exAwRpGBdxAEh4R6zR > neutron::db::mysql::user: neutron > neutron::db::sync::db_sync_timeout: 300 > neutron::db::sync::extra_params: '' > neutron::debug: false > neutron::dhcp_agent_notification: true > neutron::dns_domain: openstacklocal > neutron::global_physnet_mtu: 1500 > neutron::host: '%{::fqdn}' > neutron::keystone::auth::admin_url: http://10.0.0.12:9696 > neutron::keystone::auth::internal_url: http://10.0.0.12:9696 > neutron::keystone::auth::password: 8TNuQY4exAwRpGBdxAEh4R6zR > 
neutron::keystone::auth::public_url: http://172.16.18.34:9696 > neutron::keystone::auth::region: regionOne > neutron::keystone::auth::tenant: service > neutron::keystone::authtoken::auth_uri: http://10.0.0.12:5000 > neutron::keystone::authtoken::auth_url: http://10.0.0.12:5000 > neutron::keystone::authtoken::password: 8TNuQY4exAwRpGBdxAEh4R6zR > neutron::keystone::authtoken::project_domain_name: Default > neutron::keystone::authtoken::project_name: service > neutron::keystone::authtoken::user_domain_name: Default > neutron::notification_driver: messagingv2 > neutron::plugins::ml2::extension_drivers: [qos, port_security] > neutron::plugins::ml2::firewall_driver: iptables_hybrid > neutron::plugins::ml2::flat_networks: [datacentre] > neutron::plugins::ml2::mechanism_drivers: [openvswitch] > neutron::plugins::ml2::network_vlan_ranges: ['datacentre:1:1000'] > neutron::plugins::ml2::overlay_ip_version: 4 > neutron::plugins::ml2::tenant_network_types: [vxlan] > neutron::plugins::ml2::tunnel_id_ranges: ['1:4094'] > neutron::plugins::ml2::type_drivers: [vxlan, vlan, flat, gre] > neutron::plugins::ml2::vni_ranges: ['1:4094'] > neutron::policy::policies: {} > neutron::purge_config: false > neutron::rabbit_heartbeat_timeout_threshold: 60 > neutron::rabbit_password: ZsVg3E86aZ7ttpW8A4zdHVNkh > neutron::rabbit_port: 5672 > neutron::rabbit_use_ssl: 'False' > neutron::rabbit_user: guest > neutron::server::allow_automatic_l3agent_failover: 'True' > neutron::server::database_connection: mysql+pymysql://neutron:8TNuQY4exAwRpGBdxAEh4R6zR@10.0.0.12/ovs_neutron?read_default_group=tripleo&read_default_file=/etc/my.cnf.d/tripleo.cnf > neutron::server::enable_dvr: false > neutron::server::enable_proxy_headers_parsing: true > neutron::server::notifications::auth_url: http://10.0.0.12:5000 > neutron::server::notifications::endpoint_type: internal > neutron::server::notifications::password: 8MMttgFpZ4fKUe99GWMpEwNqC > neutron::server::notifications::project_name: service > 
neutron::server::notifications::tenant_name: service > neutron::server::router_distributed: false > neutron::server::sync_db: true > neutron::service_plugins: [router, qos, trunk] > nova::api::api_bind_address: '%{hiera(''fqdn_internal_api'')}' > nova::api::default_floating_pool: public > nova::api::enable_proxy_headers_parsing: true > nova::api::enabled: true > nova::api::instance_name_template: instance-%08x > nova::api::metadata_listen: internal_api > nova::api::neutron_metadata_proxy_shared_secret: paAR3hw2Xy3NGsejNTpPrfJnZ > nova::api::service_name: httpd > nova::api::sync_db_api: true > nova::api_database_connection: mysql+pymysql://nova_api:8MMttgFpZ4fKUe99GWMpEwNqC@10.0.0.12/nova_api?read_default_group=tripleo&read_default_file=/etc/my.cnf.d/tripleo.cnf > nova::cell0_database_connection: mysql+pymysql://nova:8MMttgFpZ4fKUe99GWMpEwNqC@10.0.0.12/nova_cell0?read_default_group=tripleo&read_default_file=/etc/my.cnf.d/tripleo.cnf > nova::cinder_catalog_info: volumev3:cinderv3:internalURL > nova::cron::archive_deleted_rows::destination: /var/log/nova/nova-rowsflush.log > nova::cron::archive_deleted_rows::hour: '0' > nova::cron::archive_deleted_rows::max_rows: '100' > nova::cron::archive_deleted_rows::minute: '1' > nova::cron::archive_deleted_rows::month: '*' > nova::cron::archive_deleted_rows::monthday: '*' > nova::cron::archive_deleted_rows::until_complete: false > nova::cron::archive_deleted_rows::user: nova > nova::cron::archive_deleted_rows::weekday: '*' > nova::database_connection: mysql+pymysql://nova:8MMttgFpZ4fKUe99GWMpEwNqC@10.0.0.12/nova?read_default_group=tripleo&read_default_file=/etc/my.cnf.d/tripleo.cnf > nova::db::database_db_max_retries: -1 > nova::db::database_max_retries: -1 > nova::db::mysql::allowed_hosts: ['%', '%{hiera(''mysql_bind_host'')}'] > nova::db::mysql::dbname: nova > nova::db::mysql::host: 10.0.0.12 > nova::db::mysql::password: 8MMttgFpZ4fKUe99GWMpEwNqC > nova::db::mysql::user: nova > nova::db::mysql_api::allowed_hosts: ['%', 
'%{hiera(''mysql_bind_host'')}'] > nova::db::mysql_api::dbname: nova_api > nova::db::mysql_api::host: 10.0.0.12 > nova::db::mysql_api::password: 8MMttgFpZ4fKUe99GWMpEwNqC > nova::db::mysql_api::setup_cell0: true > nova::db::mysql_api::user: nova_api > nova::db::mysql_placement::allowed_hosts: ['%', '%{hiera(''mysql_bind_host'')}'] > nova::db::mysql_placement::dbname: nova_placement > nova::db::mysql_placement::host: 10.0.0.12 > nova::db::mysql_placement::password: 8MMttgFpZ4fKUe99GWMpEwNqC > nova::db::mysql_placement::user: nova_placement > nova::db::sync::db_sync_timeout: 300 > nova::db::sync_api::db_sync_timeout: 300 > nova::debug: false > nova::glance_api_servers: http://10.0.0.12:9292 > nova::host: '%{::fqdn}' > nova::keystone::auth::admin_url: http://10.0.0.12:8774/v2.1 > nova::keystone::auth::internal_url: http://10.0.0.12:8774/v2.1 > nova::keystone::auth::password: 8MMttgFpZ4fKUe99GWMpEwNqC > nova::keystone::auth::public_url: http://172.16.18.34:8774/v2.1 > nova::keystone::auth::region: regionOne > nova::keystone::auth::tenant: service > nova::keystone::auth_placement::admin_url: http://10.0.0.12:8778/placement > nova::keystone::auth_placement::internal_url: http://10.0.0.12:8778/placement > nova::keystone::auth_placement::password: 8MMttgFpZ4fKUe99GWMpEwNqC > nova::keystone::auth_placement::public_url: http://172.16.18.34:8778/placement > nova::keystone::auth_placement::region: regionOne > nova::keystone::auth_placement::tenant: service > nova::keystone::authtoken::auth_uri: http://10.0.0.12:5000 > nova::keystone::authtoken::auth_url: http://192.168.0.19:35357 > nova::keystone::authtoken::password: 8MMttgFpZ4fKUe99GWMpEwNqC > nova::keystone::authtoken::project_domain_name: Default > nova::keystone::authtoken::project_name: service > nova::keystone::authtoken::user_domain_name: Default > nova::my_ip: internal_api > nova::network::neutron::dhcp_domain: '' > nova::network::neutron::neutron_auth_type: v3password > nova::network::neutron::neutron_auth_url: 
http://192.168.0.19:35357/v3 > nova::network::neutron::neutron_ovs_bridge: br-int > nova::network::neutron::neutron_password: 8TNuQY4exAwRpGBdxAEh4R6zR > nova::network::neutron::neutron_project_name: service > nova::network::neutron::neutron_region_name: regionOne > nova::network::neutron::neutron_url: http://10.0.0.12:9696 > nova::network::neutron::neutron_username: neutron > nova::notification_driver: messagingv2 > nova::notification_format: unversioned > nova::notify_on_state_change: vm_and_task_state > nova::placement::auth_url: http://10.0.0.12:5000 > nova::placement::os_interface: internal > nova::placement::os_region_name: regionOne > nova::placement::password: 8MMttgFpZ4fKUe99GWMpEwNqC > nova::placement::project_name: service > nova::placement_database_connection: mysql+pymysql://nova_placement:8MMttgFpZ4fKUe99GWMpEwNqC@10.0.0.12/nova_placement?read_default_group=tripleo&read_default_file=/etc/my.cnf.d/tripleo.cnf > nova::policy::policies: {} > nova::purge_config: false > nova::rabbit_heartbeat_timeout_threshold: 60 > nova::rabbit_password: ZsVg3E86aZ7ttpW8A4zdHVNkh > nova::rabbit_port: 5672 > nova::rabbit_use_ssl: 'False' > nova::rabbit_userid: guest > nova::ram_allocation_ratio: '1.0' > nova::scheduler::discover_hosts_in_cells_interval: -1 > nova::scheduler::filter::scheduler_available_filters: [] > nova::scheduler::filter::scheduler_default_filters: [] > nova::scheduler::filter::scheduler_max_attempts: 3 > nova::use_ipv6: false > nova::vncproxy::common::vncproxy_host: 172.16.18.34 > nova::vncproxy::common::vncproxy_port: '6080' > nova::vncproxy::common::vncproxy_protocol: http > nova::vncproxy::enabled: true > nova::vncproxy::host: internal_api > nova::wsgi::apache_api::bind_host: internal_api > nova::wsgi::apache_api::servername: '%{hiera(''fqdn_internal_api'')}' > nova::wsgi::apache_api::ssl: false > nova::wsgi::apache_placement::api_port: '8778' > nova::wsgi::apache_placement::bind_host: internal_api > nova::wsgi::apache_placement::servername: 
'%{hiera(''fqdn_internal_api'')}' > nova::wsgi::apache_placement::ssl: false > nova_enable_db_purge: true > nova_wsgi_enabled: true > ntp::iburst_enable: true > 'ntp::maxpoll:': 10 > 'ntp::minpoll:': 6 > ntp::servers: [clock.redhat.com, clock.redhat.com] > pacemaker::corosync::cluster_name: tripleo_cluster > pacemaker::corosync::manage_fw: false > pacemaker::corosync::settle_tries: 360 > pacemaker::resource_defaults::defaults: > resource-stickiness: {value: INFINITY} > panko::api::enable_proxy_headers_parsing: true > panko::api::event_time_to_live: '86400' > panko::api::host: '%{hiera(''fqdn_internal_api'')}' > panko::api::service_name: httpd > panko::auth::auth_password: cU4KQf7yVPeHN6dXUHn8sb9RF > panko::auth::auth_region: regionOne > panko::auth::auth_tenant_name: service > panko::auth::auth_url: http://10.0.0.12:5000 > panko::db::database_connection: mysql+pymysql://panko:cU4KQf7yVPeHN6dXUHn8sb9RF@10.0.0.12/panko?read_default_group=tripleo&read_default_file=/etc/my.cnf.d/tripleo.cnf > panko::db::mysql::allowed_hosts: ['%', '%{hiera(''mysql_bind_host'')}'] > panko::db::mysql::dbname: panko > panko::db::mysql::host: 10.0.0.12 > panko::db::mysql::password: cU4KQf7yVPeHN6dXUHn8sb9RF > panko::db::mysql::user: panko > panko::debug: false > panko::expirer::hour: '0' > panko::expirer::minute: '1' > panko::expirer::month: '*' > panko::expirer::monthday: '*' > panko::expirer::weekday: '*' > panko::keystone::auth::admin_url: http://10.0.0.12:8977 > panko::keystone::auth::internal_url: http://10.0.0.12:8977 > panko::keystone::auth::password: cU4KQf7yVPeHN6dXUHn8sb9RF > panko::keystone::auth::public_url: http://172.16.18.34:8977 > panko::keystone::auth::region: regionOne > panko::keystone::auth::tenant: service > panko::keystone::authtoken::auth_uri: http://10.0.0.12:5000 > panko::keystone::authtoken::auth_url: http://10.0.0.12:5000 > panko::keystone::authtoken::password: cU4KQf7yVPeHN6dXUHn8sb9RF > panko::keystone::authtoken::project_domain_name: Default > 
panko::keystone::authtoken::project_name: service > panko::keystone::authtoken::user_domain_name: Default > panko::policy::policies: {} > panko::wsgi::apache::bind_host: internal_api > panko::wsgi::apache::servername: '%{hiera(''fqdn_internal_api'')}' > panko::wsgi::apache::ssl: false > rabbit_ipv6: false > rabbitmq::default_pass: ZsVg3E86aZ7ttpW8A4zdHVNkh > rabbitmq::default_user: guest > rabbitmq::delete_guest_user: false > rabbitmq::erlang_cookie: CgymAHzuhrGljFWjqwam > rabbitmq::file_limit: 65536 > rabbitmq::interface: internal_api > rabbitmq::nr_ha_queues: -1 > rabbitmq::package_provider: yum > rabbitmq::package_source: undef > rabbitmq::port: 5672 > rabbitmq::repos_ensure: false > rabbitmq::service_manage: false > rabbitmq::ssl: false > rabbitmq::ssl_depth: 1 > rabbitmq::ssl_erl_dist: false > rabbitmq::ssl_interface: internal_api > rabbitmq::ssl_only: false > rabbitmq::ssl_port: 5672 > rabbitmq::tcp_keepalive: true > rabbitmq::wipe_db_on_cookie_change: true > rabbitmq_config_variables: {cluster_partition_handling: ignore, loopback_users: '[]', > queue_master_locator: <<"min-masters">>} > rabbitmq_environment: {NODE_IP_ADDRESS: '', NODE_PORT: '', RABBITMQ_NODENAME: 'rabbit@%{::hostname}', > RABBITMQ_SERVER_ERL_ARGS: '"+K true +P 1048576 -kernel inet_default_connect_options > [{nodelay,true}]"', export ERL_EPMD_ADDRESS: '%{hiera(''rabbitmq::interface'')}'} > rabbitmq_kernel_variables: {inet_dist_listen_max: '25672', inet_dist_listen_min: '25672', > net_ticktime: 15} > redis::bind: internal_api > redis::managed_by_cluster_manager: true > redis::masterauth: qf9AHug3MR3sEvRDGxGNqbQuV > redis::notify_service: false > redis::port: 6379 > redis::requirepass: qf9AHug3MR3sEvRDGxGNqbQuV > redis::sentinel::master_name: '%{hiera(''bootstrap_nodeid'')}' > redis::sentinel::notification_script: /usr/local/bin/redis-notifications.sh > redis::sentinel::redis_host: '%{hiera(''bootstrap_nodeid_ip'')}' > redis::sentinel::sentinel_bind: internal_api > redis::sentinel_auth_pass: 
qf9AHug3MR3sEvRDGxGNqbQuV > redis::service_manage: false > redis::ulimit: '10240' > redis_ipv6: false > snmp::agentaddress: ['udp:161', 'udp6:[::1]:161'] > snmp::snmpd_options: -LS0-5d > snmpd_network: internal_api_subnet > swift::keystone::auth::admin_url: http://10.0.0.147:8080 > swift::keystone::auth::admin_url_s3: http://10.0.0.147:8080 > swift::keystone::auth::configure_s3_endpoint: false > swift::keystone::auth::internal_url: http://10.0.0.147:8080/v1/AUTH_%(tenant_id)s > swift::keystone::auth::internal_url_s3: http://10.0.0.147:8080 > swift::keystone::auth::operator_roles: [admin, swiftoperator, ResellerAdmin] > swift::keystone::auth::password: XPsajW2e83JQUYJhBQn3cWyzX > swift::keystone::auth::public_url: http://172.16.18.34:8080/v1/AUTH_%(tenant_id)s > swift::keystone::auth::public_url_s3: http://172.16.18.34:8080 > swift::keystone::auth::region: regionOne > swift::keystone::auth::tenant: service > swift::proxy::account_autocreate: true > swift::proxy::authtoken::auth_uri: http://10.0.0.12:5000 > swift::proxy::authtoken::auth_url: http://10.0.0.12:5000 > swift::proxy::authtoken::password: XPsajW2e83JQUYJhBQn3cWyzX > swift::proxy::authtoken::project_name: service > swift::proxy::keystone::operator_roles: [admin, swiftoperator, ResellerAdmin] > swift::proxy::node_timeout: 60 > swift::proxy::pipeline: [catch_errors, healthcheck, proxy-logging, cache, ratelimit, > bulk, tempurl, formpost, authtoken, keystone, staticweb, copy, container_quotas, > account_quotas, slo, dlo, versioned_writes, proxy-logging, proxy-server] > swift::proxy::port: '8080' > swift::proxy::proxy_local_net_ip: storage > swift::proxy::staticweb::url_base: http://172.16.18.34:8080 > swift::proxy::versioned_writes::allow_versioned_writes: true > swift::proxy::workers: auto > swift::storage::all::account_pipeline: [healthcheck, account-server] > swift::storage::all::account_server_workers: auto > swift::storage::all::container_pipeline: [healthcheck, container-server] > 
swift::storage::all::container_server_workers: auto > swift::storage::all::incoming_chmod: Du=rwx,g=rx,o=rx,Fu=rw,g=r,o=r > swift::storage::all::mount_check: false > swift::storage::all::object_pipeline: [healthcheck, recon, object-server] > swift::storage::all::object_server_workers: auto > swift::storage::all::outgoing_chmod: Du=rwx,g=rx,o=rx,Fu=rw,g=r,o=r > swift::storage::all::storage_local_net_ip: storage_mgmt > swift::storage::disks::args: {} > swift::swift_hash_path_suffix: 66DUDAQa8kqQvJWmH22tQhM2d > sysctl_settings: > fs.inotify.max_user_instances: {value: 1024} > fs.suid_dumpable: {value: 0} > kernel.dmesg_restrict: {value: 1} > kernel.pid_max: {value: 1048576} > net.core.netdev_max_backlog: {value: 10000} > net.ipv4.conf.all.arp_accept: {value: 1} > net.ipv4.conf.all.log_martians: {value: 1} > net.ipv4.conf.all.secure_redirects: {value: 0} > net.ipv4.conf.all.send_redirects: {value: 0} > net.ipv4.conf.default.accept_redirects: {value: 0} > net.ipv4.conf.default.log_martians: {value: 1} > net.ipv4.conf.default.secure_redirects: {value: 0} > net.ipv4.conf.default.send_redirects: {value: 0} > net.ipv4.ip_forward: {value: 1} > net.ipv4.neigh.default.gc_thresh1: {value: 1024} > net.ipv4.neigh.default.gc_thresh2: {value: 2048} > net.ipv4.neigh.default.gc_thresh3: {value: 4096} > net.ipv4.tcp_keepalive_intvl: {value: 1} > net.ipv4.tcp_keepalive_probes: {value: 5} > net.ipv4.tcp_keepalive_time: {value: 5} > net.ipv6.conf.all.accept_ra: {value: 0} > net.ipv6.conf.all.accept_redirects: {value: 0} > net.ipv6.conf.all.autoconf: {value: 0} > net.ipv6.conf.all.disable_ipv6: {value: 0} > net.ipv6.conf.default.accept_ra: {value: 0} > net.ipv6.conf.default.accept_redirects: {value: 0} > net.ipv6.conf.default.autoconf: {value: 0} > net.ipv6.conf.default.disable_ipv6: {value: 0} > net.netfilter.nf_conntrack_max: {value: 500000} > net.nf_conntrack_max: {value: 500000} > timezone::timezone: UTC > tripleo.aodh_api.firewall_rules: > 128 aodh-api: > dport: [8042, 13042] > 
tripleo.cinder_api.firewall_rules: > 119 cinder: > dport: [8776, 13776] > tripleo.cinder_volume.firewall_rules: > 120 iscsi initiator: {dport: 3260} > tripleo.glance_api.firewall_rules: > 112 glance_api: > dport: [9292, 13292] > tripleo.gnocchi_api.firewall_rules: > 129 gnocchi-api: > dport: [8041, 13041] > tripleo.gnocchi_statsd.firewall_rules: > 140 gnocchi-statsd: {dport: 8125, proto: udp} > tripleo.haproxy.firewall_rules: > 107 haproxy stats: {dport: 1993} > tripleo.heat_api.firewall_rules: > 125 heat_api: > dport: [8004, 13004] > tripleo.heat_api_cfn.firewall_rules: > 125 heat_cfn: > dport: [8000, 13800] > tripleo.horizon.firewall_rules: > 127 horizon: > dport: [80, 443] > tripleo.keystone.firewall_rules: > 111 keystone: > dport: [5000, 13000, '35357'] > tripleo.memcached.firewall_rules: > 121 memcached: {dport: 11211, proto: tcp, source: '%{hiera(''memcached_network'')}'} > tripleo.mysql.firewall_rules: > 104 mysql galera-bundle: > dport: [873, 3123, 3306, 4444, 4567, 4568, 9200] > tripleo.neutron_api.firewall_rules: > 114 neutron api: > dport: [9696, 13696] > tripleo.neutron_dhcp.firewall_rules: > 115 neutron dhcp input: {dport: 67, proto: udp} > 116 neutron dhcp output: {chain: OUTPUT, dport: 68, proto: udp} > tripleo.neutron_l3.firewall_rules: > 106 neutron_l3 vrrp: {proto: vrrp} > tripleo.neutron_ovs_agent.firewall_rules: > 118 neutron vxlan networks: {dport: 4789, proto: udp} > 136 neutron gre networks: {proto: gre} > tripleo.nova_api.firewall_rules: > 113 nova_api: > dport: [8774, 13774, 8775] > tripleo.nova_placement.firewall_rules: > 138 nova_placement: > dport: [8778, 13778] > tripleo.nova_vnc_proxy.firewall_rules: > 137 nova_vnc_proxy: > dport: [6080, 13080] > tripleo.ntp.firewall_rules: > 105 ntp: {dport: 123, proto: udp} > tripleo.pacemaker.firewall_rules: > 130 pacemaker tcp: > dport: [2224, 3121, 21064] > proto: tcp > 131 pacemaker udp: {dport: 5405, proto: udp} > tripleo.panko_api.firewall_rules: > 140 panko-api: > dport: [8977, 13977] > 
tripleo.rabbitmq.firewall_rules: > 109 rabbitmq-bundle: > dport: [3122, 4369, 5672, 25672] > tripleo.redis.firewall_rules: > 108 redis-bundle: > dport: [3124, 6379, 26379] > tripleo.snmp.firewall_rules: > 124 snmp: {dport: 161, proto: udp, source: '%{hiera(''snmpd_network'')}'} > tripleo.swift_proxy.firewall_rules: > 122 swift proxy: > dport: [8080, 13808] > tripleo.swift_storage.firewall_rules: > 123 swift storage: > dport: [873, 6000, 6001, 6002] > tripleo::fencing::config: {} > tripleo::firewall::manage_firewall: true > tripleo::firewall::purge_firewall_rules: false > tripleo::glance::nfs_mount::edit_fstab: false > tripleo::glance::nfs_mount::options: _netdev,bg,intr,context=system_u:object_r:glance_var_lib_t:s0 > tripleo::glance::nfs_mount::share: '' > tripleo::haproxy::ca_bundle: /etc/ipa/ca.crt > tripleo::haproxy::crl_file: null > tripleo::haproxy::haproxy_log_address: /dev/log > tripleo::haproxy::haproxy_service_manage: false > tripleo::haproxy::haproxy_stats: true > tripleo::haproxy::haproxy_stats_password: cVyBqF8WnZKsvHhxHTjxkbYfq > tripleo::haproxy::haproxy_stats_user: admin > tripleo::haproxy::mysql_clustercheck: true > tripleo::haproxy::redis_password: qf9AHug3MR3sEvRDGxGNqbQuV > tripleo::packages::enable_install: false > tripleo::profile::base::cinder::cinder_enable_db_purge: true > tripleo::profile::base::cinder::volume::cinder_enable_iscsi_backend: true > tripleo::profile::base::cinder::volume::cinder_enable_nfs_backend: false > tripleo::profile::base::cinder::volume::cinder_enable_rbd_backend: false > tripleo::profile::base::cinder::volume::iscsi::cinder_iscsi_address: storage > tripleo::profile::base::cinder::volume::iscsi::cinder_iscsi_helper: lioadm > tripleo::profile::base::cinder::volume::iscsi::cinder_iscsi_protocol: iscsi > tripleo::profile::base::cinder::volume::iscsi::cinder_lvm_loop_device_size: 10280 > tripleo::profile::base::cinder::volume::nfs::cinder_nas_secure_file_operations: 'False' > 
tripleo::profile::base::cinder::volume::nfs::cinder_nas_secure_file_permissions: 'False' > tripleo::profile::base::cinder::volume::nfs::cinder_nfs_mount_options: '' > tripleo::profile::base::cinder::volume::nfs::cinder_nfs_servers: [] > tripleo::profile::base::cinder::volume::rbd::cinder_rbd_ceph_conf: /etc/ceph/ceph.conf > tripleo::profile::base::cinder::volume::rbd::cinder_rbd_extra_pools: [] > tripleo::profile::base::cinder::volume::rbd::cinder_rbd_pool_name: volumes > tripleo::profile::base::cinder::volume::rbd::cinder_rbd_user_name: openstack > tripleo::profile::base::database::mysql::bind_address: '%{hiera(''fqdn_internal_api'')}' > tripleo::profile::base::database::mysql::client::enable_ssl: false > tripleo::profile::base::database::mysql::client::mysql_client_bind_address: internal_api > tripleo::profile::base::database::mysql::client::ssl_ca: /etc/ipa/ca.crt > tripleo::profile::base::database::mysql::client_bind_address: internal_api > tripleo::profile::base::database::mysql::generate_dropin_file_limit: true > tripleo::profile::base::database::redis::tls_proxy_bind_ip: internal_api > tripleo::profile::base::database::redis::tls_proxy_fqdn: '%{hiera(''fqdn_internal_api'')}' > tripleo::profile::base::database::redis::tls_proxy_port: 6379 > tripleo::profile::base::docker::additional_sockets: [/var/lib/openstack/docker.sock] > tripleo::profile::base::docker::configure_network: true > tripleo::profile::base::docker::debug: false > tripleo::profile::base::docker::docker_options: --log-driver=journald --signature-verification=false > --iptables=false --live-restore > tripleo::profile::base::docker::insecure_registries: ['brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888', > 'rhos-qe-mirror-tlv.usersys.redhat.com:5000'] > tripleo::profile::base::docker::network_options: --bip=172.31.0.1/24 > tripleo::profile::base::glance::api::glance_nfs_enabled: false > tripleo::profile::base::glance::api::tls_proxy_bind_ip: internal_api > 
tripleo::profile::base::glance::api::tls_proxy_fqdn: '%{hiera(''fqdn_internal_api'')}' > tripleo::profile::base::glance::api::tls_proxy_port: '9292' > tripleo::profile::base::gnocchi::api::gnocchi_backend: swift > tripleo::profile::base::gnocchi::api::incoming_storage_driver: redis > tripleo::profile::base::haproxy::certificates_specs: {} > tripleo::profile::base::heat::manage_db_purge: true > tripleo::profile::base::keystone::ceilometer_notification_topics: [notifications] > tripleo::profile::base::keystone::extra_notification_topics: [] > tripleo::profile::base::keystone::heat_admin_domain: heat_stack > tripleo::profile::base::keystone::heat_admin_email: heat_stack_domain_admin@localhost > tripleo::profile::base::keystone::heat_admin_password: P6QpQP4qGrguWgjTWPt2U7eKd > tripleo::profile::base::keystone::heat_admin_user: heat_stack_domain_admin > tripleo::profile::base::lvm::enable_udev: false > tripleo::profile::base::neutron::dhcp_agent_wrappers::dnsmasq_image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-neutron-dhcp-agent:2018-06-15.2 > tripleo::profile::base::neutron::dhcp_agent_wrappers::dnsmasq_process_wrapper: /var/lib/neutron/dnsmasq_wrapper > tripleo::profile::base::neutron::dhcp_agent_wrappers::enable_dnsmasq_wrapper: true > tripleo::profile::base::neutron::dhcp_agent_wrappers::enable_haproxy_wrapper: true > tripleo::profile::base::neutron::dhcp_agent_wrappers::haproxy_image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-neutron-dhcp-agent:2018-06-15.2 > tripleo::profile::base::neutron::dhcp_agent_wrappers::haproxy_process_wrapper: /var/lib/neutron/dhcp_haproxy_wrapper > tripleo::profile::base::neutron::l3_agent_wrappers::dibbler_image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-neutron-l3-agent:2018-06-15.2 > tripleo::profile::base::neutron::l3_agent_wrappers::dibbler_process_wrapper: /var/lib/neutron/dibbler_wrapper > tripleo::profile::base::neutron::l3_agent_wrappers::enable_dibbler_wrapper: true > 
tripleo::profile::base::neutron::l3_agent_wrappers::enable_haproxy_wrapper: true > tripleo::profile::base::neutron::l3_agent_wrappers::enable_keepalived_wrapper: true > tripleo::profile::base::neutron::l3_agent_wrappers::enable_radvd_wrapper: false > tripleo::profile::base::neutron::l3_agent_wrappers::haproxy_image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-neutron-l3-agent:2018-06-15.2 > tripleo::profile::base::neutron::l3_agent_wrappers::haproxy_process_wrapper: /var/lib/neutron/l3_haproxy_wrapper > tripleo::profile::base::neutron::l3_agent_wrappers::keepalived_image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-neutron-l3-agent:2018-06-15.2 > tripleo::profile::base::neutron::l3_agent_wrappers::keepalived_process_wrapper: /var/lib/neutron/keepalived_wrapper > tripleo::profile::base::neutron::l3_agent_wrappers::keepalived_state_change_wrapper: /var/lib/neutron/keepalived_state_change_wrapper > tripleo::profile::base::neutron::l3_agent_wrappers::radvd_image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-neutron-l3-agent:2018-06-15.2 > tripleo::profile::base::neutron::l3_agent_wrappers::radvd_process_wrapper: /var/lib/neutron/radvd_wrapper > tripleo::profile::base::neutron::server::l3_ha_override: '' > tripleo::profile::base::neutron::server::tls_proxy_bind_ip: internal_api > tripleo::profile::base::neutron::server::tls_proxy_fqdn: '%{hiera(''fqdn_internal_api'')}' > tripleo::profile::base::neutron::server::tls_proxy_port: '9696' > tripleo::profile::base::pacemaker::remote_authkey: 
bD4Y2npFBrngBvbCMDzQEkAM7Ujv6Y8KNx9xZYEzbY7ewud67bUdB84YrfkG7TY6pCTaNPzaZq47XPVvP9Qn2JyCA9A4uZsbbeUgM7uzt6ubTsmNUmXAv9qpycgGcaumjJKj7gJZQbmZT2yfBhCXWp4vnbXedYmTMrf9DfT2sCZUKEvVa8UGvUCEzfvZQGEemswRxCEx7Qds6mKmcDP6kUMey6FGsrHK4dwYKc9C6VaMRrcdZfvsJYfYAXJasfP2VkEfCB3CxqeGueAhrsPQ6aEbXKzgE9GjbdmGPjH8YU3q2ZfP9eU4ChQjzmKcyEuzqMjmp8RDUzeT8nXPZdqjBe3Wf9tNPA9ZfBxCXcXTAupGjmZq6PVjK4c4aMyPyxTprXJsQqWWhsBAppH93CNEUsQbsnn2s8Xgzr4fztGwNAF9jrsdjZ8YpJ3CkXUmn8f3ERUZXt4ZhJk7xPj4YvY6VNyNFzHArXB6QtyGPWzTmYY2VjfyJ2yEEmUBVUpZYCcvbeswqcmZGnAqjvCpMakFC2cVBYCYwWn9HQH64fCasbm2TDdc64Js74EmvT8qdHaZ8EEDyAfueCpmb73ujgGZggKNHtd8bJDwVFwZk7RUVwg9PXbQfC8uGMnEs4MGBr42YMxsh9aZ9qbbzqn3QpaqkfQJ4BWw4RsjGfTWKA7upZWh8ZmzJPkVewycsBEmCWgRrZThW9bHTjgVCbgzVjMPmKqyA6Zjq99KR3QWTvsguWjkbRwp9h9cPqg7RJUhMsC2snuKNKwZJRDG6rVCJfWyXNdZQVBvG4PujTfaHdx8Dak3Audm48ywBkBWB7DfJpsTKbwxPRaXyvWxkMXHDvqttYsU9mYbvwXugasA3sWmrXjsCdcYq9DvKEUx94aXfxy2Fe7dA3nWUEBaDnkTqcxUZdxhN2J8gvUHHP8hTQ7ZXZzkMs6NUKcytW8FAY7HkvPngvpc2p2z3fgKT2BbCfBCG88C9TAxMcdrf3KTk8FduhPtVJ6eqq6Dr8QYRXkFKx623ZAtsPGJjYNvXKheDx7C84btjQmQ9M2zNFmxB83Wedc4syNptPhMmBZZ8ezhVRmJ7BEU3HzqkFv8WfDMGeWuBpK8wBDxEGfUYPBZVW9VPJfCAREGErQjeABBbMrTgaQCZFbUFkN7FXmafmuzjsBMYfVwUDgU4Bvm4HzqMdCghTbbPBxRRfsyaCakNexgpbTdWT2dHGVvRfUQtaTvZxftTDdp3QW4KaVMVyQWeGZxkKG6wbPpebtAPqufuvfq4XAayWZ2AugEcF6RDZDF3DFEaThp72fnYUQtmAfNWYNCJrPyawFkjU4Uq72Bu2F6sfYVsEJRCpUKCfq7qqfahhVWanmcDP9CWpgVA3gjuTN838q3mFcbCyHMcdZUXgJ8EpGX2x9brYc4JyBsHrd9N99zsERVme7GZtwN7rJ3yvsfw2dYhKypQzDXHBPrvMGbZHMTpg3fDbTbNkHEWU7PuG68GsuM38cgMkt7XYHu2Htq9mPAMrUGq4AdRtvHACqHggCzgZu9fuefyazwVEDURfYaZynUYJW6F67syMAsJqvv4Hsn3bktrHb7GQEf7WJTHmMyQ846WVkc2JhebQ39aWykqbHAauzKmYZgkcKJxRK7fD8PsvCaHAbMrfFqf3Ubx4BWrz2PY6wNTrh26UMPB7ADAJBKFAAhpx4kRpF22Un3x2ta7sm2ZxYmK6MdWJ4RZsrrcJe8eu3w4EsBcJjRuxgBTVKRxA9ejsddaufyp3Ahz9khNvZ8vHZvreGMGRX3jDsWvxMFqzzee3sz4XECAebqcvBDMef4qZpZH6x8wVDQCcRsAjnkxNyT9jRYs8vaBBAznA3uKtXTm2G7kbMq7r4t8f23FnzdFTWz6zPsJfGXXdwxqpNtRZNJMdNGfBuRQjmtV6JJJY2w6XzntQkn46mtYaGzXu9hDT7fmMVM6jTJ3btntebVCqVeF4xpAyZCwZtve46rMacdqUneUUU3
Qtf8zRHCxJysEmBzNcGqR7v3rcy2sf3MCDjEXu4hypw2NGGjHnDYaPtQf8yc82ryWBwUJ9QUXUKtyphQEbgTCaCRdGRtPW2EzgAwZTNEQwrKwgAmpnPZzhBM92Q73hTzg3NE7JmesbMvfBrQEnQAZyUqkrKQph7vBZxP2kE4zuKCsrdGjBU3bCmEAtHPbrjEtWHUXrF7B94TXbsxUEqZAe4N8KxRejsWReHNsnR7HHK2tvqHWuQeu4744mwAhRyFyMMjr2cVbm2qJDUGUDf3RNrhaHkTGf6vbR7chQWmgtQF3xuTmdhBFGU7AscrzDyPwHvsfNt8Pp7jX8AwbHpxyXgap8N6tGUAgaq2ukHB6wCgkWMCV3vvUZPFUQYJNTFPfckKUYPZ6EpakRufwRV7eZbJkMUgRZ8ZyPQj8BVEE8RNYyu2r3NqPqjTW3yFdVZ63PsMF7wxVBnzZpTaNP22EstCmPvkDfgY6uTYWKUZVHvBgvDpbrQrWkDfuTchpBsq7Pgz8jgz6BxyJJRAgCm4Z2DDxyfXQ7xtb2xhdPgen7CnNet3jnasbPVf2EnXXvMFVtnH6rNQKbxC6wadPzqm2wvH4czUeJPjdVGQK2xcjAPHDPxgU4dTNvwyYYWDBEJQjVUpEKdhtGDfkWZfNCgRY6gwZXANtuYFVDBBpkxrCMW4m8jgEuhUafy9wabFxnWtsnBArAaqUGrqzb7xvvFdj3VvnfEnmz8TQW4n6aKj7zXxaPafQraXvDgyEyuUXhm6fApe97ukjjtF36Y7rbmKWEBzYPb4FFb666yWepbcjkPcbEs48Nzz7U2TevxvUWVwm6FCgWRWWwpDyHdV3fRxY94PjsKBk4WXUqNqbdwhGDYG3AqbcR8BXF8GpY4mNUtFQrwNC33feVXbe6KZvJQWnGKwUYFZTPtrFrcCFHEDCGAdEe9ENvNPczwK9tja897fax6MwcXtcKgrFtu7AacJufVQZH9BtXwMJMnYGsEKfF9y3wvrt7XNqJfVXdMhZjaYgYY9ZMM8QFywrGZjZkkCEvAs23Z78DqTDfWa66x9ZTCWB9zaFCkuNC9CCT93EmHVKHbGHymkpfXf787XY44CmeWCzWEsYFmCDBg6GHsUeWEp4tW8cYxnypkqWMkPZeKJVppVxZrQKQb8hNepdceEfvKuD9mtHGRZPjgBqMYCKTfm9PKj3BBYtN4twhXwTqrhKngapA7vsWgac73Mh2EQTrNEZUtTjmypsChZqH8778RMWhYuRdPzBMFXAsftVJgawKY3ajemHWcEbGx6wjFsq3Af2NHX6QcW7KGbjmschyJWKa8x2PEAEsQaUYcyx33xkqR2Rd7yPUgHeavzFHN4yd3u96sMa3pvhhnZHxfCFKBFQcTJwEWRzhTmqx2UjE9TphTAGepkNjNr38WsNVBpsXwGxYepb3Fb4ddTsuMMHcxvb6ZYfKtQMZyDpMx2KRndBu2QNfZHy7zwqx3qd7rUuh38pPW96egYfaa6tBVPVUbgu8sgFxxPXjMs9AmQfJTMjWXcEYPQKpuGCnVhDfFyNeTsCfaUsKqnyHcvP36sXbdFRjbhUNN6vqNJAwbmgX3KDDHPBFWzbeCuRqNdMrr8VzNyvUqDv2VfjuB32d79AsPzew8ZcNub6kwtPnmrCHuPuJtnnjEUqyDmEx6Tf4nffmdVUJACzVkVCPb9AJp8x6TFJ9PcCWDPpADab74PX8PXddKRqyrwrHbc8aqQWjYrZBsh2hmrGZBQAyZuhUgqfysJrre3WEAPcmMMw9BDxzs7YDTF3UNG7negfNY7r8By8DAxPu9BmsJqfDjUWvgbcEaawyYXKp6HCAfDrXRkHaGXXnYZ4kxkPkMuwm9ZcUpEVNaePcdZC4aUQvHdpxKHmdyedhryGpmru4wrth49kjxWGwuEjYqQcTs4ctZfy8t42JPZsWRqTCsMx4dcz6M7gG49aEpeyzv9ZqXhDUMaV33Cn3gkVUJWxsJEEKrb
HpQDznYex9TrfQktahr84yDsg9swXFM2dhNvBCe8mdA2p9zVgf6yTncAEZBtcFYQYyUJpQpKvx4TB3FNAZMMqB2qt9ntCZ8e > tripleo::profile::base::rabbitmq::enable_internal_tls: false > tripleo::profile::base::snmp::snmpd_password: 034690b9f8c681c1a3a9d9cbe1f71477b30945e2 > tripleo::profile::base::snmp::snmpd_user: ro_snmp_user > tripleo::profile::base::sshd::bannertext: '' > tripleo::profile::base::sshd::motd: '' > tripleo::profile::base::sshd::options: > AcceptEnv: [LANG LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES, > LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT, LC_IDENTIFICATION > LC_ALL LANGUAGE, XMODIFIERS] > AuthorizedKeysFile: .ssh/authorized_keys > ChallengeResponseAuthentication: 'no' > GSSAPIAuthentication: 'yes' > GSSAPICleanupCredentials: 'no' > HostKey: [/etc/ssh/ssh_host_rsa_key, /etc/ssh/ssh_host_ecdsa_key, /etc/ssh/ssh_host_ed25519_key] > PasswordAuthentication: 'no' > Subsystem: sftp /usr/libexec/openssh/sftp-server > SyslogFacility: AUTHPRIV > UseDNS: 'no' > UsePAM: 'yes' > UsePrivilegeSeparation: sandbox > X11Forwarding: 'yes' > tripleo::profile::base::swift::proxy::ceilometer_enabled: false > tripleo::profile::base::swift::proxy::ceilometer_messaging_use_ssl: 'False' > tripleo::profile::base::swift::proxy::rabbit_port: 5672 > tripleo::profile::base::swift::proxy::tls_proxy_bind_ip: storage > tripleo::profile::base::swift::proxy::tls_proxy_fqdn: '%{hiera(''fqdn_storage'')}' > tripleo::profile::base::swift::proxy::tls_proxy_port: '8080' > tripleo::profile::base::swift::ringbuilder::build_ring: true > tripleo::profile::base::swift::ringbuilder::min_part_hours: 1 > tripleo::profile::base::swift::ringbuilder::part_power: 10 > tripleo::profile::base::swift::ringbuilder::raw_disk_prefix: r1z1- > tripleo::profile::base::swift::ringbuilder::raw_disks: [':%PORT%/d1'] > tripleo::profile::base::swift::ringbuilder::replicas: 3 > tripleo::profile::base::swift::ringbuilder::swift_ring_get_tempurl: '' > 
tripleo::profile::base::swift::ringbuilder::swift_ring_put_tempurl: '' > tripleo::profile::base::swift::ringbuilder:skip_consistency_check: true > tripleo::profile::base::swift::storage::enable_swift_storage: true > tripleo::profile::base::swift::storage::use_local_dir: true > tripleo::profile::base::tuned::profile: '' > tripleo::profile::pacemaker::cinder::volume_bundle::cinder_volume_docker_image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-cinder-volume:pcmklatest > tripleo::profile::pacemaker::cinder::volume_bundle::docker_environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > tripleo::profile::pacemaker::cinder::volume_bundle::docker_volumes: ['/etc/hosts:/etc/hosts:ro', > '/etc/localtime:/etc/localtime:ro', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', > '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', > '/etc/puppet:/etc/puppet:ro', '/var/lib/kolla/config_files/cinder_volume.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/cinder/:/var/lib/kolla/config_files/src:ro', > '/etc/iscsi:/var/lib/kolla/config_files/src-iscsid:ro', '/etc/ceph:/var/lib/kolla/config_files/src-ceph:ro', > '/lib/modules:/lib/modules:ro', '/dev/:/dev/', '/run/:/run/', '/sys:/sys', > '/var/lib/cinder:/var/lib/cinder', '/var/log/containers/cinder:/var/log/cinder'] > tripleo::profile::pacemaker::database::mysql::bind_address: '%{hiera(''fqdn_internal_api'')}' > tripleo::profile::pacemaker::database::mysql::ca_file: /etc/ipa/ca.crt > tripleo::profile::pacemaker::database::mysql::gmcast_listen_addr: internal_api > tripleo::profile::pacemaker::database::mysql_bundle::bind_address: '%{hiera(''fqdn_internal_api'')}' > tripleo::profile::pacemaker::database::mysql_bundle::control_port: 3123 > 
tripleo::profile::pacemaker::database::mysql_bundle::mysql_docker_image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-mariadb:pcmklatest > tripleo::profile::pacemaker::database::redis_bundle::control_port: 3124 > tripleo::profile::pacemaker::database::redis_bundle::redis_docker_image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-redis:pcmklatest > tripleo::profile::pacemaker::database::redis_bundle::tls_proxy_bind_ip: internal_api > tripleo::profile::pacemaker::database::redis_bundle::tls_proxy_fqdn: '%{hiera(''fqdn_internal_api'')}' > tripleo::profile::pacemaker::database::redis_bundle::tls_proxy_port: 6379 > tripleo::profile::pacemaker::haproxy_bundle::haproxy_docker_image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-haproxy:pcmklatest > tripleo::profile::pacemaker::haproxy_bundle::internal_certs_directory: /etc/pki/tls/certs/haproxy > tripleo::profile::pacemaker::haproxy_bundle::internal_keys_directory: /etc/pki/tls/private/haproxy > tripleo::profile::pacemaker::haproxy_bundle::tls_mapping: [/etc/ipa/ca.crt, > /etc/pki/tls/private/haproxy, /etc/pki/tls/certs/haproxy, /etc/pki/tls/private/overcloud_endpoint.pem] > tripleo::profile::pacemaker::rabbitmq_bundle::control_port: 3122 > tripleo::profile::pacemaker::rabbitmq_bundle::rabbitmq_docker_image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-rabbitmq:pcmklatest > tripleo::stunnel::foreground: 'yes' > tripleo::stunnel::manage_service: false > tripleo::trusted_cas::ca_map: {} > vswitch::ovs::enable_hw_offload: false > role_data_monitoring_subscriptions: [overcloud-pacemaker] > role_data_post_update_tasks: [] > role_data_post_upgrade_tasks: > - getent: {database: passwd, key: neutron} > ignore_errors: true > name: Check for neutron user > - name: Set neutron_user_avail > set_fact: {neutron_user_avail: '{{ getent_passwd is defined }}'} > - block: > - {become: true, name: Ensure read/write access for files created after upgrade, > shell: 'umask 0002 > > 
setfacl -d -R -m u:neutron:rwx /var/lib/neutron > > setfacl -R -m u:neutron:rw /var/lib/neutron > > find /var/lib/neutron -type d -exec setfacl -m u:neutron:rwx ''{}'' \; > > '} > - become: true > ignore_errors: true > name: Provide access for domain sockets > shell: 'umask 0002 > > setfacl -m u:neutron:rwx "{{ item }}" > > ' > with_items: [/var/lib/neutron/metadata_proxy, /var/lib/neutron] > when: [step|int == 2, neutron_user_avail|bool] > - block: > - {become: true, name: Ensure r/w access for existing files after upgrade, shell: 'umask > 0002 > > setfacl -d -R -m u:neutron:rwx /var/lib/neutron > > setfacl -R -m u:neutron:rw /var/lib/neutron > > find /var/lib/neutron -type d -exec setfacl -m u:neutron:rwx ''{}'' \; > > '} > - become: true > ignore_errors: true > name: Provide access to domain sockets > shell: 'umask 0002 > > setfacl -m u:neutron:rwx "{{ item }}" > > ' > with_items: [/var/lib/neutron/metadata_proxy, /var/lib/neutron/keepalived-state-change, > /var/lib/neutron] > when: [step|int == 2, neutron_user_avail|bool] > role_data_pre_upgrade_rolling_tasks: [] > role_data_puppet_config: > - {config_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-aodh-api:2018-06-15.2', > config_volume: aodh, puppet_tags: 'aodh_api_paste_ini,aodh_config', step_config: 'include > tripleo::profile::base::aodh::api > > > include ::tripleo::profile::base::database::mysql::client'} > - {config_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-aodh-api:2018-06-15.2', > config_volume: aodh, puppet_tags: aodh_config, step_config: 'include tripleo::profile::base::aodh::evaluator > > > include ::tripleo::profile::base::database::mysql::client'} > - {config_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-aodh-api:2018-06-15.2', > config_volume: aodh, puppet_tags: aodh_config, step_config: 'include tripleo::profile::base::aodh::listener > > > include ::tripleo::profile::base::database::mysql::client'} > - {config_image: 
'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-aodh-api:2018-06-15.2', > config_volume: aodh, puppet_tags: aodh_config, step_config: 'include tripleo::profile::base::aodh::notifier > > > include ::tripleo::profile::base::database::mysql::client'} > - {config_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-ceilometer-central:2018-06-15.2', > config_volume: ceilometer, puppet_tags: ceilometer_config, step_config: 'include > ::tripleo::profile::base::ceilometer::agent::polling > > '} > - {config_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-ceilometer-central:2018-06-15.2', > config_volume: ceilometer, puppet_tags: ceilometer_config, step_config: 'include > ::tripleo::profile::base::ceilometer::agent::notification > > '} > - {config_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-cinder-api:2018-06-15.2', > config_volume: cinder, puppet_tags: 'cinder_config,file,concat,file_line', step_config: 'include > ::tripleo::profile::base::cinder::api > > > include ::tripleo::profile::base::database::mysql::client'} > - {config_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-cinder-api:2018-06-15.2', > config_volume: cinder, puppet_tags: 'cinder_config,file,concat,file_line', step_config: 'include > ::tripleo::profile::base::cinder::scheduler > > > include ::tripleo::profile::base::database::mysql::client'} > - {config_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-cinder-api:2018-06-15.2', > config_volume: cinder, puppet_tags: 'cinder_config,file,concat,file_line', step_config: 'include > ::tripleo::profile::base::lvm > > include ::tripleo::profile::base::cinder::volume > > > include ::tripleo::profile::base::database::mysql::client'} > - {config_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-mariadb:2018-06-15.2', > config_volume: clustercheck, puppet_tags: file, step_config: 'include ::tripleo::profile::pacemaker::clustercheck'} > - 
{config_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-glance-api:2018-06-15.2', > config_volume: glance_api, puppet_tags: 'glance_api_config,glance_api_paste_ini,glance_swift_config,glance_cache_config', > step_config: 'include ::tripleo::profile::base::glance::api > > > include ::tripleo::profile::base::database::mysql::client'} > - {config_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-gnocchi-api:2018-06-15.2', > config_volume: gnocchi, puppet_tags: 'gnocchi_api_paste_ini,gnocchi_config', > step_config: 'include ::tripleo::profile::base::gnocchi::api > > '} > - {config_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-gnocchi-api:2018-06-15.2', > config_volume: gnocchi, puppet_tags: gnocchi_config, step_config: 'include ::tripleo::profile::base::gnocchi::metricd > > > include ::tripleo::profile::base::database::mysql::client'} > - {config_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-gnocchi-api:2018-06-15.2', > config_volume: gnocchi, puppet_tags: gnocchi_config, step_config: 'include ::tripleo::profile::base::gnocchi::statsd > > > include ::tripleo::profile::base::database::mysql::client'} > - config_image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-haproxy:2018-06-15.2 > config_volume: haproxy > puppet_tags: haproxy_config > step_config: 'exec {''wait-for-settle'': command => ''/bin/true'' } > > class tripleo::firewall(){}; define tripleo::firewall::rule( $port = undef, > $dport = undef, $sport = undef, $proto = undef, $action = undef, $state = > undef, $source = undef, $iniface = undef, $chain = undef, $destination = undef, > $extras = undef){} > > [''pcmk_bundle'', ''pcmk_resource'', ''pcmk_property'', ''pcmk_constraint'', > ''pcmk_resource_default''].each |String $val| { noop_resource($val) } > > include ::tripleo::profile::pacemaker::haproxy_bundle' > volumes: ['/etc/ipa/ca.crt:/etc/ipa/ca.crt:ro', 
'/etc/pki/tls/private/haproxy:/etc/pki/tls/private/haproxy:ro', > '/etc/pki/tls/certs/haproxy:/etc/pki/tls/certs/haproxy:ro', '/etc/pki/tls/private/overcloud_endpoint.pem:/etc/pki/tls/private/overcloud_endpoint.pem:ro'] > - {config_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-heat-api:2018-06-15.2', > config_volume: heat_api, puppet_tags: 'heat_config,file,concat,file_line', step_config: 'include > ::tripleo::profile::base::heat::api > > '} > - {config_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-heat-api-cfn:2018-06-15.2', > config_volume: heat_api_cfn, puppet_tags: 'heat_config,file,concat,file_line', > step_config: 'include ::tripleo::profile::base::heat::api_cfn > > '} > - {config_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-heat-api:2018-06-15.2', > config_volume: heat, puppet_tags: 'heat_config,file,concat,file_line', step_config: 'include > ::tripleo::profile::base::heat::engine > > > include ::tripleo::profile::base::database::mysql::client'} > - {config_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-horizon:2018-06-15.2', > config_volume: horizon, puppet_tags: horizon_config, step_config: 'include ::tripleo::profile::base::horizon > > '} > - config_image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-iscsid:2018-06-15.2 > config_volume: iscsid > puppet_tags: iscsid_config > step_config: include ::tripleo::profile::base::iscsid > volumes: ['/etc/iscsi:/etc/iscsi'] > - {config_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-keystone:2018-06-15.2', > config_volume: keystone, puppet_tags: 'keystone_config,keystone_domain_config', > step_config: '[''Keystone_user'', ''Keystone_endpoint'', ''Keystone_domain'', > ''Keystone_tenant'', ''Keystone_user_role'', ''Keystone_role'', ''Keystone_service''].each > |String $val| { noop_resource($val) } > > include ::tripleo::profile::base::keystone > > > include 
::tripleo::profile::base::database::mysql::client'} > - {config_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-memcached:2018-06-15.2', > config_volume: memcached, puppet_tags: file, step_config: 'include ::tripleo::profile::base::memcached > > '} > - {config_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-mariadb:2018-06-15.2', > config_volume: mysql, puppet_tags: file, step_config: '[''Mysql_datadir'', ''Mysql_user'', > ''Mysql_database'', ''Mysql_grant'', ''Mysql_plugin''].each |String $val| > { noop_resource($val) } > > exec {''wait-for-settle'': command => ''/bin/true'' } > > include ::tripleo::profile::pacemaker::database::mysql_bundle'} > - {config_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-neutron-server:2018-06-15.2', > config_volume: neutron, puppet_tags: 'neutron_config,neutron_api_config', step_config: 'include > tripleo::profile::base::neutron::server > > > include ::tripleo::profile::base::database::mysql::client'} > - {config_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-neutron-server:2018-06-15.2', > config_volume: neutron, puppet_tags: neutron_plugin_ml2, step_config: 'include > ::tripleo::profile::base::neutron::plugins::ml2 > > '} > - {config_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-neutron-server:2018-06-15.2', > config_volume: neutron, puppet_tags: 'neutron_config,neutron_dhcp_agent_config', > step_config: 'include tripleo::profile::base::neutron::dhcp > > '} > - {config_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-neutron-server:2018-06-15.2', > config_volume: neutron, puppet_tags: 'neutron_config,neutron_l3_agent_config', > step_config: 'include tripleo::profile::base::neutron::l3 > > '} > - {config_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-neutron-server:2018-06-15.2', > config_volume: neutron, puppet_tags: 'neutron_config,neutron_metadata_agent_config', > step_config: 
'include tripleo::profile::base::neutron::metadata > > '} > - config_image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-neutron-server:2018-06-15.2 > config_volume: neutron > puppet_tags: neutron_config,neutron_agent_ovs,neutron_plugin_ml2 > step_config: 'include ::tripleo::profile::base::neutron::ovs > > ' > volumes: ['/lib/modules:/lib/modules:ro', '/run/openvswitch:/run/openvswitch'] > - {config_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-nova-api:2018-06-15.2', > config_volume: nova, puppet_tags: nova_config, step_config: '[''Nova_cell_v2''].each > |String $val| { noop_resource($val) } > > include tripleo::profile::base::nova::api > > > include ::tripleo::profile::base::database::mysql::client'} > - {config_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-nova-api:2018-06-15.2', > config_volume: nova, puppet_tags: nova_config, step_config: 'include tripleo::profile::base::nova::conductor > > > include ::tripleo::profile::base::database::mysql::client'} > - {config_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-nova-api:2018-06-15.2', > config_volume: nova, puppet_tags: nova_config, step_config: 'include tripleo::profile::base::nova::consoleauth > > > include ::tripleo::profile::base::database::mysql::client'} > - {config_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-nova-api:2018-06-15.2', > config_volume: nova, puppet_tags: nova_config, step_config: ''} > - {config_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-nova-placement-api:2018-06-15.2', > config_volume: nova_placement, puppet_tags: nova_config, step_config: 'include > tripleo::profile::base::nova::placement > > > include ::tripleo::profile::base::database::mysql::client'} > - {config_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-nova-api:2018-06-15.2', > config_volume: nova, puppet_tags: nova_config, step_config: 'include 
tripleo::profile::base::nova::scheduler > > > include ::tripleo::profile::base::database::mysql::client'} > - {config_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-nova-api:2018-06-15.2', > config_volume: nova, puppet_tags: nova_config, step_config: 'include tripleo::profile::base::nova::vncproxy > > > include ::tripleo::profile::base::database::mysql::client'} > - {config_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-cron:2018-06-15.2', > config_volume: crond, step_config: 'include ::tripleo::profile::base::logging::logrotate'} > - {config_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-panko-api:2018-06-15.2', > config_volume: panko, puppet_tags: 'panko_api_paste_ini,panko_config', step_config: 'include > tripleo::profile::base::panko::api > > > include ::tripleo::profile::base::database::mysql::client'} > - {config_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-rabbitmq:2018-06-15.2', > config_volume: rabbitmq, puppet_tags: file, step_config: '[''Rabbitmq_policy'', > ''Rabbitmq_user''].each |String $val| { noop_resource($val) } > > include ::tripleo::profile::base::rabbitmq > > '} > - {config_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-redis:2018-06-15.2', > config_volume: redis, puppet_tags: exec, step_config: 'include ::tripleo::profile::pacemaker::database::redis_bundle'} > - {config_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-swift-proxy-server:2018-06-15.2', > config_volume: swift, puppet_tags: 'swift_config,swift_proxy_config,swift_keymaster_config', > step_config: 'include ::tripleo::profile::base::swift::proxy > > '} > - {config_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-swift-proxy-server:2018-06-15.2', > config_volume: swift_ringbuilder, puppet_tags: 
'exec,fetch_swift_ring_tarball,extract_swift_ring_tarball,ring_object_device,swift::ringbuilder::create,tripleo::profile::base::swift::add_devices,swift::ringbuilder::rebalance,create_swift_ring_tarball,upload_swift_ring_tarball', > step_config: 'include ::tripleo::profile::base::swift::ringbuilder'} > - {config_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-swift-proxy-server:2018-06-15.2', > config_volume: swift, puppet_tags: 'swift_config,swift_container_config,swift_container_sync_realms_config,swift_account_config,swift_object_config,swift_object_expirer_config,rsync::server', > step_config: 'include ::tripleo::profile::base::swift::storage > > > class xinetd() {}'} > role_data_service_config_settings: {} > role_data_service_metadata_settings: null > role_data_service_names: [aodh_api, aodh_evaluator, aodh_listener, aodh_notifier, > ca_certs, ceilometer_api_disabled, ceilometer_collector_disabled, ceilometer_expirer_disabled, > ceilometer_agent_central, ceilometer_agent_notification, cinder_api, cinder_scheduler, > cinder_volume, clustercheck, docker, glance_api, gnocchi_api, gnocchi_metricd, > gnocchi_statsd, haproxy, heat_api, heat_api_cloudwatch_disabled, heat_api_cfn, > heat_engine, horizon, iscsid, kernel, keystone, memcached, mongodb_disabled, > mysql, mysql_client, neutron_api, neutron_plugin_ml2, neutron_dhcp, neutron_l3, > neutron_metadata, neutron_ovs_agent, nova_api, nova_conductor, nova_consoleauth, > nova_metadata, nova_placement, nova_scheduler, nova_vnc_proxy, ntp, logrotate_crond, > pacemaker, panko_api, rabbitmq, redis, snmp, sshd, swift_proxy, swift_ringbuilder, > swift_storage, timezone, tripleo_firewall, tripleo_packages, tuned] > role_data_step_config: "# Copyright 2014 Red Hat, Inc.\n# All Rights Reserved.\n\ > #\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n\ > # not use this file except in compliance with the License. 
You may obtain\n\ > # a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n\ > #\n# Unless required by applicable law or agreed to in writing, software\n#\ > \ distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n\ > # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n\ > # License for the specific language governing permissions and limitations\n\ > # under the License.\n\n# Common config, from tripleo-heat-templates/puppet/manifests/overcloud_common.pp\n\ > # The content of this file will be used to generate\n# the puppet manifests\ > \ for all roles, the placeholder\n# Controller will be replaced by 'controller',\ > \ 'blockstorage',\n# 'cephstorage' and all the deployed roles.\n\nif hiera('step')\ > \ >= 4 {\n hiera_include('Controller_classes', [])\n}\n\n$package_manifest_name\ > \ = join(['/var/lib/tripleo/installed-packages/overcloud_Controller', hiera('step')])\n\ > package_manifest{$package_manifest_name: ensure => present}\n\n# End of overcloud_common.pp\n\ > \ninclude ::tripleo::trusted_cas\ninclude ::tripleo::profile::base::docker\n\ > \ninclude ::tripleo::profile::base::kernel\ninclude ::tripleo::profile::base::database::mysql::client\n\ > include ::tripleo::profile::base::time::ntp\ninclude ::tripleo::profile::base::pacemaker\n\ > \ninclude ::tripleo::profile::base::snmp\n\ninclude ::tripleo::profile::base::sshd\n\ > \ninclude ::timezone\ninclude ::tripleo::firewall\n\ninclude ::tripleo::packages\n\ > \ninclude ::tripleo::profile::base::tuned" > role_data_update_tasks: > - block: > - name: Get docker Cinder-Volume image > set_fact: {docker_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-cinder-volume:2018-06-15.2', > docker_image_latest: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-cinder-volume:pcmklatest'} > - {name: Get previous Cinder-Volume image id, register: cinder_volume_image_id, > shell: 'docker images | awk ''/cinder-volume.* 
pcmklatest/{print $3}'' | uniq'} > - block: > - {name: Get a list of container using Cinder-Volume image, register: cinder_volume_containers_to_destroy, > shell: 'docker ps -a -q -f ''ancestor={{cinder_volume_image_id.stdout}}'''} > - {name: Remove any container using the same Cinder-Volume image, shell: 'docker > rm -fv {{item}}', with_items: '{{ cinder_volume_containers_to_destroy.stdout_lines > }}'} > - {name: Remove previous Cinder-Volume images, shell: 'docker rmi -f {{cinder_volume_image_id.stdout}}'} > when: [cinder_volume_image_id.stdout != ''] > - {command: 'docker pull {{docker_image}}', name: Pull latest Cinder-Volume > images} > - {name: Retag pcmklatest to latest Cinder-Volume image, shell: 'docker tag > {{docker_image}} {{docker_image_latest}}'} > name: Cinder-Volume fetch and retag container image for pacemaker > when: step|int == 2 > - block: > - {failed_when: false, name: Detect if puppet on the docker profile would restart > the service, register: puppet_docker_noop_output, shell: "puppet apply --noop\ > \ --summarize --detailed-exitcodes --verbose \\\n --modulepath /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules\ > \ \\\n --color=false -e \"class { 'tripleo::profile::base::docker': step\ > \ => 1, }\" 2>&1 | \\\nawk -F \":\" '/Out of sync:/ { print $2}'\n"} > - {changed_when: docker_check_update.rc == 100, failed_when: 'docker_check_update.rc > not in [0, 100]', name: Is docker going to be updated, register: docker_check_update, > shell: yum check-update docker} > - {name: Set docker_rpm_needs_update fact, set_fact: 'docker_rpm_needs_update={{ > docker_check_update.rc == 100 }}'} > - {name: Set puppet_docker_is_outofsync fact, set_fact: 'puppet_docker_is_outofsync={{ > puppet_docker_noop_output.stdout|trim|int >= 1 }}'} > - {name: Stop all containers, shell: docker ps -q | xargs --no-run-if-empty > -n1 docker stop, when: puppet_docker_is_outofsync or docker_rpm_needs_update} > - name: Stop docker > service: 
{name: docker, state: stopped} > when: puppet_docker_is_outofsync or docker_rpm_needs_update > - {name: Update the docker package, when: docker_rpm_needs_update, yum: name=docker > state=latest update_cache=yes} > - {changed_when: puppet_docker_apply.rc == 2, failed_when: 'puppet_docker_apply.rc > not in [0, 2]', name: Apply puppet which will start the service again, register: puppet_docker_apply, > shell: "puppet apply --detailed-exitcodes --verbose \\\n --modulepath /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules\ > \ \\\n -e \"class { 'tripleo::profile::base::docker': step => 1, }\"\n"} > when: step|int == 2 > - block: > - name: Check for haproxy Kolla configuration > register: haproxy_kolla_config > stat: {path: /var/lib/config-data/puppet-generated/haproxy} > - name: Check if haproxy is already containerized > set_fact: {haproxy_containerized: '{{haproxy_kolla_config.stat.isdir | default(false)}}'} > - {command: hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid, name: get bootstrap > nodeid, register: bootstrap_node, tags: common} > - {name: set is_bootstrap_node fact, set_fact: 'is_bootstrap_node={{bootstrap_node.stdout|lower > == ansible_hostname|lower}}', tags: common} > name: Set HAProxy upgrade facts > - block: > - {command: 'cibadmin --query --xpath "//storage-mapping[@id=''haproxy-cert'']"', > ignore_errors: true, name: Check haproxy public certificate configuration > in pacemaker, register: haproxy_cert_mounted} > - name: Disable the haproxy cluster resource > pacemaker_resource: {resource: haproxy-bundle, state: disable, wait_for_resource: true} > register: output > retries: 5 > until: output.rc == 0 > when: haproxy_cert_mounted.rc == 6 > - name: Set HAProxy public cert volume mount fact > set_fact: {haproxy_public_cert_path: /etc/pki/tls/private/overcloud_endpoint.pem, > haproxy_public_tls_enabled: false} > - {command: 'pcs resource bundle update haproxy-bundle storage-map add id=haproxy-cert > source-dir={{ 
haproxy_public_cert_path }} target-dir=/var/lib/kolla/config_files/src-tls/{{ > haproxy_public_cert_path }} options=ro', name: Add a bind mount for public > certificate in the haproxy bundle, when: haproxy_cert_mounted.rc == 6 and > haproxy_public_tls_enabled|bool} > - name: Enable the haproxy cluster resource > pacemaker_resource: {resource: haproxy-bundle, state: enable, wait_for_resource: true} > register: output > retries: 5 > until: output.rc == 0 > when: haproxy_cert_mounted.rc == 6 > name: Mount TLS cert if needed > when: [step|int == 1, haproxy_containerized|bool, is_bootstrap_node] > - block: > - name: Get docker Haproxy image > set_fact: {docker_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-haproxy:2018-06-15.2', > docker_image_latest: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-haproxy:pcmklatest'} > - {name: Get previous Haproxy image id, register: haproxy_image_id, shell: 'docker > images | awk ''/haproxy.* pcmklatest/{print $3}'' | uniq'} > - block: > - {name: Get a list of container using Haproxy image, register: haproxy_containers_to_destroy, > shell: 'docker ps -a -q -f ''ancestor={{haproxy_image_id.stdout}}'''} > - {name: Remove any container using the same Haproxy image, shell: 'docker > rm -fv {{item}}', with_items: '{{ haproxy_containers_to_destroy.stdout_lines > }}'} > - {name: Remove previous Haproxy images, shell: 'docker rmi -f {{haproxy_image_id.stdout}}'} > when: [haproxy_image_id.stdout != ''] > - {command: 'docker pull {{docker_image}}', name: Pull latest Haproxy images} > - {name: Retag pcmklatest to latest Haproxy image, shell: 'docker tag {{docker_image}} > {{docker_image_latest}}'} > name: Haproxy fetch and retag container image for pacemaker > when: step|int == 2 > - block: > - name: Get docker Mariadb image > set_fact: {docker_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-mariadb:2018-06-15.2', > docker_image_latest: 
'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-mariadb:pcmklatest'} > - {name: Get previous Mariadb image id, register: mariadb_image_id, shell: 'docker > images | awk ''/mariadb.* pcmklatest/{print $3}'' | uniq'} > - block: > - {name: Get a list of container using Mariadb image, register: mariadb_containers_to_destroy, > shell: 'docker ps -a -q -f ''ancestor={{mariadb_image_id.stdout}}'''} > - {name: Remove any container using the same Mariadb image, shell: 'docker > rm -fv {{item}}', with_items: '{{ mariadb_containers_to_destroy.stdout_lines > }}'} > - {name: Remove previous Mariadb images, shell: 'docker rmi -f {{mariadb_image_id.stdout}}'} > when: [mariadb_image_id.stdout != ''] > - {command: 'docker pull {{docker_image}}', name: Pull latest Mariadb images} > - {name: Retag pcmklatest to latest Mariadb image, shell: 'docker tag {{docker_image}} > {{docker_image_latest}}'} > name: Mariadb fetch and retag container image for pacemaker > when: step|int == 2 > - {lineinfile: dest=/etc/sysconfig/iptables regexp=".*neutron-" state=absent, > name: Remove IPv4 iptables rules created by Neutron that are persistent, when: step|int > == 5} > - {lineinfile: dest=/etc/sysconfig/ip6tables regexp=".*neutron-" state=absent, > name: Remove IPv6 iptables rules created by Neutron that are persistent, when: step|int > == 5} > - {async: 30, name: Check pacemaker cluster running before the minor update, pacemaker_cluster: state=online > check_and_fail=true, poll: 4, when: step|int == 0} > - {name: Stop pacemaker cluster, pacemaker_cluster: state=offline, when: step|int > == 1} > - {name: Start pacemaker cluster, pacemaker_cluster: state=online, when: step|int > == 4} > - block: > - name: Get docker Rabbitmq image > set_fact: {docker_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-rabbitmq:2018-06-15.2', > docker_image_latest: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-rabbitmq:pcmklatest'} > - {name: Get previous Rabbitmq image 
id, register: rabbitmq_image_id, shell: 'docker > images | awk ''/rabbitmq.* pcmklatest/{print $3}'' | uniq'} > - block: > - {name: Get a list of container using Rabbitmq image, register: rabbitmq_containers_to_destroy, > shell: 'docker ps -a -q -f ''ancestor={{rabbitmq_image_id.stdout}}'''} > - {name: Remove any container using the same Rabbitmq image, shell: 'docker > rm -fv {{item}}', with_items: '{{ rabbitmq_containers_to_destroy.stdout_lines > }}'} > - {name: Remove previous Rabbitmq images, shell: 'docker rmi -f {{rabbitmq_image_id.stdout}}'} > when: [rabbitmq_image_id.stdout != ''] > - {command: 'docker pull {{docker_image}}', name: Pull latest Rabbitmq images} > - {name: Retag pcmklatest to latest Rabbitmq image, shell: 'docker tag {{docker_image}} > {{docker_image_latest}}'} > name: Rabbit fetch and retag container image for pacemaker > when: step|int == 2 > - block: > - name: Get docker Redis image > set_fact: {docker_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-redis:2018-06-15.2', > docker_image_latest: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-redis:pcmklatest'} > - {name: Get previous Redis image id, register: redis_image_id, shell: 'docker > images | awk ''/redis.* pcmklatest/{print $3}'' | uniq'} > - block: > - {name: Get a list of container using Redis image, register: redis_containers_to_destroy, > shell: 'docker ps -a -q -f ''ancestor={{redis_image_id.stdout}}'''} > - {name: Remove any container using the same Redis image, shell: 'docker rm > -fv {{item}}', with_items: '{{ redis_containers_to_destroy.stdout_lines > }}'} > - {name: Remove previous Redis images, shell: 'docker rmi -f {{redis_image_id.stdout}}'} > when: [redis_image_id.stdout != ''] > - {command: 'docker pull {{docker_image}}', name: Pull latest Redis images} > - {name: Retag pcmklatest to latest Redis image, shell: 'docker tag {{docker_image}} > {{docker_image_latest}}'} > name: Redis fetch and retag container image for pacemaker > when: 
step|int == 2 > - file: {path: /var/run/rsyncd.pid, state: absent} > name: Ensure rsyncd pid file is absent > - {name: Check for existing yum.pid, register: yum_pid_file, stat: path=/var/run/yum.pid, > when: step|int == 0 or step|int == 3} > - {fail: msg="ERROR existing yum.pid detected - can't continue! Please ensure > there is no other package update process for the duration of the minor update > worfklow. Exiting.", name: Exit if existing yum process, when: (step|int == > 0 or step|int == 3) and yum_pid_file.stat.exists} > - {name: Update all packages, when: step == "3", yum: name=* state=latest update_cache=yes} > role_data_upgrade_batch_tasks: [] > role_data_upgrade_tasks: > - {ignore_errors: true, name: Check for aodh api service running under apache, > register: httpd_enabled, shell: httpd -t -D DUMP_VHOSTS | grep -q aodh, tags: common} > - {command: systemctl is-active --quiet httpd, ignore_errors: true, name: Check > if httpd is running, register: httpd_running, tags: common} > - name: 'PreUpgrade step0,validation: Check if aodh api is running' > shell: systemctl status 'httpd' | grep -q aodh > tags: validation > when: [step|int == 0, httpd_enabled.rc == 0, httpd_running.rc == 0] > - name: Stop and disable aodh service (running under httpd) > service: name=httpd state=stopped enabled=no > when: [step|int == 2, httpd_enabled.rc == 0, httpd_running.rc == 0] > - name: Set fact for removal of openstack-aodh-api package > set_fact: {remove_aodh_api_package: false} > when: step|int == 2 > - ignore_errors: true > name: Remove openstack-aodh-api package if operator requests it > when: [step|int == 2, remove_aodh_api_package|bool] > yum: name=openstack-aodh-api state=removed > - {command: systemctl is-enabled --quiet openstack-aodh-evaluator, ignore_errors: true, > name: Check if aodh_evaluator is deployed, register: aodh_evaluator_enabled, > tags: common} > - command: systemctl is-active --quiet openstack-aodh-evaluator > name: 'PreUpgrade step0,validation: Check 
service openstack-aodh-evaluator is > running' > tags: validation > when: [step|int == 0, aodh_evaluator_enabled.rc == 0] > - name: Stop and disable openstack-aodh-evaluator service > service: name=openstack-aodh-evaluator.service state=stopped enabled=no > when: [step|int == 2, aodh_evaluator_enabled.rc == 0] > - name: Set fact for removal of openstack-aodh-evaluator package > set_fact: {remove_aodh_evaluator_package: false} > when: step|int == 2 > - ignore_errors: true > name: Remove openstack-aodh-evaluator package if operator requests it > when: [step|int == 2, remove_aodh_evaluator_package|bool] > yum: name=openstack-aodh-evaluator state=removed > - {command: systemctl is-enabled --quiet openstack-aodh-listener, ignore_errors: true, > name: Check if aodh_listener is deployed, register: aodh_listener_enabled, tags: common} > - command: systemctl is-active --quiet openstack-aodh-listener > name: 'PreUpgrade step0,validation: Check service openstack-aodh-listener is > running' > tags: validation > when: [step|int == 0, aodh_listener_enabled.rc == 0] > - name: Stop and disable openstack-aodh-listener service > service: name=openstack-aodh-listener.service state=stopped enabled=no > when: [step|int == 2, aodh_listener_enabled.rc == 0] > - name: Set fact for removal of openstack-aodh-listener package > set_fact: {remove_aodh_listener_package: false} > when: step|int == 2 > - ignore_errors: true > name: Remove openstack-aodh-listener package if operator requests it > when: [step|int == 2, remove_aodh_listener_package|bool] > yum: name=openstack-aodh-listener state=removed > - {command: systemctl is-enabled --quiet openstack-aodh-notifier, ignore_errors: true, > name: Check if aodh_notifier is deployed, register: aodh_notifier_enabled, tags: common} > - command: systemctl is-active --quiet openstack-aodh-notifier > name: 'PreUpgrade step0,validation: Check service openstack-aodh-notifier is > running' > tags: validation > when: [step|int == 0, aodh_notifier_enabled.rc 
== 0] > - name: Stop and disable openstack-aodh-notifier service > service: name=openstack-aodh-notifier.service state=stopped enabled=no > when: [step|int == 2, aodh_notifier_enabled.rc == 0] > - name: Set fact for removal of openstack-aodh-notifier package > set_fact: {remove_aodh_notifier_package: false} > when: step|int == 2 > - ignore_errors: true > name: Remove openstack-aodh-notifier package if operator requests it > when: [step|int == 2, remove_aodh_notifier_package|bool] > yum: name=openstack-aodh-notifier state=removed > - {command: systemctl is-enabled --quiet openstack-ceilometer-central, ignore_errors: true, > name: Check if ceilometer_agent_central is deployed, register: ceilometer_agent_central_enabled, > tags: common} > - command: systemctl is-active --quiet openstack-ceilometer-central > name: 'PreUpgrade step0,validation: Check service openstack-ceilometer-central > is running' > tags: validation > when: [step|int == 0, ceilometer_agent_central_enabled.rc == 0] > - name: Stop and disable ceilometer agent central service > service: name=openstack-ceilometer-central state=stopped enabled=no > when: [step|int == 2, ceilometer_agent_central_enabled.rc == 0] > - name: Set fact for removal of openstack-ceilometer-central package > set_fact: {remove_ceilometer_central_package: false} > when: step|int == 2 > - ignore_errors: true > name: Remove openstack-ceilometer-central package if operator requests it > when: [step|int == 2, remove_ceilometer_central_package|bool] > yum: name=openstack-ceilometer-central state=removed > - {command: systemctl is-enabled --quiet openstack-ceilometer-notification, ignore_errors: true, > name: Check if ceilometer_agent_notification is deployed, register: ceilometer_agent_notification_enabled, > tags: common} > - command: systemctl is-active --quiet openstack-ceilometer-notification > name: 'PreUpgrade step0,validation: Check service openstack-ceilometer-notification > is running' > tags: validation > when: [step|int == 0, 
ceilometer_agent_notification_enabled.rc == 0] > - name: Stop and disable ceilometer agent notification service > service: name=openstack-ceilometer-notification state=stopped enabled=no > when: [step|int == 2, ceilometer_agent_notification_enabled.rc == 0] > - name: Set fact for removal of openstack-ceilometer-notification package > set_fact: {remove_ceilometer_notification_package: false} > when: step|int == 2 > - ignore_errors: true > name: Remove openstack-ceilometer-notification package if operator requests > it > when: [step|int == 2, remove_ceilometer_notification_package|bool] > yum: name=openstack-ceilometer-notification state=removed > - {command: systemctl is-enabled openstack-cinder-api, ignore_errors: true, name: Check > is cinder_api is deployed, register: cinder_api_enabled, tags: common} > - name: 'PreUpgrade step0,validation: Check service openstack-cinder-api is running' > shell: systemctl is-active --quiet openstack-cinder-api > tags: validation > when: [step|int == 0, cinder_api_enabled.rc == 0] > - name: Stop and disable cinder_api service (pre-upgrade not under httpd) > service: name=openstack-cinder-api state=stopped enabled=no > when: [step|int == 2, cinder_api_enabled.rc == 0] > - {ignore_errors: true, name: check for cinder_api running under apache (post > upgrade), register: cinder_api_apache, shell: httpd -t -D DUMP_VHOSTS | grep > -q cinder, when: step|int == 2} > - name: Stop and disable cinder_api service > service: name=httpd state=stopped enabled=no > when: [step|int == 2, cinder_api_apache.rc == 0] > - file: {path: /var/spool/cron/cinder, state: absent} > name: remove old cinder cron jobs > when: step|int == 2 > - name: Set fact for removal of httpd package > set_fact: {remove_httpd_package: false} > when: step|int == 2 > - ignore_errors: true > name: Remove httpd package if operator requests it > when: [step|int == 2, remove_httpd_package|bool] > yum: name=httpd state=removed > - {command: systemctl is-enabled 
openstack-cinder-scheduler, ignore_errors: true, > name: Check if cinder_scheduler is deployed, register: cinder_scheduler_enabled, > tags: common} > - name: 'PreUpgrade step0,validation: Check service openstack-cinder-scheduler > is running' > shell: systemctl is-active --quiet openstack-cinder-scheduler > tags: validation > when: [step|int == 0, cinder_scheduler_enabled.rc == 0] > - name: Stop and disable cinder_scheduler service > service: name=openstack-cinder-scheduler state=stopped enabled=no > when: [step|int == 2, cinder_scheduler_enabled.rc == 0] > - name: Set fact for removal of openstack-cinder package > set_fact: {remove_cinder_package: false} > when: step|int == 2 > - ignore_errors: true > name: Remove openstack-cinder package if operator requests it > when: [step|int == 2, remove_cinder_package|bool] > yum: name=openstack-cinder state=removed > - name: Get docker Cinder-Volume image > set_fact: {cinder_volume_docker_image_latest: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-cinder-volume:pcmklatest'} > - {changed_when: false, command: 'grep ''^volume_driver[ \t]*='' /var/lib/config-data/puppet-generated/cinder/etc/cinder/cinder.conf', > ignore_errors: true, name: Check for Cinder-Volume Kolla configuration, register: cinder_volume_kolla_config} > - name: Check if Cinder-Volume is already containerized > set_fact: {cinder_volume_containerized: '{{cinder_volume_kolla_config|succeeded}}'} > - block: > - {command: hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid, name: get bootstrap > nodeid, register: bootstrap_node, tags: common} > - {name: set is_bootstrap_node fact, set_fact: 'is_bootstrap_node={{bootstrap_node.stdout|lower > == ansible_hostname|lower}}', tags: common} > - ignore_errors: true > name: Check cluster resource status > pacemaker_resource: {check_mode: false, resource: openstack-cinder-volume, > state: show} > register: cinder_volume_res > - block: > - name: Disable the openstack-cinder-volume cluster resource > 
pacemaker_resource: {resource: openstack-cinder-volume, state: disable, > wait_for_resource: true} > register: output > retries: 5 > until: output.rc == 0 > - name: Delete the stopped openstack-cinder-volume cluster resource. > pacemaker_resource: {resource: openstack-cinder-volume, state: delete, wait_for_resource: true} > register: output > retries: 5 > until: output.rc == 0 > when: (is_bootstrap_node) and (cinder_volume_res|succeeded) > - {name: Disable cinder_volume service from boot, service: name=openstack-cinder-volume > enabled=no} > name: Cinder-Volume baremetal to container upgrade tasks > when: [step|int == 1, not cinder_volume_containerized|bool] > - block: > - {name: Get cinder_volume image id currently used by pacemaker, register: cinder_volume_current_pcmklatest_id, > shell: 'docker images | awk ''/cinder-volume.* pcmklatest/{print $3}'' | uniq'} > - {name: Temporarily tag the current cinder_volume image id with the upgraded > image name, shell: 'docker tag {{cinder_volume_current_pcmklatest_id.stdout}} > {{cinder_volume_docker_image_latest}}'} > name: Prepare the switch to new cinder_volume container image name in pacemaker > when: [step|int == 0, cinder_volume_containerized|bool] > - ignore_errors: true > name: Check openstack-cinder-volume cluster resource status > pacemaker_resource: {check_mode: false, resource: openstack-cinder-volume, state: show} > register: cinder_volume_pcs_res > - block: > - name: Disable the cinder_volume cluster resource before container upgrade > pacemaker_resource: {resource: openstack-cinder-volume, state: disable, wait_for_resource: true} > register: output > retries: 5 > until: output.rc == 0 > - {command: 'pcs resource bundle update openstack-cinder-volume container image={{cinder_volume_docker_image_latest}}', > name: pcs resource bundle update cinder_volume for new container image name} > - name: Enable the cinder_volume cluster resource > pacemaker_resource: {resource: openstack-cinder-volume, state: enable, 
wait_for_resource: true} > register: output > retries: 5 > until: output.rc == 0 > when: null > name: Update cinder_volume pcs resource bundle for new container image > when: [step|int == 1, cinder_volume_containerized|bool, is_bootstrap_node, cinder_volume_pcs_res|succeeded] > - block: > - name: Get docker Cinder-Volume image > set_fact: {docker_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-cinder-volume:2018-06-15.2', > docker_image_latest: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-cinder-volume:pcmklatest'} > - {name: Get previous Cinder-Volume image id, register: cinder_volume_image_id, > shell: 'docker images | awk ''/cinder-volume.* pcmklatest/{print $3}'' | uniq'} > - block: > - {name: Get a list of container using Cinder-Volume image, register: cinder_volume_containers_to_destroy, > shell: 'docker ps -a -q -f ''ancestor={{cinder_volume_image_id.stdout}}'''} > - {name: Remove any container using the same Cinder-Volume image, shell: 'docker > rm -fv {{item}}', with_items: '{{ cinder_volume_containers_to_destroy.stdout_lines > }}'} > - {name: Remove previous Cinder-Volume images, shell: 'docker rmi -f {{cinder_volume_image_id.stdout}}'} > when: [cinder_volume_image_id.stdout != ''] > - {command: 'docker pull {{docker_image}}', name: Pull latest Cinder-Volume > images} > - {name: Retag pcmklatest to latest Cinder-Volume image, shell: 'docker tag > {{docker_image}} {{docker_image_latest}}'} > name: Retag the pacemaker image if containerized > when: [step|int == 3, cinder_volume_containerized|bool] > - {name: Install docker packages on upgrade if missing, when: step|int == 3, yum: name=docker > state=latest} > - {command: systemctl is-enabled --quiet openstack-glance-api, ignore_errors: true, > name: Check if glance_api is deployed, register: glance_api_enabled, tags: common} > - command: systemctl is-active --quiet openstack-glance-api > name: 'PreUpgrade step0,validation: Check service openstack-glance-api is running' 
> tags: validation > when: [step|int == 0, glance_api_enabled.rc == 0] > - name: Stop and disable glance_api service > service: name=openstack-glance-api state=stopped enabled=no > when: [step|int == 2, glance_api_enabled.rc == 0] > - name: Set fact for removal of openstack-glance package > set_fact: {remove_glance_package: false} > when: step|int == 2 > - ignore_errors: true > name: Remove openstack-glance package if operator requests it > when: [step|int == 2, remove_glance_package|bool] > yum: name=openstack-glance state=removed > - {command: systemctl is-enabled --quiet openstack-gnocchi-api, ignore_errors: true, > name: Check if gnocchi_api is deployed, register: gnocchi_api_enabled, tags: common} > - {ignore_errors: true, name: Check for gnocchi_api running under apache, register: httpd_enabled, > shell: httpd -t -D DUMP_VHOSTS | grep -q gnocchi, tags: common} > - command: systemctl is-active --quiet openstack-gnocchi-api > name: 'PreUpgrade step0,validation: Check service openstack-gnocchi-api is running' > tags: validation > when: [step|int == 0, gnocchi_api_enabled.rc == 0, httpd_enabled.rc != 0] > - name: Stop and disable gnocchi_api service > service: name=openstack-gnocchi-api state=stopped enabled=no > when: [step|int == 2, gnocchi_api_enabled.rc == 0, httpd_enabled.rc != 0] > - {command: systemctl is-active --quiet httpd, ignore_errors: true, name: Check > if httpd service is running, register: httpd_running, tags: common} > - name: 'PreUpgrade step0,validation: Check if gnocchi_api_wsgi is running' > shell: systemctl status 'httpd' | grep -q gnocchi > tags: validation > when: [step|int == 0, httpd_enabled.rc == 0, httpd_running.rc == 0] > - name: Stop and disable httpd service > service: name=httpd state=stopped enabled=no > when: [step|int == 2, httpd_enabled.rc == 0, httpd_running.rc == 0] > - {command: systemctl is-enabled --quiet openstack-gnocchi-metricd, ignore_errors: true, > name: Check if gnocchi_metricd is deployed, register: 
gnocchi_metricd_enabled, > tags: common} > - command: systemctl is-active --quiet openstack-gnocchi-metricd > name: 'PreUpgrade step0,validation: Check service openstack-gnocchi-metricd > is running' > tags: validation > when: [step|int == 0, gnocchi_metricd_enabled.rc == 0] > - name: Stop and disable openstack-gnocchi-metricd service > service: name=openstack-gnocchi-metricd.service state=stopped enabled=no > when: [step|int == 2, gnocchi_metricd_enabled.rc == 0] > - {command: systemctl is-enabled --quiet openstack-gnocchi-statsd, ignore_errors: true, > name: Check if gnocchi_statsd is deployed, register: gnocchi_statsd_enabled, > tags: common} > - command: systemctl is-active --quiet openstack-gnocchi-statsd > name: 'PreUpgrade step0,validation: Check service openstack-gnocchi-statsd is > running' > tags: validation > when: [step|int == 0, gnocchi_statsd_enabled.rc == 0] > - name: Stop and disable openstack-gnocchi-statsd service > service: name=openstack-gnocchi-statsd.service state=stopped enabled=no > when: [step|int == 2, gnocchi_statsd_enabled.rc == 0] > - name: Get docker haproxy image > set_fact: {haproxy_docker_image_latest: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-haproxy:pcmklatest'} > - block: > - name: Check for haproxy Kolla configuration > register: haproxy_kolla_config > stat: {path: /var/lib/config-data/puppet-generated/haproxy} > - name: Check if haproxy is already containerized > set_fact: {haproxy_containerized: '{{haproxy_kolla_config.stat.isdir | default(false)}}'} > - {command: hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid, name: get bootstrap > nodeid, register: bootstrap_node, tags: common} > - {name: set is_bootstrap_node fact, set_fact: 'is_bootstrap_node={{bootstrap_node.stdout|lower > == ansible_hostname|lower}}', tags: common} > name: Set HAProxy upgrade facts > - block: > - ignore_errors: true > name: Check cluster resource status > pacemaker_resource: {check_mode: true, resource: haproxy, state: started} > 
register: haproxy_res > - block: > - name: Disable the haproxy cluster resource. > pacemaker_resource: {resource: haproxy, state: disable, wait_for_resource: true} > register: output > retries: 5 > until: output.rc == 0 > - name: Delete the stopped haproxy cluster resource. > pacemaker_resource: {resource: haproxy, state: delete, wait_for_resource: true} > register: output > retries: 5 > until: output.rc == 0 > when: (is_bootstrap_node) and (haproxy_res|succeeded) > name: haproxy baremetal to container upgrade tasks > when: [step|int == 1, not haproxy_containerized|bool] > - block: > - {name: Get haproxy image id currently used by pacemaker, register: haproxy_current_pcmklatest_id, > shell: 'docker images | awk ''/haproxy.* pcmklatest/{print $3}'' | uniq'} > - {name: Temporarily tag the current haproxy image id with the upgraded image > name, shell: 'docker tag {{haproxy_current_pcmklatest_id.stdout}} {{haproxy_docker_image_latest}}'} > name: Prepare the switch to new haproxy container image name in pacemaker > when: [step|int == 0, haproxy_containerized|bool] > - ignore_errors: true > name: Check haproxy-bundle cluster resource status > pacemaker_resource: {check_mode: false, resource: haproxy-bundle, state: show} > register: haproxy_pcs_res > - block: > - name: Disable the haproxy cluster resource before container upgrade > pacemaker_resource: {resource: haproxy-bundle, state: disable, wait_for_resource: true} > register: output > retries: 5 > until: output.rc == 0 > - block: > - {command: 'cibadmin --query --xpath "//storage-mapping[@id=''haproxy-var-lib'']"', > ignore_errors: true, name: Check haproxy stats socket configuration in pacemaker, > register: haproxy_stats_exposed} > - {command: 'cibadmin --query --xpath "//storage-mapping[@id=''haproxy-cert'']"', > ignore_errors: true, name: Check haproxy public certificate configuration > in pacemaker, register: haproxy_cert_mounted} > - {command: pcs resource bundle update haproxy-bundle storage-map add 
id=haproxy-var-lib > source-dir=/var/lib/haproxy target-dir=/var/lib/haproxy options=rw, name: Add > a bind mount for stats socket in the haproxy bundle, when: haproxy_stats_exposed.rc > == 6} > - name: Set HAProxy public cert volume mount fact > set_fact: {haproxy_public_cert_path: /etc/pki/tls/private/overcloud_endpoint.pem, > haproxy_public_tls_enabled: false} > - command: pcs resource bundle update haproxy-bundle storage-map add id=haproxy-cert > source-dir={{ haproxy_public_cert_path }} target-dir=/var/lib/kolla/config_files/src-tls/{{ > haproxy_public_cert_path }} options=ro > name: Add a bind mount for public certificate in the haproxy bundle > when: [haproxy_cert_mounted.rc == 6, haproxy_public_tls_enabled|bool] > name: Expose HAProxy stats socket on the host and mount TLS cert if needed > - {command: 'pcs resource bundle update haproxy-bundle container image={{haproxy_docker_image_latest}}', > name: Update the haproxy bundle to use the new container image name} > - name: Enable the haproxy cluster resource > pacemaker_resource: {resource: haproxy-bundle, state: enable, wait_for_resource: true} > register: output > retries: 5 > until: output.rc == 0 > name: Update haproxy pcs resource bundle for new container image > when: [step|int == 1, haproxy_containerized|bool, is_bootstrap_node, haproxy_pcs_res|succeeded] > - block: > - name: Get docker Haproxy image > set_fact: {docker_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-haproxy:2018-06-15.2', > docker_image_latest: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-haproxy:pcmklatest'} > - {name: Get previous Haproxy image id, register: haproxy_image_id, shell: 'docker > images | awk ''/haproxy.* pcmklatest/{print $3}'' | uniq'} > - block: > - {name: Get a list of container using Haproxy image, register: haproxy_containers_to_destroy, > shell: 'docker ps -a -q -f ''ancestor={{haproxy_image_id.stdout}}'''} > - {name: Remove any container using the same Haproxy image, shell: 
'docker > rm -fv {{item}}', with_items: '{{ haproxy_containers_to_destroy.stdout_lines > }}'} > - {name: Remove previous Haproxy images, shell: 'docker rmi -f {{haproxy_image_id.stdout}}'} > when: [haproxy_image_id.stdout != ''] > - {command: 'docker pull {{docker_image}}', name: Pull latest Haproxy images} > - {name: Retag pcmklatest to latest Haproxy image, shell: 'docker tag {{docker_image}} > {{docker_image_latest}}'} > name: Retag the pacemaker image if containerized > when: [step|int == 3, haproxy_containerized|bool] > - {command: systemctl is-enabled --quiet openstack-heat-api, ignore_errors: true, > name: Check if heat_api is deployed, register: heat_api_enabled, tags: common} > - {ignore_errors: true, name: Check for heat_api running under apache, register: httpd_enabled, > shell: httpd -t -D DUMP_VHOSTS | grep -q heat_api_wsgi, tags: common} > - command: systemctl is-active --quiet openstack-heat-api > name: 'PreUpgrade step0,validation: Check service openstack-heat-api is running' > tags: validation > when: [step|int == 0, heat_api_enabled.rc == 0, httpd_enabled.rc != 0] > - name: Stop and disable heat_api service (pre-upgrade not under httpd) > service: name=openstack-heat-api state=stopped enabled=no > when: [step|int == 2, heat_api_enabled.rc == 0, httpd_enabled.rc != 0] > - name: 'PreUpgrade step0,validation: Check if heat_api_wsgi is running' > shell: systemctl status 'httpd' | grep -q heat_api_wsgi > tags: validation > when: [step|int == 0, httpd_enabled.rc == 0, httpd_running.rc == 0] > - name: Stop heat_api service (running under httpd) > service: name=httpd state=stopped > when: [step|int == 2, httpd_enabled.rc == 0, httpd_running.rc == 0] > - file: {path: /var/spool/cron/heat, state: absent} > name: remove old heat cron jobs > when: step|int == 2 > - {command: systemctl is-enabled openstack-heat-api-cloudwatch, ignore_errors: true, > name: Check if heat_api_cloudwatch is deployed, register: heat_api_cloudwatch_enabled, > when: step|int == 1} > 
- name: Stop and disable heat_api_cloudwatch service (pre-upgrade not under httpd) > service: name=openstack-heat-api-cloudwatch state=stopped enabled=no > when: [step|int == 1, heat_api_cloudwatch_enabled.rc == 0] > - {command: systemctl is-enabled --quiet openstack-heat-api-cfn, ignore_errors: true, > name: Check if heat_api_cfn is deployed, register: heat_api_cfn_enabled, tags: common} > - {ignore_errors: true, name: Check for heat_api_cfn running under apache, register: httpd_enabled, > shell: httpd -t -D DUMP_VHOSTS | grep -q heat_api_cfn_wsgi, tags: common} > - command: systemctl is-active --quiet openstack-heat-api-cfn > name: 'PreUpgrade step0,validation: Check service openstack-heat-api-cfn is > running' > tags: validation > when: [step|int == 0, heat_api_cfn_enabled.rc == 0, httpd_enabled.rc != 0] > - name: Stop and disable heat_api_cfn service (pre-upgrade not under httpd) > service: name=openstack-heat-api-cfn state=stopped enabled=no > when: [step|int == 2, heat_api_cfn_enabled.rc == 0, httpd_enabled.rc != 0] > - name: 'PreUpgrade step0,validation: Check if heat_api_cfn_wsgi is running' > shell: systemctl status 'httpd' | grep -q heat_api_cfn_wsgi > tags: validation > when: [step|int == 0, httpd_enabled.rc == 0, httpd_running.rc == 0] > - name: Stop heat_api_cfn service (running under httpd) > service: name=httpd state=stopped > when: [step|int == 2, httpd_enabled.rc == 0, httpd_running.rc == 0] > - {command: systemctl is-enabled --quiet openstack-heat-engine, ignore_errors: true, > name: Check if heat_engine is deployed, register: heat_engine_enabled, tags: common} > - command: systemctl is-active --quiet openstack-heat-engine > name: 'PreUpgrade step0,validation: Check service openstack-heat-engine is running' > tags: validation > when: [step|int == 0, heat_engine_enabled.rc == 0] > - name: Stop and disable heat_engine service > service: name=openstack-heat-engine state=stopped enabled=no > when: [step|int == 2, heat_engine_enabled.rc == 0] > - 
{ignore_errors: true, name: Check for horizon running under apache, register: httpd_enabled, > shell: httpd -t -D DUMP_VHOSTS | grep -q horizon_vhost, tags: common} > - name: 'PreUpgrade step0,validation: Check if horizon is running' > shell: systemctl is-active --quiet httpd > tags: validation > when: [step|int == 0, httpd_enabled.rc == 0] > - name: Stop and disable horizon service (running under httpd) > service: name=httpd state=stopped enabled=no > when: [step|int == 2, httpd_enabled.rc == 0] > - {command: systemctl is-enabled --quiet iscsid, ignore_errors: true, name: Check > if iscsid service is deployed, register: iscsid_enabled, tags: common} > - command: systemctl is-active --quiet iscsid > name: 'PreUpgrade step0,validation: Check if iscsid is running' > tags: validation > when: [step|int == 0, iscsid_enabled.rc == 0] > - name: Stop and disable iscsid service > service: name=iscsid state=stopped enabled=no > when: [step|int == 2, iscsid_enabled.rc == 0] > - {command: systemctl is-enabled --quiet iscsid.socket, ignore_errors: true, name: Check > if iscsid.socket service is deployed, register: iscsid_socket_enabled, tags: common} > - command: systemctl is-active --quiet iscsid.socket > name: 'PreUpgrade step0,validation: Check if iscsid.socket is running' > tags: validation > when: [step|int == 0, iscsid_socket_enabled.rc == 0] > - name: Stop and disable iscsid.socket service > service: name=iscsid.socket state=stopped enabled=no > when: [step|int == 2, iscsid_socket_enabled.rc == 0] > - {ignore_errors: true, name: Check for keystone running under apache, register: httpd_enabled, > shell: httpd -t -D DUMP_VHOSTS | grep -q keystone_wsgi, tags: common} > - name: 'PreUpgrade step0,validation: Check if keystone_wsgi is running under > httpd' > shell: systemctl status 'httpd' | grep -q keystone > tags: validation > when: [step|int == 0, httpd_enabled.rc == 0, httpd_running.rc == 0] > - name: Stop and disable keystone service (running under httpd) > service: 
name=httpd state=stopped enabled=no > when: [step|int == 2, httpd_enabled.rc == 0, httpd_running.rc == 0] > - file: {path: /var/spool/cron/keystone, state: absent} > name: remove old keystone cron jobs > when: step|int == 2 > - {command: systemctl is-enabled --quiet memcached, ignore_errors: true, name: Check > if memcached is deployed, register: memcached_enabled, tags: common} > - command: systemctl is-active --quiet memcached > name: 'PreUpgrade step0,validation: Check service memcached is running' > tags: validation > when: [step|int == 0, memcached_enabled.rc == 0] > - name: Stop and disable memcached service > service: name=memcached state=stopped enabled=no > when: [step|int == 2, memcached_enabled.rc == 0] > - {name: Check for mongodb service, register: mongod_service, stat: path=/usr/lib/systemd/system/mongod.service, > tags: common} > - name: Stop and disable mongodb service on upgrade > service: name=mongod state=stopped enabled=no > when: [step|int == 1, mongod_service.stat.exists] > - name: Get docker Mysql image > set_fact: {mysql_docker_image_latest: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-mariadb:pcmklatest'} > - name: Check for Mysql Kolla configuration > register: mysql_kolla_config > stat: {path: /var/lib/config-data/puppet-generated/mysql} > - name: Check if Mysql is already containerized > set_fact: {mysql_containerized: '{{mysql_kolla_config.stat.isdir | default(false)}}'} > - {command: hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid, name: get bootstrap > nodeid, register: bootstrap_node, tags: common} > - {name: set is_bootstrap_node fact, set_fact: 'is_bootstrap_node={{bootstrap_node.stdout|lower > == ansible_hostname|lower}}', tags: common} > - block: > - ignore_errors: true > name: Check cluster resource status > pacemaker_resource: {check_mode: true, resource: galera, state: master} > register: galera_res > - block: > - name: Disable the galera cluster resource > pacemaker_resource: {resource: galera, state: 
disable, wait_for_resource: true} > register: output > retries: 5 > until: output.rc == 0 > - name: Delete the stopped galera cluster resource. > pacemaker_resource: {resource: galera, state: delete, wait_for_resource: true} > register: output > retries: 5 > until: output.rc == 0 > when: (is_bootstrap_node) and (galera_res|succeeded) > - {name: Disable mysql service, service: name=mariadb enabled=no} > - {file: state=absent path=/etc/xinetd.d/galera-monitor, name: Remove clustercheck > service from xinetd} > - {name: Restart xinetd service after clustercheck removal, service: name=xinetd > state=restarted} > name: Mysql baremetal to container upgrade tasks > when: [step|int == 1, not mysql_containerized|bool] > - block: > - {name: Get galera image id currently used by pacemaker, register: galera_current_pcmklatest_id, > shell: 'docker images | awk ''/mariadb.* pcmklatest/{print $3}'' | uniq'} > - {name: Temporarily tag the current galera image id with the upgraded image > name, shell: 'docker tag {{galera_current_pcmklatest_id.stdout}} {{mysql_docker_image_latest}}'} > name: Prepare the switch to new galera container image name in pacemaker > when: [step|int == 0, mysql_containerized|bool] > - ignore_errors: true > name: Check galera cluster resource status > pacemaker_resource: {check_mode: false, resource: galera, state: show} > register: galera_pcs_res > - block: > - name: Disable the galera cluster resource before container upgrade > pacemaker_resource: {resource: galera, state: disable, wait_for_resource: true} > register: output > retries: 5 > until: output.rc == 0 > - block: > - {command: 'cibadmin --query --xpath "//storage-mapping[@id=''mysql-log'']"', > ignore_errors: true, name: Check Mysql logging configuration in pacemaker, > register: mysql_logs_moved} > - block: > - {command: pcs resource bundle update galera-bundle storage-map add id=mysql-log > source-dir=/var/log/containers/mysql target-dir=/var/log/mysql options=rw, > name: Add a bind mount for 
logging in the galera bundle} > - {command: pcs resource update galera log=/var/log/mysql/mysqld.log, name: Reconfigure > Mysql log file in the galera resource agent} > name: Change Mysql logging configuration in pacemaker > when: mysql_logs_moved.rc == 6 > name: Move Mysql logging to /var/log/containers > - {command: 'pcs resource bundle update galera-bundle container image={{mysql_docker_image_latest}}', > name: Update the galera bundle to use the new container image name} > - name: Enable the galera cluster resource > pacemaker_resource: {resource: galera, state: enable, wait_for_resource: true} > register: output > retries: 5 > until: output.rc == 0 > name: Update galera pcs resource bundle for new container image > when: [step|int == 1, mysql_containerized|bool, is_bootstrap_node, galera_pcs_res|succeeded] > - block: > - name: Get docker Mariadb image > set_fact: {docker_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-mariadb:2018-06-15.2', > docker_image_latest: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-mariadb:pcmklatest'} > - {name: Get previous Mariadb image id, register: mariadb_image_id, shell: 'docker > images | awk ''/mariadb.* pcmklatest/{print $3}'' | uniq'} > - block: > - {name: Get a list of container using Mariadb image, register: mariadb_containers_to_destroy, > shell: 'docker ps -a -q -f ''ancestor={{mariadb_image_id.stdout}}'''} > - {name: Remove any container using the same Mariadb image, shell: 'docker > rm -fv {{item}}', with_items: '{{ mariadb_containers_to_destroy.stdout_lines > }}'} > - {name: Remove previous Mariadb images, shell: 'docker rmi -f {{mariadb_image_id.stdout}}'} > when: [mariadb_image_id.stdout != ''] > - {command: 'docker pull {{docker_image}}', name: Pull latest Mariadb images} > - {name: Retag pcmklatest to latest Mariadb image, shell: 'docker tag {{docker_image}} > {{docker_image_latest}}'} > name: Retag the pacemaker image if containerized > when: [step|int == 3, 
mysql_containerized|bool] > - block: > - {name: Update host mariadb packages, when: step|int == 3, yum: name=mariadb-server-galera > state=latest} > - name: Mysql upgrade script > set_fact: {mysql_upgrade_script: '{% if mysql_containerized %}kolla_set_configs; > {% endif %} chown -R mysql:mysql /var/lib/mysql; mysqld_safe --user=mysql > --wsrep-provider=none --skip-networking --wsrep-on=off & timeout 60 sh > -c ''while ! mysqladmin ping --silent; do sleep 1; done''; mysql_upgrade; > mysqladmin shutdown'} > - name: Bind mounts for temporary container > set_fact: > mysql_upgrade_db_bind_mounts: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/lib/kolla/config_files/mysql.json:/var/lib/kolla/config_files/config.json', > '/var/lib/config-data/puppet-generated/mysql/:/var/lib/kolla/config_files/src:ro', > '/var/lib/mysql:/var/lib/mysql'] > - {name: Upgrade Mysql database from a temporary container, shell: '/usr/bin/docker > run --rm --log-driver=syslog -u root --net=host -e "KOLLA_CONFIG_STRATEGY=COPY_ALWAYS" > -v {{ mysql_upgrade_db_bind_mounts | union([''/tmp/mariadb-upgrade:/var/log/mariadb:rw'']) > | join('' -v '')}} "rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-mariadb:pcmklatest" > /bin/bash -ecx "{{mysql_upgrade_script}}"', when: mysql_containerized|bool} > - {name: Upgrade Mysql database from the host, shell: '/bin/bash -ecx "{{mysql_upgrade_script}}"', > when: not mysql_containerized|bool} > name: Check and upgrade Mysql database after major version upgrade > when: step|int == 3 > - {command: systemctl is-enabled --quiet neutron-server, 
ignore_errors: true, > name: Check if neutron_server is deployed, register: neutron_server_enabled, > tags: common} > - command: systemctl is-active --quiet neutron-server > name: 'PreUpgrade step0,validation: Check service neutron-server is running' > tags: validation > when: [step|int == 0, neutron_server_enabled.rc == 0] > - name: Stop and disable neutron_api service > service: name=neutron-server state=stopped enabled=no > when: [step|int == 2, neutron_server_enabled.rc == 0] > - name: Set fact for removal of openstack-neutron package > set_fact: {remove_neutron_package: false} > when: step|int == 2 > - ignore_errors: true > name: Remove openstack-neutron package if operator requests it > when: [step|int == 2, remove_neutron_package|bool] > yum: name=openstack-neutron state=removed > - {command: systemctl is-enabled --quiet neutron-dhcp-agent, ignore_errors: true, > name: Check if neutron_dhcp_agent is deployed, register: neutron_dhcp_agent_enabled, > tags: common} > - command: systemctl is-active --quiet neutron-dhcp-agent > name: 'PreUpgrade step0,validation: Check service neutron-dhcp-agent is running' > tags: validation > when: [step|int == 0, neutron_dhcp_agent_enabled.rc == 0] > - name: Stop and disable neutron_dhcp service > service: name=neutron-dhcp-agent state=stopped enabled=no > when: [step|int == 2, neutron_dhcp_agent_enabled.rc == 0] > - {command: systemctl is-enabled --quiet neutron-l3-agent, ignore_errors: true, > name: Check if neutron_l3_agent is deployed, register: neutron_l3_agent_enabled, > tags: common} > - command: systemctl is-active --quiet neutron-l3-agent > name: 'PreUpgrade step0,validation: Check service neutron-l3-agent is running' > tags: validation > when: [step|int == 0, neutron_l3_agent_enabled.rc == 0] > - name: Stop and disable neutron_l3 service > service: name=neutron-l3-agent state=stopped enabled=no > when: [step|int == 2, neutron_l3_agent_enabled.rc == 0] > - {command: systemctl is-enabled --quiet neutron-metadata-agent, 
ignore_errors: true, > name: Check if neutron_metadata_agent is deployed, register: neutron_metadata_agent_enabled, > tags: common} > - command: systemctl is-active --quiet neutron-metadata-agent > name: 'PreUpgrade step0,validation: Check service neutron-metadata-agent is > running' > tags: validation > when: [step|int == 0, neutron_metadata_agent_enabled.rc == 0] > - name: Stop and disable neutron_metadata service > service: name=neutron-metadata-agent state=stopped enabled=no > when: [step|int == 2, neutron_metadata_agent_enabled.rc == 0] > - {ignore_errors: true, name: Check openvswitch version., register: ovs_version, > shell: 'rpm -qa | awk -F- ''/^openvswitch-2/{print $2 "-" $3}''', when: step|int > == 2} > - {ignore_errors: true, name: Check openvswitch packaging., register: ovs_packaging_issue, > shell: 'rpm -q --scripts openvswitch | awk ''/postuninstall/,/*/'' | grep -q > "systemctl.*try-restart"', when: step|int == 2} > - block: > - file: {path: /root/OVS_UPGRADE, state: absent} > name: 'Ensure empty directory: emptying.' > - file: {group: root, mode: 488, owner: root, path: /root/OVS_UPGRADE, state: directory} > name: 'Ensure empty directory: creating.' 
> - {command: yum makecache, name: Make yum cache.} > - {command: yumdownloader --destdir /root/OVS_UPGRADE --resolve openvswitch, > name: Download OVS packages.} > - {name: Get rpm list for manual upgrade of OVS., register: ovs_list_of_rpms, > shell: ls -1 /root/OVS_UPGRADE/*.rpm} > - args: {chdir: /root/OVS_UPGRADE} > name: Manual upgrade of OVS > shell: 'rpm -U --test {{item}} 2>&1 | grep "already installed" || \ > > rpm -U --replacepkgs --notriggerun --nopostun {{item}}; > > ' > with_items: ['{{ovs_list_of_rpms.stdout_lines}}'] > when: [step|int == 2, '''2.5.0-14'' in ovs_version.stdout|default('''') or ovs_packaging_issue|default(false)|succeeded'] > - {command: systemctl is-enabled --quiet neutron-openvswitch-agent, ignore_errors: true, > name: Check if neutron_ovs_agent is deployed, register: neutron_ovs_agent_enabled, > tags: common} > - command: systemctl is-active --quiet neutron-openvswitch-agent > name: 'PreUpgrade step0,validation: Check service neutron-openvswitch-agent > is running' > tags: validation > when: [step|int == 0, neutron_ovs_agent_enabled.rc == 0] > - name: Stop and disable neutron_ovs_agent service > service: name=neutron-openvswitch-agent state=stopped enabled=no > when: [step|int == 2, neutron_ovs_agent_enabled.rc == 0] > - name: Set fact for removal of openstack-neutron-openvswitch package > set_fact: {remove_neutron_openvswitch_package: false} > when: step|int == 2 > - ignore_errors: true > name: Remove openstack-neutron-openvswitch package if operator requests it > when: [step|int == 2, remove_neutron_openvswitch_package|bool] > yum: name=openstack-neutron-openvswitch state=removed > - {command: systemctl is-enabled --quiet openstack-nova-api, ignore_errors: true, > name: Check if nova_api is deployed, register: nova_api_enabled, tags: common} > - {ignore_errors: true, name: Check for nova-api running under apache, register: httpd_enabled, > shell: httpd -t -D DUMP_VHOSTS | grep -q 'nova', tags: common} > - command: systemctl 
is-active --quiet openstack-nova-api > name: 'PreUpgrade step0,validation: Check service openstack-nova-api is running' > tags: validation > when: [step|int == 0, nova_api_enabled.rc == 0, httpd_enabled.rc != 0] > - name: Stop and disable nova_api service > service: name=openstack-nova-api state=stopped enabled=no > when: [step|int == 2, nova_api_enabled.rc == 0, httpd_enabled.rc != 0] > - name: 'PreUpgrade step0,validation: Check if nova_wsgi is running' > shell: systemctl status 'httpd' | grep -q 'nova' > tags: validation > when: [step|int == 0, httpd_enabled.rc == 0, httpd_running.rc == 0] > - name: Stop nova_api service (running under httpd) > service: name=httpd state=stopped > when: [step|int == 2, httpd_enabled.rc == 0, httpd_running.rc == 0] > - name: Set fact for removal of openstack-nova-api package > set_fact: {remove_nova_api_package: false} > when: step|int == 2 > - ignore_errors: true > name: Remove openstack-nova-api package if operator requests it > when: [step|int == 2, remove_nova_api_package|bool] > yum: name=openstack-nova-api state=removed > - file: {path: /var/spool/cron/nova, state: absent} > name: remove old nova cron jobs > when: step|int == 2 > - {command: systemctl is-enabled --quiet openstack-nova-conductor, ignore_errors: true, > name: Check if nova_conductor is deployed, register: nova_conductor_enabled, > tags: common} > - {ini_file: dest=/etc/nova/nova.conf section=upgrade_levels option=compute value=, > name: Set compute upgrade level to auto, when: step|int == 1} > - command: systemctl is-active --quiet openstack-nova-conductor > name: 'PreUpgrade step0,validation: Check service openstack-nova-conductor is > running' > tags: validation > when: [step|int == 0, nova_conductor_enabled.rc == 0] > - name: Stop and disable nova_conductor service > service: name=openstack-nova-conductor state=stopped enabled=no > when: [step|int == 2, nova_conductor_enabled.rc == 0] > - name: Set fact for removal of openstack-nova-conductor package > 
set_fact: {remove_nova_conductor_package: false} > when: step|int == 2 > - ignore_errors: true > name: Remove openstack-nova-conductor package if operator requests it > when: [step|int == 2, remove_nova_conductor_package|bool] > yum: name=openstack-nova-conductor state=removed > - {command: systemctl is-enabled --quiet openstack-nova-consoleauth, ignore_errors: true, > name: Check if nova_consoleauth is deployed, register: nova_consoleauth_enabled, > tags: common} > - command: systemctl is-active --quiet openstack-nova-consoleauth > name: 'PreUpgrade step0,validation: Check service openstack-nova-consoleauth > is running' > tags: validation > when: [step|int == 0, nova_consoleauth_enabled.rc == 0] > - name: Stop and disable nova_consoleauth service > service: name=openstack-nova-consoleauth state=stopped enabled=no > when: [step|int == 2, nova_consoleauth_enabled.rc == 0] > - name: Set fact for removal of openstack-nova-console package > set_fact: {remove_nova_console_package: false} > when: step|int == 2 > - ignore_errors: true > name: Remove openstack-nova-console package if operator requests it > when: [step|int == 2, remove_nova_console_package|bool] > yum: name=openstack-nova-console state=removed > - {command: systemctl is-enabled --quiet openstack-nova-api, ignore_errors: true, > name: Check if nova_api_metadata is deployed, register: nova_metadata_enabled, > tags: common} > - command: systemctl is-active --quiet openstack-nova-api > name: 'PreUpgrade step0,validation: Check service openstack-nova-api is running' > tags: validation > when: [step|int == 0, nova_metadata_enabled.rc == 0] > - name: Stop and disable nova_api service > service: name=openstack-nova-api state=stopped enabled=no > when: [step|int == 2, nova_metadata_enabled.rc == 0] > - {ignore_errors: true, name: Check for nova placement running under apache, register: httpd_enabled, > shell: httpd -t -D DUMP_VHOSTS | grep -q placement_wsgi, tags: common} > - name: 'PreUpgrade step0,validation: 
Check if placement_wsgi is running' > shell: systemctl status 'httpd' | grep -q placement_wsgi > tags: validation > when: [step|int == 0, httpd_enabled.rc == 0, httpd_running.rc == 0] > - name: Stop and disable nova_placement service (running under httpd) > service: name=httpd state=stopped enabled=no > when: [step|int == 2, httpd_enabled.rc == 0, httpd_running.rc == 0] > - {command: systemctl is-enabled --quiet openstack-nova-scheduler, ignore_errors: true, > name: Check if nova_scheduler is deployed, register: nova_scheduler_enabled, > tags: common} > - command: systemctl is-active --quiet openstack-nova-scheduler > name: 'PreUpgrade step0,validation: Check service openstack-nova-scheduler is > running' > tags: validation > when: [step|int == 0, nova_scheduler_enabled.rc == 0] > - name: Stop and disable nova_scheduler service > service: name=openstack-nova-scheduler state=stopped enabled=no > when: [step|int == 2, nova_scheduler_enabled.rc == 0] > - name: Set fact for removal of openstack-nova-scheduler package > set_fact: {remove_nova_scheduler_package: false} > when: step|int == 2 > - ignore_errors: true > name: Remove openstack-nova-scheduler package if operator requests it > when: [step|int == 2, remove_nova_scheduler_package|bool] > yum: name=openstack-nova-scheduler state=removed > - {command: systemctl is-enabled --quiet openstack-nova-novncproxy, ignore_errors: true, > name: Check if nova vncproxy is deployed, register: nova_vncproxy_enabled, tags: common} > - command: systemctl is-active --quiet openstack-nova-novncproxy > name: 'PreUpgrade step0,validation: Check service openstack-nova-novncproxy > is running' > tags: validation > when: [step|int == 0, nova_vncproxy_enabled.rc == 0] > - name: Stop and disable nova_vnc_proxy service > service: name=openstack-nova-novncproxy state=stopped enabled=no > when: [step|int == 2, nova_vncproxy_enabled.rc == 0] > - name: Set fact for removal of openstack-nova-novncproxy package > set_fact: 
{remove_nova_novncproxy_package: false} > when: step|int == 2 > - ignore_errors: true > name: Remove openstack-nova-novncproxy package if operator requests it > when: [step|int == 2, remove_nova_novncproxy_package|bool] > yum: name=openstack-nova-novncproxy state=removed > - {async: 30, name: Check pacemaker cluster running before upgrade, pacemaker_cluster: state=online > check_and_fail=true, poll: 4, tags: validation, when: step|int == 0} > - {name: Stop pacemaker cluster, pacemaker_cluster: state=offline, when: step|int > == 2} > - {name: Start pacemaker cluster, pacemaker_cluster: state=online, when: step|int > == 4} > - name: Get docker Rabbitmq image > set_fact: {rabbitmq_docker_image_latest: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-rabbitmq:pcmklatest'} > - name: Check for Rabbitmq Kolla configuration > register: rabbit_kolla_config > stat: {path: /var/lib/config-data/puppet-generated/rabbitmq} > - name: Check if Rabbitmq is already containerized > set_fact: {rabbit_containerized: '{{rabbit_kolla_config.stat.isdir | default(false)}}'} > - {command: hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid, name: get bootstrap > nodeid, register: bootstrap_node} > - {name: set is_bootstrap_node fact, set_fact: 'is_bootstrap_node={{bootstrap_node.stdout|lower > == ansible_hostname|lower}}'} > - block: > - ignore_errors: true > name: Check cluster resource status of rabbitmq > pacemaker_resource: {check_mode: false, resource: rabbitmq, state: show} > register: rabbitmq_res > - block: > - name: Disable the rabbitmq cluster resource. > pacemaker_resource: {resource: rabbitmq, state: disable, wait_for_resource: true} > register: output > retries: 5 > until: output.rc == 0 > - name: Delete the stopped rabbitmq cluster resource. 
> pacemaker_resource: {resource: rabbitmq, state: delete, wait_for_resource: true} > register: output > retries: 5 > until: output.rc == 0 > when: (is_bootstrap_node) and (rabbitmq_res|succeeded) > - {name: Disable rabbitmq service, service: name=rabbitmq-server enabled=no} > name: Rabbitmq baremetal to container upgrade tasks > when: [step|int == 1, not rabbit_containerized|bool] > - block: > - {name: Get rabbitmq image id currently used by pacemaker, register: rabbitmq_current_pcmklatest_id, > shell: 'docker images | awk ''/rabbitmq.* pcmklatest/{print $3}'' | uniq'} > - {name: Temporarily tag the current rabbitmq image id with the upgraded image > name, shell: 'docker tag {{rabbitmq_current_pcmklatest_id.stdout}} {{rabbitmq_docker_image_latest}}'} > name: Prepare the switch to new rabbitmq container image name in pacemaker > when: [step|int == 0, rabbit_containerized|bool] > - ignore_errors: true > name: Check rabbitmq-bundle cluster resource status > pacemaker_resource: {check_mode: false, resource: rabbitmq-bundle, state: show} > register: rabbit_pcs_res > - block: > - name: Disable the rabbitmq cluster resource before container upgrade > pacemaker_resource: {resource: rabbitmq-bundle, state: disable, wait_for_resource: true} > register: output > retries: 5 > until: output.rc == 0 > - block: > - {command: 'cibadmin --query --xpath "//storage-mapping[@id=''rabbitmq-log'']"', > ignore_errors: true, name: Check rabbitmq logging configuration in pacemaker, > register: rabbitmq_logs_moved} > - {command: pcs resource bundle update rabbitmq-bundle storage-map add id=rabbitmq-log > source-dir=/var/log/containers/rabbitmq target-dir=/var/log/rabbitmq options=rw, > name: Add a bind mount for logging in the rabbitmq bundle, when: rabbitmq_logs_moved.rc > == 6} > name: Move rabbitmq logging to /var/log/containers > - {command: 'pcs resource bundle update rabbitmq-bundle container image={{rabbitmq_docker_image_latest}}', > name: Update the rabbitmq bundle to use the new 
container image name} > - name: Enable the rabbitmq cluster resource > pacemaker_resource: {resource: rabbitmq-bundle, state: enable, wait_for_resource: true} > register: output > retries: 5 > until: output.rc == 0 > name: Update rabbitmq-bundle pcs resource bundle for new container image > when: [step|int == 1, rabbit_containerized|bool, is_bootstrap_node, rabbit_pcs_res|succeeded] > - block: > - name: Get docker Rabbitmq image > set_fact: {docker_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-rabbitmq:2018-06-15.2', > docker_image_latest: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-rabbitmq:pcmklatest'} > - {name: Get previous Rabbitmq image id, register: rabbitmq_image_id, shell: 'docker > images | awk ''/rabbitmq.* pcmklatest/{print $3}'' | uniq'} > - block: > - {name: Get a list of container using Rabbitmq image, register: rabbitmq_containers_to_destroy, > shell: 'docker ps -a -q -f ''ancestor={{rabbitmq_image_id.stdout}}'''} > - {name: Remove any container using the same Rabbitmq image, shell: 'docker > rm -fv {{item}}', with_items: '{{ rabbitmq_containers_to_destroy.stdout_lines > }}'} > - {name: Remove previous Rabbitmq images, shell: 'docker rmi -f {{rabbitmq_image_id.stdout}}'} > when: [rabbitmq_image_id.stdout != ''] > - {command: 'docker pull {{docker_image}}', name: Pull latest Rabbitmq images} > - {name: Retag pcmklatest to latest Rabbitmq image, shell: 'docker tag {{docker_image}} > {{docker_image_latest}}'} > name: Retag the pacemaker image if containerized > when: [step|int == 3, rabbit_containerized|bool] > - name: Get docker redis image > set_fact: {redis_docker_image_latest: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-redis:pcmklatest'} > - name: Check for redis Kolla configuration > register: redis_kolla_config > stat: {path: /var/lib/config-data/puppet-generated/redis} > - name: Check if redis is already containerized > set_fact: {redis_containerized: '{{redis_kolla_config.stat.isdir | 
default(false)}}'} > - block: > - ignore_errors: true > name: Check cluster resource status of redis > pacemaker_resource: {check_mode: false, resource: redis, state: show} > register: redis_res > - block: > - name: Disable the redis cluster resource > pacemaker_resource: {resource: redis, state: disable, wait_for_resource: true} > register: output > retries: 5 > until: output.rc == 0 > - name: Delete the stopped redis cluster resource. > pacemaker_resource: {resource: redis, state: delete, wait_for_resource: true} > register: output > retries: 5 > until: output.rc == 0 > when: (is_bootstrap_node) and (redis_res|succeeded) > - {name: Disable redis service, service: name=redis enabled=no} > name: redis baremetal to container upgrade tasks > when: [step|int == 1, not redis_containerized|bool] > - block: > - {name: Get redis image id currently used by pacemaker, register: redis_current_pcmklatest_id, > shell: 'docker images | awk ''/redis.* pcmklatest/{print $3}'' | uniq'} > - {name: Temporarily tag the current redis image id with the upgraded image > name, shell: 'docker tag {{redis_current_pcmklatest_id.stdout}} {{redis_docker_image_latest}}'} > name: Prepare the switch to new redis container image name in pacemaker > when: [step|int == 0, redis_containerized|bool] > - ignore_errors: true > name: Check redis-bundle cluster resource status > pacemaker_resource: {check_mode: false, resource: redis-bundle, state: show} > register: redis_pcs_res > - block: > - name: Disable the redis cluster resource before container upgrade > pacemaker_resource: {resource: redis-bundle, state: disable, wait_for_resource: true} > register: output > retries: 5 > until: output.rc == 0 > - block: > - {command: 'cibadmin --query --xpath "//storage-mapping[@id=''redis-log'' > and @source-dir=''/var/log/containers/redis'']"', ignore_errors: true, > name: Check redis logging configuration in pacemaker, register: redis_logs_moved} > - block: > - {command: pcs resource bundle update redis-bundle 
storage-map remove redis-log, > name: Remove old bind mount for logging in the redis bundle} > - {command: pcs resource bundle update redis-bundle storage-map add id=redis-log > source-dir=/var/log/containers/redis target-dir=/var/log/redis options=rw, > name: Add a bind mount for logging in the redis bundle} > name: Change redis logging configuration in pacemaker > when: redis_logs_moved.rc == 6 > name: Move redis logging to /var/log/containers > - {command: 'pcs resource bundle update redis-bundle container image={{redis_docker_image_latest}}', > name: Update the redis bundle to use the new container image name} > - name: Enable the redis cluster resource > pacemaker_resource: {resource: redis-bundle, state: enable, wait_for_resource: true} > register: output > retries: 5 > until: output.rc == 0 > name: Update redis-bundle pcs resource bundle for new container image > when: [step|int == 1, redis_containerized|bool, is_bootstrap_node, redis_pcs_res|succeeded] > - block: > - name: Get docker Redis image > set_fact: {docker_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-redis:2018-06-15.2', > docker_image_latest: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-redis:pcmklatest'} > - {name: Get previous Redis image id, register: redis_image_id, shell: 'docker > images | awk ''/redis.* pcmklatest/{print $3}'' | uniq'} > - block: > - {name: Get a list of container using Redis image, register: redis_containers_to_destroy, > shell: 'docker ps -a -q -f ''ancestor={{redis_image_id.stdout}}'''} > - {name: Remove any container using the same Redis image, shell: 'docker rm > -fv {{item}}', with_items: '{{ redis_containers_to_destroy.stdout_lines > }}'} > - {name: Remove previous Redis images, shell: 'docker rmi -f {{redis_image_id.stdout}}'} > when: [redis_image_id.stdout != ''] > - {command: 'docker pull {{docker_image}}', name: Pull latest Redis images} > - {name: Retag pcmklatest to latest Redis image, shell: 'docker tag {{docker_image}} 
> {{docker_image_latest}}'} > name: Retag the pacemaker image if containerized > when: [step|int == 3, redis_containerized|bool] > - {name: Stop snmp service, service: name=snmpd state=stopped, when: step|int > == 1} > - command: systemctl is-enabled --quiet "{{ item }}" > ignore_errors: true > name: Check if swift-proxy or swift-object-expirer are deployed > register: swift_proxy_services_enabled > tags: common > with_items: [openstack-swift-proxy, openstack-swift-object-expirer] > - command: systemctl is-active --quiet "{{ item.item }}" > name: 'PreUpgrade step0,validation: Check service openstack-swift-proxy and > openstack-swift-object-expirer are running' > tags: validation > when: [step|int == 0, item.rc == 0] > with_items: '{{ swift_proxy_services_enabled.results }}' > - name: Stop and disable swift-proxy and swift-object-expirer services > service: name={{ item.item }} state=stopped enabled=no > when: [step|int == 2, item.rc == 0] > with_items: '{{ swift_proxy_services_enabled.results }}' > - name: Set fact for removal of openstack-swift-proxy package > set_fact: {remove_swift_proxy_package: false} > when: step|int == 2 > - ignore_errors: true > name: Remove openstack-swift-proxy package if operator requests it > when: [step|int == 2, remove_swift_proxy_package|bool] > yum: name=openstack-swift-proxy state=removed > - command: systemctl is-enabled --quiet "{{ item }}" > ignore_errors: true > name: Check if swift storage services are deployed > register: swift_services_enabled > tags: common > with_items: [openstack-swift-account-auditor, openstack-swift-account-reaper, > openstack-swift-account-replicator, openstack-swift-account, openstack-swift-container-auditor, > openstack-swift-container-replicator, openstack-swift-container-updater, openstack-swift-container, > openstack-swift-object-auditor, openstack-swift-object-replicator, openstack-swift-object-updater, > openstack-swift-object] > - command: systemctl is-active --quiet "{{ item.item }}" > name: 
'PreUpgrade step0,validation: Check swift storage services are running' > tags: validation > when: [step|int == 0, item.rc == 0] > with_items: '{{ swift_services_enabled.results }}' > - name: Stop and disable swift storage services > service: name={{ item.item }} state=stopped enabled=no > when: [step|int == 2, item.rc == 0] > with_items: '{{ swift_services_enabled.results }}' > - name: Set fact for removal of openstack-swift-container,object,account package > set_fact: {remove_swift_package: false} > when: step|int == 2 > - ignore_errors: true > name: Remove openstack-swift-container,object,account packages if operator requests > it > when: [step|int == 2, remove_swift_package|bool] > with_items: [openstack-swift-container, openstack-swift-object, openstack-swift-account] > yum: name={{ item }} state=removed > - {file: state=absent path=/etc/xinetd.d/rsync, name: Remove rsync service from > xinetd, register: rsync_service_removed, when: step|int == 2} > - name: Restart xinetd service after rsync removal > service: name=xinetd state=restarted > when: [step|int == 2, rsync_service_removed|changed] > - args: {creates: /etc/sysconfig/ip6tables.n-o-upgrade} > name: blank ipv6 rule before activating ipv6 firewall. > shell: cat /etc/sysconfig/ip6tables > /etc/sysconfig/ip6tables.n-o-upgrade; > cat</dev/null>/etc/sysconfig/ip6tables > when: step|int == 3 > - {name: Check yum for rpm-python present, register: rpm_python_check, when: step|int > == 0, yum: name=rpm-python state=present} > - fail: msg="rpm-python package was not present before this run! 
Check environment > before re-running" > name: Fail when rpm-python wasn't present > when: [step|int == 0, rpm_python_check.changed != false] > - {name: Check for os-net-config upgrade, register: os_net_config_need_upgrade, > shell: 'yum check-upgrade | awk ''/os-net-config/{print}''', when: step|int > == 3} > - {ignore_errors: true, name: Check that os-net-config has configuration, register: os_net_config_has_config, > shell: test -s /etc/os-net-config/config.json, when: step|int == 3} > - block: > - {name: Upgrade os-net-config, yum: name=os-net-config state=latest} > - {changed_when: os_net_config_upgrade.rc == 2, command: os-net-config --no-activate > -c /etc/os-net-config/config.json -v --detailed-exit-codes, failed_when: 'os_net_config_upgrade.rc > not in [0,2]', name: take new os-net-config parameters into account now, > register: os_net_config_upgrade} > when: [step|int == 3, os_net_config_need_upgrade.stdout, os_net_config_has_config.rc > == 0] > - {name: Update all packages, when: step|int == 3, yum: name=* state=latest} > role_data_workflow_tasks: {} > role_name: Controller >COMPUTE-R00-00: > hosts: > 192.168.0.12: {} > vars: > ctlplane_ip: 192.168.0.12 > deploy_server_id: 08d6ae29-d8e8-4e7e-b1cf-836e8c86bcbd > enabled_networks: [management, storage, ctlplane, external, internal_api, storage_mgmt, > tenant] > external_ip: 192.168.0.12 > internal_api_ip: 10.0.0.17 > management_ip: 172.16.17.214 > storage_ip: 10.0.0.148 > storage_mgmt_ip: 192.168.0.12 > tenant_ip: 10.0.1.143 >Compute: > children: > COMPUTE-R00-00: {} > vars: > ansible_ssh_user: heat-admin > bootstrap_server_id: 9b2e7b63-8bb8-4e4b-a3b8-df5dab131de3 > role_data_cellv2_discovery: true > role_data_config_settings: {} > role_data_deploy_steps_tasks: [] > role_data_docker_config: > step_3: > iscsid: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-iscsid:2018-06-15.2 > net: host > privileged: true > restart: always > 
start_order: 2 > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro', > '/dev/:/dev/', '/run/:/run/', '/sys:/sys', '/lib/modules:/lib/modules:ro', > '/etc/iscsi:/var/lib/kolla/config_files/src-iscsid:ro'] > neutron_ovs_bridge: > command: [puppet, apply, --modulepath, '/etc/puppet/modules:/usr/share/openstack-puppet/modules', > --tags, 'file,file_line,concat,augeas,neutron::plugins::ovs::bridge,vs_config', > -v, -e, 'include neutron::agents::ml2::ovs'] > detach: false > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-neutron-server:2018-06-15.2 > net: host > pid: host > privileged: true > user: root > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/lib/kolla/config_files/neutron_ovs_agent.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/neutron/:/var/lib/kolla/config_files/src:ro', > '/lib/modules:/lib/modules:ro', '/run/openvswitch:/run/openvswitch', '/etc/puppet:/etc/puppet:ro', > '/usr/share/openstack-puppet/modules/:/usr/share/openstack-puppet/modules/:ro', > 
'/var/run/openvswitch/:/var/run/openvswitch/'] > nova_libvirt: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-nova-libvirt:2018-06-15.2 > net: host > pid: host > privileged: true > restart: always > start_order: 1 > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/lib/kolla/config_files/nova_libvirt.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/nova_libvirt/:/var/lib/kolla/config_files/src:ro', > '/etc/ceph:/var/lib/kolla/config_files/src-ceph:ro', '/lib/modules:/lib/modules:ro', > '/dev:/dev', '/run:/run', '/sys/fs/cgroup:/sys/fs/cgroup', '/var/lib/nova:/var/lib/nova:shared', > '/etc/libvirt:/etc/libvirt', '/var/run/libvirt:/var/run/libvirt', '/var/lib/libvirt:/var/lib/libvirt', > '/var/log/containers/libvirt:/var/log/libvirt', '/var/log/libvirt/qemu:/var/log/libvirt/qemu:ro', > '/var/lib/vhost_sockets:/var/lib/vhost_sockets', '/sys/fs/selinux:/sys/fs/selinux'] > nova_virtlogd: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-nova-libvirt:2018-06-15.2 > net: host > pid: host > privileged: true > restart: always > start_order: 0 > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > 
'/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/lib/kolla/config_files/nova_virtlogd.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/nova_libvirt/:/var/lib/kolla/config_files/src:ro', > '/lib/modules:/lib/modules:ro', '/dev:/dev', '/run:/run', '/sys/fs/cgroup:/sys/fs/cgroup', > '/var/lib/nova:/var/lib/nova:shared', '/var/run/libvirt:/var/run/libvirt', > '/var/lib/libvirt:/var/lib/libvirt', '/etc/libvirt/qemu:/etc/libvirt/qemu:ro', > '/var/log/libvirt/qemu:/var/log/libvirt/qemu'] > step_4: > ceilometer_agent_compute: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-ceilometer-compute:2018-06-15.2 > net: host > privileged: false > restart: always > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/lib/kolla/config_files/ceilometer_agent_compute.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/ceilometer/:/var/lib/kolla/config_files/src:ro', > '/var/run/libvirt:/var/run/libvirt:ro', '/var/log/containers/ceilometer:/var/log/ceilometer'] > logrotate_crond: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-cron:2018-06-15.2 > net: none > pid: host > privileged: true > restart: always > user: root > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > 
'/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/lib/kolla/config_files/logrotate-crond.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/crond/:/var/lib/kolla/config_files/src:ro', > '/var/log/containers:/var/log/containers'] > neutron_ovs_agent: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > healthcheck: {test: /openstack/healthcheck} > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-neutron-openvswitch-agent:2018-06-15.2 > net: host > pid: host > privileged: true > restart: always > start_order: 10 > ulimit: [nofile=1024] > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/log/containers/neutron:/var/log/neutron', '/var/lib/kolla/config_files/neutron_ovs_agent.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/neutron/:/var/lib/kolla/config_files/src:ro', > '/var/lib/docker-config-scripts/neutron_ovs_agent_launcher.sh:/neutron_ovs_agent_launcher.sh:ro', > '/lib/modules:/lib/modules:ro', '/run/openvswitch:/run/openvswitch'] > nova_compute: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > healthcheck: {test: /openstack/healthcheck} > image: 
rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-nova-compute:2018-06-15.2 > ipc: host > net: host > privileged: true > restart: always > ulimit: [nofile=1024] > user: nova > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/log/containers/nova:/var/log/nova', '/var/lib/kolla/config_files/nova_compute.json:/var/lib/kolla/config_files/config.json:ro', > '/var/lib/config-data/puppet-generated/nova_libvirt/:/var/lib/kolla/config_files/src:ro', > '/etc/iscsi:/var/lib/kolla/config_files/src-iscsid:ro', '/etc/ceph:/var/lib/kolla/config_files/src-ceph:ro', > '/dev:/dev', '/lib/modules:/lib/modules:ro', '/run:/run', '/var/lib/nova:/var/lib/nova:shared', > '/var/lib/libvirt:/var/lib/libvirt', '/sys/class/net:/sys/class/net', > '/sys/bus/pci:/sys/bus/pci'] > nova_migration_target: > environment: [KOLLA_CONFIG_STRATEGY=COPY_ALWAYS] > image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-nova-compute:2018-06-15.2 > net: host > privileged: true > restart: always > user: root > volumes: ['/etc/hosts:/etc/hosts:ro', '/etc/localtime:/etc/localtime:ro', > '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro', > '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro', > '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro', '/dev/log:/dev/log', > '/etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro', '/etc/puppet:/etc/puppet:ro', > '/var/lib/kolla/config_files/nova-migration-target.json:/var/lib/kolla/config_files/config.json:ro', > 
'/var/lib/config-data/puppet-generated/nova_libvirt/:/var/lib/kolla/config_files/src:ro', > '/etc/ssh/:/host-ssh/:ro', '/run:/run', '/var/lib/nova:/var/lib/nova:shared'] > role_data_docker_config_scripts: > neutron_ovs_agent_launcher.sh: {content: '#!/bin/bash > > set -xe > > /usr/bin/python -m neutron.cmd.destroy_patch_ports --config-file /usr/share/neutron/neutron-dist.conf > --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/openvswitch_agent.ini > --config-dir /etc/neutron/conf.d/common --config-dir /etc/neutron/conf.d/neutron-openvswitch-agent > > /usr/bin/neutron-openvswitch-agent --config-file /usr/share/neutron/neutron-dist.conf > --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/openvswitch_agent.ini > --config-file /etc/neutron/plugins/ml2/ml2_conf.ini --config-dir /etc/neutron/conf.d/common > --log-file=/var/log/neutron/openvswitch-agent.log > > ', mode: '0755'} > role_data_docker_puppet_tasks: {} > role_data_external_deploy_tasks: [] > role_data_external_post_deploy_tasks: [] > role_data_fast_forward_post_upgrade_tasks: > - name: Register repo type and args > set_fact: > fast_forward_repo_args: > tripleo_repos: {ocata: -b ocata current, pike: -b pike current, queens: -b > queens current} > fast_forward_repo_type: custom-script > - debug: {msg: 'fast_forward_repo_type: {{ fast_forward_repo_type }} fast_forward_repo_args: > {{ fast_forward_repo_args }}'} > - block: > - git: {dest: /home/stack/tripleo-repos/, repo: 'https://github.com/openstack/tripleo-repos.git'} > name: clone tripleo-repos > - args: {chdir: /home/stack/tripleo-repos/} > command: python setup.py install > name: install tripleo-repos > - {command: 'tripleo-repos {{ fast_forward_repo_args.tripleo_repos[release] > }}', name: Enable tripleo-repos} > when: [ffu_packages_apply|bool, fast_forward_repo_type == 'tripleo-repos'] > - block: > - copy: {content: "set -e\nMIRROR=tlv\nyum localinstall -y 
http://download.lab.bos.redhat.com/rcm-guest/puddles/OpenStack/rhos-release/rhos-release-latest.noarch.rpm\n\ > rhos-release -x\ncase $1 in\n ocata)\n rhos-release 11\n ;;\n \ > \ pike)\n rhos-release 12\n ;;\n queens)\n rhos-release 13\n\ > \ rm -rf /etc/yum.repos.d/rhos-release-ceph*\n ;;\n *)\n echo\ > \ \"unknown release $1\" >&2\n exit 1\nesac\nif [[ $MIRROR != default_repo_mirror\ > \ ]]; then\n sed -i \"s/download.*\\.lab.*\\.redhat\\.com/rhos-qe-mirror-${MIRROR}.usersys.redhat.com/\"\ > \ /etc/yum.repos.d/*.repo\n sed -i \"s/download\\(-node.*\\)\\?\\.eng.*\\\ > .redhat\\.com/rhos-qe-mirror-${MIRROR}.usersys.redhat.com/\" /etc/yum.repos.d/*.repo\n\ > \ sed -i \"s/rhos-release.*\\.redhat\\.com/rhos-qe-mirror-${MIRROR}.usersys.redhat.com\\\ > /rhos-release/\" /etc/yum.repos.d/*.repo\n sed -r -i \"s/ayanami.*\\\ > .redhat.com/rhos-qe-mirror-${MIRROR}.usersys.redhat.com\\/ayanami/\" /etc/yum.repos.d/*.repo\n\ > \ sed -i \"s/pulp.*\\.redhat\\.com/rhos-qe-mirror-${MIRROR}.usersys.redhat.com\\\ > /pulp/\" /etc/yum.repos.d/*.repo\nfi\nyum clean all\n", dest: /root/ffu_update_repo.sh, > mode: 448} > name: Create custom Script for upgrading repo. 
> - {name: Execute custom script for upgrading repo., shell: '/root/ffu_update_repo.sh > {{release}}'} > when: [ffu_packages_apply|bool, fast_forward_repo_type == 'custom-script'] > role_data_fast_forward_upgrade_tasks: > - command: systemctl is-enabled openstack-ceilometer-compute > ignore_errors: true > name: FFU check if openstack-ceilometer-compute is deployed > register: ceilometer_agent_compute_enabled_result > when: [step|int == 0, release == 'ocata'] > - name: Set fact ceilometer_agent_compute_enabled > set_fact: {ceilometer_agent_compute_enabled: '{{ ceilometer_agent_compute_enabled_result.rc > == 0 }}'} > when: [step|int == 0, release == 'ocata'] > - name: FFU stop and disable openstack-ceilometer-compute service > service: name=openstack-ceilometer-compute state=stopped enabled=no > when: [step|int == 1, release == 'ocata', ceilometer_agent_compute_enabled|bool] > - command: systemctl is-enabled --quiet neutron-openvswitch-agent > ignore_errors: true > name: Check if neutron_ovs_agent is deployed > register: neutron_ovs_agent_enabled_result > when: [step|int == 0, release == 'ocata'] > - name: Set fact neutron_ovs_agent_enabled > set_fact: {neutron_ovs_agent_enabled: '{{ neutron_ovs_agent_enabled_result.rc > == 0 }}'} > when: [step|int == 0, release == 'ocata'] > - name: Stop neutron_openvswitch_agent > service: name=neutron-openvswitch-agent state=stopped enabled=no > when: [step|int == 1, release == 'ocata', neutron_ovs_agent_enabled|bool] > - command: systemctl is-enabled --quiet openstack-nova-compute > ignore_errors: true > name: Check if nova-compute is deployed > register: nova_compute_enabled_result > when: [step|int == 0, release == 'ocata'] > - name: Set fact nova_compute_enabled > set_fact: {nova_compute_enabled: '{{ nova_compute_enabled_result.rc == 0 }}'} > when: [step|int == 0, release == 'ocata'] > - name: Stop and disable nova-compute service > service: name=openstack-nova-compute state=stopped > when: [step|int == 1, 
nova_compute_enabled|bool, release == 'ocata'] > - name: Register repo type and args > set_fact: > fast_forward_repo_args: > tripleo_repos: {ocata: -b ocata current, pike: -b pike current, queens: -b > queens current} > fast_forward_repo_type: custom-script > when: step|int == 3 > - debug: {msg: 'fast_forward_repo_type: {{ fast_forward_repo_type }} fast_forward_repo_args: > {{ fast_forward_repo_args }}'} > when: step|int == 3 > - block: > - git: {dest: /home/stack/tripleo-repos/, repo: 'https://github.com/openstack/tripleo-repos.git'} > name: clone tripleo-repos > - args: {chdir: /home/stack/tripleo-repos/} > command: python setup.py install > name: install tripleo-repos > - {command: 'tripleo-repos {{ fast_forward_repo_args.tripleo_repos[release] > }}', name: Enable tripleo-repos} > when: [step|int == 3, ffu_packages_apply|bool, fast_forward_repo_type == 'tripleo-repos'] > - block: > - copy: {content: "set -e\nMIRROR=tlv\nyum localinstall -y http://download.lab.bos.redhat.com/rcm-guest/puddles/OpenStack/rhos-release/rhos-release-latest.noarch.rpm\n\ > rhos-release -x\ncase $1 in\n ocata)\n rhos-release 11\n ;;\n \ > \ pike)\n rhos-release 12\n ;;\n queens)\n rhos-release 13\n\ > \ rm -rf /etc/yum.repos.d/rhos-release-ceph*\n ;;\n *)\n echo\ > \ \"unknown release $1\" >&2\n exit 1\nesac\nif [[ $MIRROR != default_repo_mirror\ > \ ]]; then\n sed -i \"s/download.*\\.lab.*\\.redhat\\.com/rhos-qe-mirror-${MIRROR}.usersys.redhat.com/\"\ > \ /etc/yum.repos.d/*.repo\n sed -i \"s/download\\(-node.*\\)\\?\\.eng.*\\\ > .redhat\\.com/rhos-qe-mirror-${MIRROR}.usersys.redhat.com/\" /etc/yum.repos.d/*.repo\n\ > \ sed -i \"s/rhos-release.*\\.redhat\\.com/rhos-qe-mirror-${MIRROR}.usersys.redhat.com\\\ > /rhos-release/\" /etc/yum.repos.d/*.repo\n sed -r -i \"s/ayanami.*\\\ > .redhat.com/rhos-qe-mirror-${MIRROR}.usersys.redhat.com\\/ayanami/\" /etc/yum.repos.d/*.repo\n\ > \ sed -i \"s/pulp.*\\.redhat\\.com/rhos-qe-mirror-${MIRROR}.usersys.redhat.com\\\ > /pulp/\" 
/etc/yum.repos.d/*.repo\nfi\nyum clean all\n", dest: /root/ffu_update_repo.sh, > mode: 448} > name: Create custom Script for upgrading repo. > - {name: Execute custom script for upgrading repo., shell: '/root/ffu_update_repo.sh > {{release}}'} > when: [step|int == 3, ffu_packages_apply|bool, fast_forward_repo_type == 'custom-script'] > role_data_global_config_settings: {} > role_data_host_prep_tasks: > - file: {path: /var/log/containers/ceilometer, state: directory} > name: create persistent logs directory > - copy: {content: 'Log files from ceilometer containers can be found under > > /var/log/containers/ceilometer. > > ', dest: /var/log/ceilometer/readme.txt} > ignore_errors: true > name: ceilometer logs readme > - file: {path: '{{ item }}', state: directory} > name: create persistent logs directory > with_items: [/var/log/containers/neutron] > - copy: {content: 'Log files from neutron containers can be found under > > /var/log/containers/neutron and /var/log/containers/httpd/neutron-api. > > ', dest: /var/log/neutron/readme.txt} > ignore_errors: true > name: neutron logs readme > - {name: stat /lib/systemd/system/iscsid.socket, register: stat_iscsid_socket, > stat: path=/lib/systemd/system/iscsid.socket} > - {name: Stop and disable iscsid.socket service, service: name=iscsid.socket state=stopped > enabled=no, when: stat_iscsid_socket.stat.exists} > - file: {path: /var/log/containers/nova, state: directory} > name: create persistent logs directory > - copy: {content: 'Log files from nova containers can be found under > > /var/log/containers/nova and /var/log/containers/httpd/nova-*. 
> > ', dest: /var/log/nova/readme.txt} > ignore_errors: true > name: nova logs readme > - file: {path: '{{ item }}', state: directory} > name: create persistent directories > with_items: [/var/lib/nova, /var/lib/libvirt] > - file: {path: /etc/ceph, state: directory} > name: ensure ceph configurations exist > - name: is Instance HA enabled > set_fact: {instance_ha_enabled: false} > - block: > - file: {path: /var/lib/nova/instanceha, state: directory} > name: prepare Instance HA script directory > - copy: {content: "#!/bin/python -utt\n\nimport os\nimport sys\nimport time\n\ > import inspect\nimport logging\nimport argparse\nimport oslo_config.cfg\n\ > import requests.exceptions\n\ndef is_forced_down(connection, hostname):\n\ > \ services = connection.services.list(host=hostname, binary=\"nova-compute\"\ > )\n for service in services:\n if service.forced_down:\n \ > \ return True\n return False\n\ndef evacuations_done(connection,\ > \ hostname):\n # Get a list of migrations.\n # :param host: (optional)\ > \ filter migrations by host name.\n # :param status: (optional) filter\ > \ migrations by status.\n # :param cell_name: (optional) filter migrations\ > \ for a cell.\n #\n migrations = connection.migrations.list(host=hostname)\n\ > \n print(\"Checking %d migrations\" % len(migrations))\n for migration\ > \ in migrations:\n # print migration.to_dict()\n #\n \ > \ # {\n # u'status': u'error',\n # u'dest_host': None,\n\ > \ # u'new_instance_type_id': 2,\n # u'old_instance_type_id':\ > \ 2,\n # u'updated_at': u'2018-04-22T20:55:29.000000',\n \ > \ # u'dest_compute':\n # u'overcloud-novacompute-2.localdomain',\n\ > \ # u'migration_type': u'live-migration',\n # u'source_node':\n\ > \ # u'overcloud-novacompute-0.localdomain',\n # u'id':\ > \ 8,\n # u'created_at': u'2018-04-22T20:52:58.000000',\n \ > \ # u'instance_uuid':\n # u'd1c82ce8-3dc5-48db-b59f-854b3b984ef1',\n\ > \ # u'dest_node':\n # u'overcloud-novacompute-2.localdomain',\n\ > \ # u'source_compute':\n # 
u'overcloud-novacompute-0.localdomain'\n\ > \ # }\n # Acceptable: done, completed, failed\n if\ > \ migration.status in [\"running\", \"accepted\", \"pre-migrating\"]:\n\ > \ return False\n return True\n\ndef safe_to_start(connection,\ > \ hostname):\n if is_forced_down(connection, hostname):\n print(\"\ > Waiting for fence-down flag to be cleared\")\n return False\n \ > \ if not evacuations_done(connection, hostname):\n print(\"Waiting\ > \ for evacuations to complete or fail\")\n return False\n return\ > \ True\n\ndef create_nova_connection(options):\n try:\n from\ > \ novaclient import client\n from novaclient.exceptions import\ > \ NotAcceptable\n except ImportError:\n print(\"Nova not found\ > \ or not accessible\")\n sys.exit(1)\n\n from keystoneauth1\ > \ import loading\n from keystoneauth1 import session\n from keystoneclient\ > \ import discover\n\n # Prefer the oldest and strip the leading 'v'\n\ > \ keystone_versions = discover.available_versions(options[\"auth_url\"\ > ][0])\n keystone_version = keystone_versions[0]['id'][1:]\n kwargs\ > \ = dict(\n auth_url=options[\"auth_url\"][0],\n username=options[\"\ > username\"][0],\n password=options[\"password\"][0]\n )\n\ > \n if discover.version_match(\"2\", keystone_version):\n kwargs[\"\ > tenant_name\"] = options[\"tenant_name\"][0]\n\n elif discover.version_match(\"\ > 3\", keystone_version):\n kwargs[\"project_name\"] = options[\"\ > project_name\"][0]\n kwargs[\"user_domain_name\"] = options[\"\ > user_domain_name\"][0]\n kwargs[\"project_domain_name\"] = options[\"\ > project_domain_name\"][0]\n\n loader = loading.get_plugin_loader('password')\n\ > \ keystone_auth = loader.load_from_options(**kwargs)\n keystone_session\ > \ = session.Session(auth=keystone_auth, verify=(not options[\"insecure\"\ > ]))\n\n nova_versions = [ \"2.23\", \"2\" ]\n for version in nova_versions:\n\ > \ clientargs = inspect.getargspec(client.Client).varargs\n \ > \ # Some versions of Openstack prior to Ocata only\n # 
supported\ > \ positional arguments for username,\n # password, and tenant.\n\ > \ #\n # Versions since Ocata only support named arguments.\n\ > \ #\n # So we need to use introspection to figure out how\ > \ to\n # create a Nova client.\n #\n # Happy days\n\ > \ #\n if clientargs:\n # OSP < Ocata\n \ > \ # ArgSpec(args=['version', 'username', 'password', 'project_id',\ > \ 'auth_url'],\n # varargs=None,\n # \ > \ keywords='kwargs', defaults=(None, None, None, None))\n \ > \ nova = client.Client(version,\n \ > \ None, # User\n None, # Password\n \ > \ None, # Tenant\n \ > \ None, # Auth URL\n insecure=options[\"\ > insecure\"],\n region_name=options[\"\ > os_region_name\"][0],\n session=keystone_session,\ > \ auth=keystone_auth,\n http_log_debug=options.has_key(\"\ > verbose\"))\n else:\n # OSP >= Ocata\n #\ > \ ArgSpec(args=['version'], varargs='args', keywords='kwargs', defaults=None)\n\ > \ nova = client.Client(version,\n \ > \ region_name=options[\"os_region_name\"][0],\n \ > \ session=keystone_session, auth=keystone_auth,\n \ > \ http_log_debug=options.has_key(\"verbose\"\ > ))\n\n try:\n nova.hypervisors.list()\n return\ > \ nova\n\n except NotAcceptable as e:\n logging.warning(e)\n\ > \n except Exception as e:\n logging.warning(\"Nova connection\ > \ failed. 
%s: %s\" % (e.__class__.__name__, e))\n\n print(\"Couldn't\ > \ obtain a supported connection to nova, tried: %s\\n\" % repr(nova_versions))\n\ > \ return None\n\n\nparser = argparse.ArgumentParser(description='Process\ > \ some integers.')\nparser.add_argument('--config-file', dest='nova_config',\ > \ action='store',\n default=\"/etc/nova/nova.conf\"\ > ,\n help='path to nova configuration (default: /etc/nova/nova.conf)')\n\ > parser.add_argument('--nova-binary', dest='nova_binary', action='store',\n\ > \ default=\"/usr/bin/nova-compute\",\n \ > \ help='path to nova compute binary (default: /usr/bin/nova-compute)')\n\ > parser.add_argument('--enable-file', dest='enable_file', action='store',\n\ > \ default=\"/var/lib/nova/instanceha/enabled\",\n \ > \ help='file exists if instance HA is enabled on this\ > \ host '\\\n '(default: /var/lib/nova/instanceha/enabled)')\n\ > \n\nsections = {}\n(args, remaining) = parser.parse_known_args(sys.argv)\n\ > \nconfig = oslo_config.cfg.ConfigParser(args.nova_config, sections)\n\ > config.parse()\nconfig.sections[\"placement\"][\"insecure\"] = 0\nconfig.sections[\"\ > placement\"][\"verbose\"] = 1\n\nif os.path.isfile(args.enable_file):\n\ > \ connection = None\n while not connection:\n # Loop in case\ > \ the control plane is recovering when we run\n connection = create_nova_connection(config.sections[\"\ > placement\"])\n if not connection:\n time.sleep(10)\n\ > \n while not safe_to_start(connection, config.sections[\"DEFAULT\"\ > ][\"host\"][0]):\n time.sleep(10)\n\nreal_args = [args.nova_binary,\ > \ '--config-file', args.nova_config]\nreal_args.extend(remaining[1:])\n\ > os.execv(args.nova_binary, real_args)\n", dest: /var/lib/nova/instanceha/check-run-nova-compute, > mode: 493} > name: install Instance HA script that runs nova-compute > - {command: hiera -c /etc/puppet/hiera.yaml compute_instanceha_short_node_names, > name: Get list of instance HA compute nodes, register: iha_nodes} > - {file: 
path=/var/lib/nova/instanceha/enabled state=touch, name: If instance > HA is enabled on the node activate the evacuation completed check, when: iha_nodes.stdout|lower > | search('"'+ansible_hostname|lower+'"')} > name: install Instance HA recovery script > when: instance_ha_enabled|bool > - file: {path: '{{ item }}', state: directory} > name: create libvirt persistent data directories > with_items: [/etc/libvirt, /etc/libvirt/secrets, /etc/libvirt/qemu, /var/lib/libvirt, > /var/log/containers/libvirt] > - group: {gid: 107, name: qemu, state: present} > name: ensure qemu group is present on the host > - name: ensure qemu user is present on the host > user: {comment: qemu user, group: qemu, name: qemu, shell: /sbin/nologin, state: present, > uid: 107} > - file: {group: qemu, owner: qemu, path: /var/lib/vhost_sockets, setype: virt_cache_t, > seuser: system_u, state: directory} > name: create directory for vhost-user sockets with qemu ownership > - {command: /usr/bin/rpm -q libvirt-daemon, failed_when: false, name: check if > libvirt is installed, register: libvirt_installed} > - name: make sure libvirt services are disabled > service: {enabled: false, name: '{{ item }}', state: stopped} > when: libvirt_installed.rc == 0 > with_items: [libvirtd.service, virtlogd.socket] > role_data_kolla_config: > /var/lib/kolla/config_files/ceilometer_agent_compute.json: > command: /usr/bin/ceilometer-polling --polling-namespaces compute --logfile > /var/log/ceilometer/compute.log > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > /var/lib/kolla/config_files/iscsid.json: > command: /usr/sbin/iscsid -f > config_files: > - {dest: /etc/iscsi/, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src-iscsid/*} > /var/lib/kolla/config_files/logrotate-crond.json: > command: /usr/sbin/crond -s -n > config_files: > - {dest: /, merge: true, preserve_properties: true, source: 
/var/lib/kolla/config_files/src/*} > /var/lib/kolla/config_files/neutron_ovs_agent.json: > command: /neutron_ovs_agent_launcher.sh > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > permissions: > - {owner: 'neutron:neutron', path: /var/log/neutron, recurse: true} > /var/lib/kolla/config_files/nova-migration-target.json: > command: /usr/sbin/sshd -D -p 2022 > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > - {dest: /etc/ssh/, owner: root, perm: '0600', source: /host-ssh/ssh_host_*_key} > /var/lib/kolla/config_files/nova_compute.json: > command: '/usr/bin/nova-compute ' > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > - {dest: /etc/iscsi/, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src-iscsid/*} > - {dest: /etc/ceph/, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src-ceph/} > permissions: > - {owner: 'nova:nova', path: /var/log/nova, recurse: true} > - {owner: 'nova:nova', path: /var/lib/nova, recurse: true} > - {owner: 'nova:nova', path: /etc/ceph/ceph.client.openstack.keyring, perm: '0600'} > /var/lib/kolla/config_files/nova_libvirt.json: > command: /usr/sbin/libvirtd > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > - {dest: /etc/ceph/, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src-ceph/} > permissions: > - {owner: 'nova:nova', path: /etc/ceph/ceph.client.openstack.keyring, perm: '0600'} > /var/lib/kolla/config_files/nova_virtlogd.json: > command: /usr/sbin/virtlogd --config /etc/libvirt/virtlogd.conf > config_files: > - {dest: /, merge: true, preserve_properties: true, source: /var/lib/kolla/config_files/src/*} > role_data_logging_groups: [root] > role_data_logging_sources: [] > role_data_merged_config_settings: 
> ceilometer::agent::auth::auth_endpoint_type: internalURL > ceilometer::agent::auth::auth_password: C2xNfx9NxPBNTqpYrGMfYrUnn > ceilometer::agent::auth::auth_project_domain_name: Default > ceilometer::agent::auth::auth_region: regionOne > ceilometer::agent::auth::auth_tenant_name: service > ceilometer::agent::auth::auth_url: http://10.0.0.12:5000 > ceilometer::agent::auth::auth_user_domain_name: Default > ceilometer::agent::compute::instance_discovery_method: libvirt_metadata > ceilometer::agent::notification::event_pipeline_publishers: ['gnocchi://', 'panko://'] > ceilometer::agent::notification::manage_event_pipeline: true > ceilometer::agent::notification::manage_pipeline: false > ceilometer::agent::notification::pipeline_publishers: ['gnocchi://'] > ceilometer::agent::polling::manage_polling: false > ceilometer::debug: false > ceilometer::dispatcher::gnocchi::archive_policy: low > ceilometer::dispatcher::gnocchi::filter_project: service > ceilometer::dispatcher::gnocchi::resources_definition_file: gnocchi_resources.yaml > ceilometer::dispatcher::gnocchi::url: http://10.0.0.12:8041 > ceilometer::host: '%{::fqdn}' > ceilometer::keystone::authtoken::auth_uri: http://10.0.0.12:5000 > ceilometer::keystone::authtoken::auth_url: http://10.0.0.12:5000 > ceilometer::keystone::authtoken::password: C2xNfx9NxPBNTqpYrGMfYrUnn > ceilometer::keystone::authtoken::project_domain_name: Default > ceilometer::keystone::authtoken::project_name: service > ceilometer::keystone::authtoken::user_domain_name: Default > ceilometer::notification_driver: messagingv2 > ceilometer::rabbit_heartbeat_timeout_threshold: 60 > ceilometer::rabbit_password: ZsVg3E86aZ7ttpW8A4zdHVNkh > ceilometer::rabbit_port: 5672 > ceilometer::rabbit_use_ssl: 'False' > ceilometer::rabbit_userid: guest > ceilometer::snmpd_readonly_user_password: 034690b9f8c681c1a3a9d9cbe1f71477b30945e2 > ceilometer::snmpd_readonly_username: ro_snmp_user > ceilometer::telemetry_secret: 9MmHuzBPfvYK7YXbNWqDdDpzz > 
ceilometer_redis_password: qf9AHug3MR3sEvRDGxGNqbQuV > cold_migration_ssh_inbound_addr: internal_api > compute_namespace: true > kernel_modules: > nf_conntrack: {} > nf_conntrack_proto_sctp: {} > live_migration_ssh_inbound_addr: internal_api > neutron::agents::ml2::ovs::arp_responder: false > neutron::agents::ml2::ovs::bridge_mappings: ['datacentre:br-ex'] > neutron::agents::ml2::ovs::enable_distributed_routing: false > neutron::agents::ml2::ovs::extensions: [qos] > neutron::agents::ml2::ovs::l2_population: 'False' > neutron::agents::ml2::ovs::local_ip: tenant > neutron::agents::ml2::ovs::tunnel_types: [vxlan] > neutron::allow_overlapping_ips: true > neutron::core_plugin: ml2 > neutron::db::database_db_max_retries: -1 > neutron::db::database_max_retries: -1 > neutron::db::sync::db_sync_timeout: 300 > neutron::db::sync::extra_params: '' > neutron::debug: false > neutron::dhcp_agent_notification: true > neutron::dns_domain: openstacklocal > neutron::global_physnet_mtu: 1500 > neutron::host: '%{::fqdn}' > neutron::notification_driver: messagingv2 > neutron::plugins::ml2::extension_drivers: [qos, port_security] > neutron::plugins::ml2::firewall_driver: iptables_hybrid > neutron::plugins::ml2::flat_networks: [datacentre] > neutron::plugins::ml2::mechanism_drivers: [openvswitch] > neutron::plugins::ml2::network_vlan_ranges: ['datacentre:1:1000'] > neutron::plugins::ml2::overlay_ip_version: 4 > neutron::plugins::ml2::tenant_network_types: [vxlan] > neutron::plugins::ml2::tunnel_id_ranges: ['1:4094'] > neutron::plugins::ml2::type_drivers: [vxlan, vlan, flat, gre] > neutron::plugins::ml2::vni_ranges: ['1:4094'] > neutron::purge_config: false > neutron::rabbit_heartbeat_timeout_threshold: 60 > neutron::rabbit_password: ZsVg3E86aZ7ttpW8A4zdHVNkh > neutron::rabbit_port: 5672 > neutron::rabbit_use_ssl: 'False' > neutron::rabbit_user: guest > neutron::service_plugins: [router, qos, trunk] > nova::api_database_connection: 
mysql+pymysql://nova_api:8MMttgFpZ4fKUe99GWMpEwNqC@10.0.0.12/nova_api?read_default_group=tripleo&read_default_file=/etc/my.cnf.d/tripleo.cnf > nova::cell0_database_connection: mysql+pymysql://nova:8MMttgFpZ4fKUe99GWMpEwNqC@10.0.0.12/nova_cell0?read_default_group=tripleo&read_default_file=/etc/my.cnf.d/tripleo.cnf > nova::cinder_catalog_info: volumev3:cinderv3:internalURL > nova::compute::instance_usage_audit: true > nova::compute::instance_usage_audit_period: hour > nova::compute::libvirt::libvirt_enabled_perf_events: [] > nova::compute::libvirt::libvirt_virt_type: kvm > nova::compute::libvirt::manage_libvirt_services: false > nova::compute::libvirt::migration_support: false > nova::compute::libvirt::qemu::configure_qemu: true > nova::compute::libvirt::qemu::max_files: 32768 > nova::compute::libvirt::qemu::max_processes: 131072 > nova::compute::libvirt::services::libvirt_virt_type: kvm > nova::compute::libvirt::vncserver_listen: internal_api > nova::compute::neutron::libvirt_vif_driver: '' > nova::compute::pci::passthrough: '' > nova::compute::rbd::ephemeral_storage: false > nova::compute::rbd::libvirt_images_rbd_ceph_conf: /etc/ceph/ceph.conf > nova::compute::rbd::libvirt_images_rbd_pool: vms > nova::compute::rbd::libvirt_rbd_secret_key: AQBj1CdbAAAAABAAEC/nAQIjGJ/DWDu7yJkDJQ== > nova::compute::rbd::libvirt_rbd_secret_uuid: 18c6dc78-730f-11e8-a315-525400d7caac > nova::compute::rbd::libvirt_rbd_user: openstack > nova::compute::rbd::rbd_keyring: client.openstack > nova::compute::reserved_host_memory: 4096 > nova::compute::vcpu_pin_set: [] > nova::compute::verify_glance_signatures: false > nova::compute::vncproxy_host: 172.16.18.34 > nova::compute::vncserver_proxyclient_address: internal_api > nova::cron::archive_deleted_rows::destination: /var/log/nova/nova-rowsflush.log > nova::cron::archive_deleted_rows::hour: '0' > nova::cron::archive_deleted_rows::max_rows: '100' > nova::cron::archive_deleted_rows::minute: '1' > nova::cron::archive_deleted_rows::month: '*' > 
nova::cron::archive_deleted_rows::monthday: '*' > nova::cron::archive_deleted_rows::until_complete: false > nova::cron::archive_deleted_rows::user: nova > nova::cron::archive_deleted_rows::weekday: '*' > nova::database_connection: mysql+pymysql://nova:8MMttgFpZ4fKUe99GWMpEwNqC@10.0.0.12/nova?read_default_group=tripleo&read_default_file=/etc/my.cnf.d/tripleo.cnf > nova::db::database_db_max_retries: -1 > nova::db::database_max_retries: -1 > nova::db::sync::db_sync_timeout: 300 > nova::db::sync_api::db_sync_timeout: 300 > nova::debug: false > nova::glance_api_servers: http://10.0.0.12:9292 > nova::host: '%{::fqdn}' > nova::migration::live_migration_tunnelled: false > nova::my_ip: internal_api > nova::network::neutron::dhcp_domain: '' > nova::network::neutron::neutron_auth_type: v3password > nova::network::neutron::neutron_auth_url: http://192.168.0.19:35357/v3 > nova::network::neutron::neutron_ovs_bridge: br-int > nova::network::neutron::neutron_password: 8TNuQY4exAwRpGBdxAEh4R6zR > nova::network::neutron::neutron_project_name: service > nova::network::neutron::neutron_region_name: regionOne > nova::network::neutron::neutron_url: http://10.0.0.12:9696 > nova::network::neutron::neutron_username: neutron > nova::notification_driver: messagingv2 > nova::notification_format: unversioned > nova::notify_on_state_change: vm_and_task_state > nova::placement::auth_url: http://10.0.0.12:5000 > nova::placement::os_interface: internal > nova::placement::os_region_name: regionOne > nova::placement::password: 8MMttgFpZ4fKUe99GWMpEwNqC > nova::placement::project_name: service > nova::placement_database_connection: mysql+pymysql://nova_placement:8MMttgFpZ4fKUe99GWMpEwNqC@10.0.0.12/nova_placement?read_default_group=tripleo&read_default_file=/etc/my.cnf.d/tripleo.cnf > nova::purge_config: false > nova::rabbit_heartbeat_timeout_threshold: 60 > nova::rabbit_password: ZsVg3E86aZ7ttpW8A4zdHVNkh > nova::rabbit_port: 5672 > nova::rabbit_use_ssl: 'False' > nova::rabbit_userid: guest > 
nova::use_ipv6: false > nova::vncproxy::common::vncproxy_host: 172.16.18.34 > nova::vncproxy::common::vncproxy_port: '6080' > nova::vncproxy::common::vncproxy_protocol: http > ntp::iburst_enable: true > 'ntp::maxpoll:': 10 > 'ntp::minpoll:': 6 > ntp::servers: [clock.redhat.com, clock.redhat.com] > rbd_persistent_storage: false > snmp::agentaddress: ['udp:161', 'udp6:[::1]:161'] > snmp::snmpd_options: -LS0-5d > snmpd_network: internal_api_subnet > sysctl_settings: > fs.inotify.max_user_instances: {value: 1024} > fs.suid_dumpable: {value: 0} > kernel.dmesg_restrict: {value: 1} > kernel.pid_max: {value: 1048576} > net.core.netdev_max_backlog: {value: 10000} > net.ipv4.conf.all.arp_accept: {value: 1} > net.ipv4.conf.all.log_martians: {value: 1} > net.ipv4.conf.all.secure_redirects: {value: 0} > net.ipv4.conf.all.send_redirects: {value: 0} > net.ipv4.conf.default.accept_redirects: {value: 0} > net.ipv4.conf.default.log_martians: {value: 1} > net.ipv4.conf.default.secure_redirects: {value: 0} > net.ipv4.conf.default.send_redirects: {value: 0} > net.ipv4.ip_forward: {value: 1} > net.ipv4.neigh.default.gc_thresh1: {value: 1024} > net.ipv4.neigh.default.gc_thresh2: {value: 2048} > net.ipv4.neigh.default.gc_thresh3: {value: 4096} > net.ipv4.tcp_keepalive_intvl: {value: 1} > net.ipv4.tcp_keepalive_probes: {value: 5} > net.ipv4.tcp_keepalive_time: {value: 5} > net.ipv6.conf.all.accept_ra: {value: 0} > net.ipv6.conf.all.accept_redirects: {value: 0} > net.ipv6.conf.all.autoconf: {value: 0} > net.ipv6.conf.all.disable_ipv6: {value: 0} > net.ipv6.conf.default.accept_ra: {value: 0} > net.ipv6.conf.default.accept_redirects: {value: 0} > net.ipv6.conf.default.autoconf: {value: 0} > net.ipv6.conf.default.disable_ipv6: {value: 0} > net.netfilter.nf_conntrack_max: {value: 500000} > net.nf_conntrack_max: {value: 500000} > timezone::timezone: UTC > tripleo.neutron_ovs_agent.firewall_rules: > 118 neutron vxlan networks: {dport: 4789, proto: udp} > 136 neutron gre networks: {proto: gre} > 
tripleo.nova_libvirt.firewall_rules: > 200 nova_libvirt: > dport: [16514, 49152-49215, 5900-6923] > tripleo.nova_migration_target.firewall_rules: > 113 nova_migration_target: > dport: [2022] > tripleo.ntp.firewall_rules: > 105 ntp: {dport: 123, proto: udp} > tripleo.snmp.firewall_rules: > 124 snmp: {dport: 161, proto: udp, source: '%{hiera(''snmpd_network'')}'} > tripleo::firewall::manage_firewall: true > tripleo::firewall::purge_firewall_rules: false > tripleo::packages::enable_install: false > tripleo::profile::base::certmonger_user::libvirt_postsave_cmd: 'true' > tripleo::profile::base::database::mysql::client::enable_ssl: false > tripleo::profile::base::database::mysql::client::mysql_client_bind_address: internal_api > tripleo::profile::base::database::mysql::client::ssl_ca: /etc/ipa/ca.crt > tripleo::profile::base::docker::additional_sockets: [/var/lib/openstack/docker.sock] > tripleo::profile::base::docker::configure_network: true > tripleo::profile::base::docker::debug: false > tripleo::profile::base::docker::docker_options: --log-driver=journald --signature-verification=false > --iptables=false --live-restore > tripleo::profile::base::docker::insecure_registries: ['brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888', > 'rhos-qe-mirror-tlv.usersys.redhat.com:5000'] > tripleo::profile::base::docker::network_options: --bip=172.31.0.1/24 > tripleo::profile::base::nova::compute::cinder_nfs_backend: false > tripleo::profile::base::nova::migration::client::libvirt_enabled: true > tripleo::profile::base::nova::migration::client::nova_compute_enabled: true > tripleo::profile::base::nova::migration::client::ssh_port: 2022 > tripleo::profile::base::nova::migration::client::ssh_private_key: '-----BEGIN > RSA PRIVATE KEY----- > > MIIEogIBAAKCAQEA7cI/Ct89ABnyQpI6nrvMclCRBT+LWX/5PpcBf07qveagJynp > > 3kxUxod7cGd9dp2m0/wUMUMm0BAWllHwUzIvyijify3EKLdI0JyDZ3y9Oioed7wC > > FNUQJ7tQ4APF48q6AG9HJrqGyBLT7mfxSacs1egrUNXjJwpo/8jKNti58Q9NDDpH > > 
zmdmDVIHr4siCkkt18LAlps9+2fiGj7qmingNyqirVSt8JwWPxQ+wzhxMlkZ1ybI > > lnp22OD6INxwFCZRXv1iwtjfWHIyt/MTz9+36G8wurPCR1xMXnf1Swy/Gkh7o9Vx > > GJnAnjRnOlYiQNKzEg5/MJ97vVecs3fKL4VfowIDAQABAoIBAF4EEJ/A9MMkf2qZ > > njqEcmzFtQLJRNYoR6vbVIk98I8VMkEqTsNLlb5HdqVVFwSfhIeQx39PWXqu0e+x > > CQIs9z7OefnmZAnhqBZtvXxflAf6/b9FBeGs+s1rQajDdS4FTRW86vrCiWd2In3E > > dmI0jAMt4mr9mOwoLqrugB++BZDvQ8Lgod9lYdHZa9cCV2k9bTtOTWLCZ7DN17qY > > Ts0yQEZP31/629D4inGUOPenJ0vfXOVNZwdbKRVDMb/7gnpNi3O+9EBCmrQgbf9G > > ziwv6Ni3OU9Ubt4M/TppS8KP6+Wyh4V4RqNF3/IgfuvvT5RCCr78/D/3nQ5FGxQ4 > > kBAnR9ECgYEA/0gLNBOvV6M9mBEqzRSD1j4BYXLDpXTU9JysFcg6A/abJ4NjhjCt > > c0XeTPE78qE922vHSvtebX0/t9YCR8kurSniQXStQjDEtDWdURHfFeXeenbBQLwN > > y9SZMyA5v1fjqPH7wyB5mF23/PlXLVu+Zbtob/BV1ZAo/k8L5Qbu0BkCgYEA7m2T > > XZSTZS8zr0LhXCpilSpuCkOGQJDfNC0nGll4ACAKBr9uJjhi7nahfGLxBCIQ7toR > > ZfMLrGrUghr9EDN+9Xb2Kju9FZNwvZDv8vBw5q3HIdEGIe+N9+fgz73eOe/y87Vh > > AN0fbI+08GYi9+bzOBl5XfuarAQL8SGErWhKdRsCgYAec++yNK70G2sbmuCqOkpH > > 4nmzsEcak8dbrtzwdERHeMGlgwz6UxA98hXEw/pMxWNlq4PItBfdo9VYDtWSzrMc > > wSRWfp60iLDOdp7fruwX5qYVodkH6ecTa7ci3DNHWiq0WrmX22XmATgTm4MEzC74 > > X9TjlZ4HrfzV6YQXvC388QKBgHpfqURdrq0/ZdKtjAeHBaQz69i+wZhAAx+/kRNY > > GlxOFXdTJ5DcXgKwxr+V9Mg/R9uf91ARR8uN5VaFzZ9UHvSTw1L36Fu7miDh1BYn > > zHrJdKxpoTEnyAEWdW+KrxWjj1oAnTIZTDinJaJBee8tDWacxIs1STTCuvJfVr6+ > > CYFDAoGAGWWxXSchMoxnwKe8kuvUt5lsGtM25X+tscfkZdaxunfFlh6KKImP/+1O > > KxHnQVd0a43Q7JUDoNtsaCufeu058BukVQyN84948qnzxpfDNzblRdEuoApy++Mc > > mVZsy4n/ACbkQI7kHkPjS6lWqQ5TknhiCy27PlAl45mAu3kYXKk= > > -----END RSA PRIVATE KEY----- > > ' > tripleo::profile::base::nova::migration::target::ssh_authorized_keys: [ssh-rsa > AAAAB3NzaC1yc2EAAAADAQABAAABAQDtwj8K3z0AGfJCkjqeu8xyUJEFP4tZf/k+lwF/Tuq95qAnKeneTFTGh3twZ312nabT/BQxQybQEBaWUfBTMi/KKOJ/LcQot0jQnINnfL06Kh53vAIU1RAnu1DgA8XjyroAb0cmuobIEtPuZ/FJpyzV6CtQ1eMnCmj/yMo22LnxD00MOkfOZ2YNUgeviyIKSS3XwsCWmz37Z+IaPuqaKeA3KqKtVK3wnBY/FD7DOHEyWRnXJsiWenbY4Pog3HAUJlFe/WLC2N9YcjK38xPP37fobzC6s8JHXExed/VLDL8aSHuj1XEYmcCeNGc6ViJA0rMSDn8wn3u9V5yzd8ovhV+j > 
Generated by TripleO] > tripleo::profile::base::nova::migration::target::ssh_localaddrs: ['%{hiera(''cold_migration_ssh_inbound_addr'')}', > '%{hiera(''live_migration_ssh_inbound_addr'')}'] > tripleo::profile::base::snmp::snmpd_password: 034690b9f8c681c1a3a9d9cbe1f71477b30945e2 > tripleo::profile::base::snmp::snmpd_user: ro_snmp_user > tripleo::profile::base::sshd::bannertext: '' > tripleo::profile::base::sshd::motd: '' > tripleo::profile::base::sshd::options: > AcceptEnv: [LANG LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES, > LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT, LC_IDENTIFICATION > LC_ALL LANGUAGE, XMODIFIERS] > AuthorizedKeysFile: .ssh/authorized_keys > ChallengeResponseAuthentication: 'no' > GSSAPIAuthentication: 'yes' > GSSAPICleanupCredentials: 'no' > HostKey: [/etc/ssh/ssh_host_rsa_key, /etc/ssh/ssh_host_ecdsa_key, /etc/ssh/ssh_host_ed25519_key] > PasswordAuthentication: 'no' > Subsystem: sftp /usr/libexec/openssh/sftp-server > SyslogFacility: AUTHPRIV > UseDNS: 'no' > UsePAM: 'yes' > UsePrivilegeSeparation: sandbox > X11Forwarding: 'yes' > tripleo::profile::base::sshd::port: 22 > tripleo::profile::base::tuned::profile: '' > tripleo::trusted_cas::ca_map: {} > vswitch::ovs::enable_hw_offload: false > role_data_monitoring_subscriptions: [] > role_data_post_update_tasks: [] > role_data_post_upgrade_tasks: [] > role_data_pre_upgrade_rolling_tasks: [] > role_data_puppet_config: > - {config_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-ceilometer-central:2018-06-15.2', > config_volume: ceilometer, puppet_tags: ceilometer_config, step_config: 'include > ::tripleo::profile::base::ceilometer::agent::polling > > '} > - {config_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-neutron-server:2018-06-15.2', > config_volume: neutron, puppet_tags: neutron_plugin_ml2, step_config: 'include > ::tripleo::profile::base::neutron::plugins::ml2 > > '} > - config_image: 
rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-neutron-server:2018-06-15.2 > config_volume: neutron > puppet_tags: neutron_config,neutron_agent_ovs,neutron_plugin_ml2 > step_config: 'include ::tripleo::profile::base::neutron::ovs > > ' > volumes: ['/lib/modules:/lib/modules:ro', '/run/openvswitch:/run/openvswitch'] > - config_image: rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-iscsid:2018-06-15.2 > config_volume: iscsid > puppet_tags: iscsid_config > step_config: include ::tripleo::profile::base::iscsid > volumes: ['/etc/iscsi:/etc/iscsi'] > - {config_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-nova-compute:2018-06-15.2', > config_volume: nova_libvirt, puppet_tags: 'nova_config,nova_paste_api_ini', > step_config: '# TODO(emilien): figure how to deal with libvirt profile. > > # We''ll probably treat it like we do with Neutron plugins. > > # Until then, just include it in the default nova-compute role. > > include tripleo::profile::base::nova::compute::libvirt > > > include ::tripleo::profile::base::database::mysql::client'} > - {config_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-nova-compute:2018-06-15.2', > config_volume: nova_libvirt, puppet_tags: 'libvirtd_config,nova_config,file,libvirt_tls_password', > step_config: 'include tripleo::profile::base::nova::libvirt > > > include ::tripleo::profile::base::database::mysql::client'} > - {config_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-nova-compute:2018-06-15.2', > config_volume: nova_libvirt, step_config: 'include ::tripleo::profile::base::sshd > > include tripleo::profile::base::nova::migration::target'} > - {config_image: 'rhos-qe-mirror-tlv.usersys.redhat.com:5000/rhosp13/openstack-cron:2018-06-15.2', > config_volume: crond, step_config: 'include ::tripleo::profile::base::logging::logrotate'} > role_data_service_config_settings: {} > role_data_service_metadata_settings: null > role_data_service_names: 
[ca_certs, ceilometer_agent_compute, neutron_plugin_ml2, > neutron_ovs_agent, docker, iscsid, kernel, mysql_client, nova_compute, nova_libvirt, > nova_migration_target, ntp, logrotate_crond, snmp, sshd, timezone, tripleo_firewall, > tripleo_packages, tuned] > role_data_step_config: "# Copyright 2014 Red Hat, Inc.\n# All Rights Reserved.\n\ > #\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n\ > # not use this file except in compliance with the License. You may obtain\n\ > # a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n\ > #\n# Unless required by applicable law or agreed to in writing, software\n#\ > \ distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n\ > # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n\ > # License for the specific language governing permissions and limitations\n\ > # under the License.\n\n# Common config, from tripleo-heat-templates/puppet/manifests/overcloud_common.pp\n\ > # The content of this file will be used to generate\n# the puppet manifests\ > \ for all roles, the placeholder\n# Compute will be replaced by 'controller',\ > \ 'blockstorage',\n# 'cephstorage' and all the deployed roles.\n\nif hiera('step')\ > \ >= 4 {\n hiera_include('Compute_classes', [])\n}\n\n$package_manifest_name\ > \ = join(['/var/lib/tripleo/installed-packages/overcloud_Compute', hiera('step')])\n\ > package_manifest{$package_manifest_name: ensure => present}\n\n# End of overcloud_common.pp\n\ > \ninclude ::tripleo::trusted_cas\ninclude ::tripleo::profile::base::docker\n\ > \ninclude ::tripleo::profile::base::kernel\ninclude ::tripleo::profile::base::database::mysql::client\n\ > include ::tripleo::profile::base::time::ntp\ninclude ::tripleo::profile::base::snmp\n\ > \ninclude ::tripleo::profile::base::sshd\n\ninclude ::timezone\ninclude ::tripleo::firewall\n\ > \ninclude ::tripleo::packages\n\ninclude ::tripleo::profile::base::tuned" > 
role_data_update_tasks: > - {lineinfile: dest=/etc/sysconfig/iptables regexp=".*neutron-" state=absent, > name: Remove IPv4 iptables rules created by Neutron that are persistent, when: step|int > == 5} > - {lineinfile: dest=/etc/sysconfig/ip6tables regexp=".*neutron-" state=absent, > name: Remove IPv6 iptables rules created by Neutron that are persistent, when: step|int > == 5} > - block: > - {failed_when: false, name: Detect if puppet on the docker profile would restart > the service, register: puppet_docker_noop_output, shell: "puppet apply --noop\ > \ --summarize --detailed-exitcodes --verbose \\\n --modulepath /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules\ > \ \\\n --color=false -e \"class { 'tripleo::profile::base::docker': step\ > \ => 1, }\" 2>&1 | \\\nawk -F \":\" '/Out of sync:/ { print $2}'\n"} > - {changed_when: docker_check_update.rc == 100, failed_when: 'docker_check_update.rc > not in [0, 100]', name: Is docker going to be updated, register: docker_check_update, > shell: yum check-update docker} > - {name: Set docker_rpm_needs_update fact, set_fact: 'docker_rpm_needs_update={{ > docker_check_update.rc == 100 }}'} > - {name: Set puppet_docker_is_outofsync fact, set_fact: 'puppet_docker_is_outofsync={{ > puppet_docker_noop_output.stdout|trim|int >= 1 }}'} > - {name: Stop all containers, shell: docker ps -q | xargs --no-run-if-empty > -n1 docker stop, when: puppet_docker_is_outofsync or docker_rpm_needs_update} > - name: Stop docker > service: {name: docker, state: stopped} > when: puppet_docker_is_outofsync or docker_rpm_needs_update > - {name: Update the docker package, when: docker_rpm_needs_update, yum: name=docker > state=latest update_cache=yes} > - {changed_when: puppet_docker_apply.rc == 2, failed_when: 'puppet_docker_apply.rc > not in [0, 2]', name: Apply puppet which will start the service again, register: puppet_docker_apply, > shell: "puppet apply --detailed-exitcodes --verbose \\\n --modulepath 
/etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules\ > \ \\\n -e \"class { 'tripleo::profile::base::docker': step => 1, }\"\n"} > when: step|int == 2 > - {name: Check for existing yum.pid, register: yum_pid_file, stat: path=/var/run/yum.pid, > when: step|int == 0 or step|int == 3} > - {fail: msg="ERROR existing yum.pid detected - can't continue! Please ensure > there is no other package update process for the duration of the minor update > worfklow. Exiting.", name: Exit if existing yum process, when: (step|int == > 0 or step|int == 3) and yum_pid_file.stat.exists} > - {name: Update all packages, when: step == "3", yum: name=* state=latest update_cache=yes} > role_data_upgrade_batch_tasks: [] > role_data_upgrade_tasks: > - {command: systemctl is-enabled --quiet openstack-ceilometer-compute, ignore_errors: true, > name: Check if openstack-ceilometer-compute is deployed, register: openstack_ceilometer_compute_enabled, > tags: common} > - {command: systemctl is-enabled --quiet openstack-ceilometer-polling, ignore_errors: true, > name: Check if openstack-ceilometer-polling is deployed, register: openstack_ceilometer_polling_enabled, > tags: common} > - command: systemctl is-active --quiet openstack-ceilometer-compute > name: 'PreUpgrade step0,validation: Check service openstack-ceilometer-compute > is running' > tags: validation > when: [step|int == 0, openstack_ceilometer_compute_enabled.rc == 0] > - command: systemctl is-active --quiet openstack-ceilometer-polling > name: 'PreUpgrade step0,validation: Check service openstack-ceilometer-polling > is running' > tags: validation > when: [step|int == 0, openstack_ceilometer_polling_enabled.rc == 0] > - name: Stop and disable ceilometer compute agent > service: name=openstack-ceilometer-compute state=stopped enabled=no > when: [step|int == 2, openstack_ceilometer_compute_enabled.rc|default('') == > 0] > - name: Stop and disable ceilometer polling agent > service: 
name=openstack-ceilometer-polling state=stopped enabled=no > when: [step|int == 2, openstack_ceilometer_polling_enabled.rc|default('') == > 0] > - name: Set fact for removal of openstack-ceilometer-compute and polling package > set_fact: {remove_ceilometer_compute_polling_package: false} > when: step|int == 2 > - ignore_errors: true > name: Remove openstack-ceilometer-compute package if operator requests it > when: [step|int == 2, remove_ceilometer_compute_polling_package|bool] > yum: name=openstack-ceilometer-compute state=removed > - ignore_errors: true > name: Remove openstack-ceilometer-polling package if operator requests it > when: [step|int == 2, remove_ceilometer_compute_polling_package|bool] > yum: name=openstack-ceilometer-polling state=removed > - {ignore_errors: true, name: Check openvswitch version., register: ovs_version, > shell: 'rpm -qa | awk -F- ''/^openvswitch-2/{print $2 "-" $3}''', when: step|int > == 2} > - {ignore_errors: true, name: Check openvswitch packaging., register: ovs_packaging_issue, > shell: 'rpm -q --scripts openvswitch | awk ''/postuninstall/,/*/'' | grep -q > "systemctl.*try-restart"', when: step|int == 2} > - block: > - file: {path: /root/OVS_UPGRADE, state: absent} > name: 'Ensure empty directory: emptying.' > - file: {group: root, mode: 488, owner: root, path: /root/OVS_UPGRADE, state: directory} > name: 'Ensure empty directory: creating.' 
> - {command: yum makecache, name: Make yum cache.} > - {command: yumdownloader --destdir /root/OVS_UPGRADE --resolve openvswitch, > name: Download OVS packages.} > - {name: Get rpm list for manual upgrade of OVS., register: ovs_list_of_rpms, > shell: ls -1 /root/OVS_UPGRADE/*.rpm} > - args: {chdir: /root/OVS_UPGRADE} > name: Manual upgrade of OVS > shell: 'rpm -U --test {{item}} 2>&1 | grep "already installed" || \ > > rpm -U --replacepkgs --notriggerun --nopostun {{item}}; > > ' > with_items: ['{{ovs_list_of_rpms.stdout_lines}}'] > when: [step|int == 2, '''2.5.0-14'' in ovs_version.stdout|default('''') or ovs_packaging_issue|default(false)|succeeded'] > - {command: systemctl is-enabled --quiet neutron-openvswitch-agent, ignore_errors: true, > name: Check if neutron_ovs_agent is deployed, register: neutron_ovs_agent_enabled, > tags: common} > - command: systemctl is-active --quiet neutron-openvswitch-agent > name: 'PreUpgrade step0,validation: Check service neutron-openvswitch-agent > is running' > tags: validation > when: [step|int == 0, neutron_ovs_agent_enabled.rc == 0] > - name: Stop and disable neutron_ovs_agent service > service: name=neutron-openvswitch-agent state=stopped enabled=no > when: [step|int == 2, neutron_ovs_agent_enabled.rc == 0] > - name: Set fact for removal of openstack-neutron-openvswitch package > set_fact: {remove_neutron_openvswitch_package: false} > when: step|int == 2 > - ignore_errors: true > name: Remove openstack-neutron-openvswitch package if operator requests it > when: [step|int == 2, remove_neutron_openvswitch_package|bool] > yum: name=openstack-neutron-openvswitch state=removed > - {name: Install docker packages on upgrade if missing, when: step|int == 3, yum: name=docker > state=latest} > - {command: systemctl is-enabled --quiet iscsid, ignore_errors: true, name: Check > if iscsid service is deployed, register: iscsid_enabled, tags: common} > - command: systemctl is-active --quiet iscsid > name: 'PreUpgrade step0,validation: 
Check if iscsid is running' > tags: validation > when: [step|int == 0, iscsid_enabled.rc == 0] > - name: Stop and disable iscsid service > service: name=iscsid state=stopped enabled=no > when: [step|int == 2, iscsid_enabled.rc == 0] > - {command: systemctl is-enabled --quiet iscsid.socket, ignore_errors: true, name: Check > if iscsid.socket service is deployed, register: iscsid_socket_enabled, tags: common} > - command: systemctl is-active --quiet iscsid.socket > name: 'PreUpgrade step0,validation: Check if iscsid.socket is running' > tags: validation > when: [step|int == 0, iscsid_socket_enabled.rc == 0] > - name: Stop and disable iscsid.socket service > service: name=iscsid.socket state=stopped enabled=no > when: [step|int == 2, iscsid_socket_enabled.rc == 0] > - {command: systemctl is-enabled --quiet openstack-nova-compute, ignore_errors: true, > name: Check if nova_compute is deployed, register: nova_compute_enabled, tags: common} > - {ini_file: dest=/etc/nova/nova.conf section=upgrade_levels option=compute value=, > name: Set compute upgrade level to auto, when: step|int == 1} > - command: systemctl is-active --quiet openstack-nova-compute > name: 'PreUpgrade step0,validation: Check service openstack-nova-compute is > running' > tags: validation > when: [step|int == 0, nova_compute_enabled.rc == 0] > - name: Stop and disable nova-compute service > service: name=openstack-nova-compute state=stopped enabled=no > when: [step|int == 2, nova_compute_enabled.rc == 0] > - name: Set fact for removal of openstack-nova-compute package > set_fact: {remove_nova_compute_package: false} > when: step|int == 2 > - ignore_errors: true > name: Remove openstack-nova-compute package if operator requests it > when: [step|int == 2, remove_nova_compute_package|bool] > yum: name=openstack-nova-compute state=removed > - {command: systemctl is-enabled --quiet libvirtd, ignore_errors: true, name: Check > if nova_libvirt is deployed, register: nova_libvirt_enabled, tags: common} > - 
command: systemctl is-active --quiet libvirtd > name: 'PreUpgrade step0,validation: Check service libvirtd is running' > tags: validation > when: [step|int == 0, nova_libvirt_enabled.rc == 0] > - name: Stop and disable libvirtd service > service: name=libvirtd state=stopped enabled=no > when: [step|int == 2, nova_libvirt_enabled.rc == 0] > - {name: Stop snmp service, service: name=snmpd state=stopped, when: step|int > == 1} > - args: {creates: /etc/sysconfig/ip6tables.n-o-upgrade} > name: blank ipv6 rule before activating ipv6 firewall. > shell: cat /etc/sysconfig/ip6tables > /etc/sysconfig/ip6tables.n-o-upgrade; > cat</dev/null>/etc/sysconfig/ip6tables > when: step|int == 3 > - {name: Check yum for rpm-python present, register: rpm_python_check, when: step|int > == 0, yum: name=rpm-python state=present} > - fail: msg="rpm-python package was not present before this run! Check environment > before re-running" > name: Fail when rpm-python wasn't present > when: [step|int == 0, rpm_python_check.changed != false] > - {name: Check for os-net-config upgrade, register: os_net_config_need_upgrade, > shell: 'yum check-upgrade | awk ''/os-net-config/{print}''', when: step|int > == 3} > - {ignore_errors: true, name: Check that os-net-config has configuration, register: os_net_config_has_config, > shell: test -s /etc/os-net-config/config.json, when: step|int == 3} > - block: > - {name: Upgrade os-net-config, yum: name=os-net-config state=latest} > - {changed_when: os_net_config_upgrade.rc == 2, command: os-net-config --no-activate > -c /etc/os-net-config/config.json -v --detailed-exit-codes, failed_when: 'os_net_config_upgrade.rc > not in [0,2]', name: take new os-net-config parameters into account now, > register: os_net_config_upgrade} > when: [step|int == 3, os_net_config_need_upgrade.stdout, os_net_config_has_config.rc > == 0] > - {name: Update all packages, when: step|int == 3, yum: name=* state=latest} > role_data_workflow_tasks: {} > role_name: Compute >overcloud: > 
children: > Compute: {} > Controller: {} > vars: {ctlplane_vip: 192.168.0.19, external_vip: 172.16.18.34, internal_api_vip: 10.0.0.12, > redis_vip: 10.0.0.19, storage_mgmt_vip: 10.0.1.13, storage_vip: 10.0.0.147} >aodh_evaluator: > children: > Controller: {} > vars: {ansible_ssh_user: heat-admin} >kernel: > children: > Compute: {} > Controller: {} > vars: {ansible_ssh_user: heat-admin} >neutron_metadata: > children: > Controller: {} > vars: {ansible_ssh_user: heat-admin} >pacemaker: > children: > Controller: {} > vars: {ansible_ssh_user: heat-admin} >nova_placement: > children: > Controller: {} > vars: {ansible_ssh_user: heat-admin} >snmp: > children: > Compute: {} > Controller: {} > vars: {ansible_ssh_user: heat-admin} >heat_api: > children: > Controller: {} > vars: {ansible_ssh_user: heat-admin} >cinder_api: > children: > Controller: {} > vars: {ansible_ssh_user: heat-admin} >neutron_l3: > children: > Controller: {} > vars: {ansible_ssh_user: heat-admin} >swift_proxy: > children: > Controller: {} > vars: {ansible_ssh_user: heat-admin} >aodh_listener: > children: > Controller: {} > vars: {ansible_ssh_user: heat-admin} >swift_ringbuilder: > children: > Controller: {} > vars: {ansible_ssh_user: heat-admin} >neutron_dhcp: > children: > Controller: {} > vars: {ansible_ssh_user: heat-admin} >gnocchi_api: > children: > Controller: {} > vars: {ansible_ssh_user: heat-admin} >timezone: > children: > Compute: {} > Controller: {} > vars: {ansible_ssh_user: heat-admin} >ceilometer_agent_central: > children: > Controller: {} > vars: {ansible_ssh_user: heat-admin} >heat_api_cloudwatch_disabled: > children: > Controller: {} > vars: {ansible_ssh_user: heat-admin} >aodh_notifier: > children: > Controller: {} > vars: {ansible_ssh_user: heat-admin} >tripleo_firewall: > children: > Compute: {} > Controller: {} > vars: {ansible_ssh_user: heat-admin} >swift_storage: > children: > Controller: {} > vars: {ansible_ssh_user: heat-admin} >redis: > children: > Controller: {} > vars: 
{ansible_ssh_user: heat-admin} >gnocchi_statsd: > children: > Controller: {} > vars: {ansible_ssh_user: heat-admin} >iscsid: > children: > Compute: {} > Controller: {} > vars: {ansible_ssh_user: heat-admin} >nova_conductor: > children: > Controller: {} > vars: {ansible_ssh_user: heat-admin} >mysql_client: > children: > Compute: {} > Controller: {} > vars: {ansible_ssh_user: heat-admin} >nova_consoleauth: > children: > Controller: {} > vars: {ansible_ssh_user: heat-admin} >glance_api: > children: > Controller: {} > vars: {ansible_ssh_user: heat-admin} >keystone: > children: > Controller: {} > vars: {ansible_ssh_user: heat-admin} >cinder_volume: > children: > Controller: {} > vars: {ansible_ssh_user: heat-admin} >ceilometer_collector_disabled: > children: > Controller: {} > vars: {ansible_ssh_user: heat-admin} >ceilometer_agent_notification: > children: > Controller: {} > vars: {ansible_ssh_user: heat-admin} >memcached: > children: > Controller: {} > vars: {ansible_ssh_user: heat-admin} >mongodb_disabled: > children: > Controller: {} > vars: {ansible_ssh_user: heat-admin} >neutron_plugin_ml2: > children: > Compute: {} > Controller: {} > vars: {ansible_ssh_user: heat-admin} >nova_api: > children: > Controller: {} > vars: {ansible_ssh_user: heat-admin} >aodh_api: > children: > Controller: {} > vars: {ansible_ssh_user: heat-admin} >nova_metadata: > children: > Controller: {} > vars: {ansible_ssh_user: heat-admin} >heat_engine: > children: > Controller: {} > vars: {ansible_ssh_user: heat-admin} >ntp: > children: > Compute: {} > Controller: {} > vars: {ansible_ssh_user: heat-admin} >ceilometer_expirer_disabled: > children: > Controller: {} > vars: {ansible_ssh_user: heat-admin} >ceilometer_api_disabled: > children: > Controller: {} > vars: {ansible_ssh_user: heat-admin} >nova_migration_target: > children: > Compute: {} > vars: {ansible_ssh_user: heat-admin} >cinder_scheduler: > children: > Controller: {} > vars: {ansible_ssh_user: heat-admin} >gnocchi_metricd: > children: 
> Controller: {} > vars: {ansible_ssh_user: heat-admin} >tripleo_packages: > children: > Compute: {} > Controller: {} > vars: {ansible_ssh_user: heat-admin} >nova_scheduler: > children: > Controller: {} > vars: {ansible_ssh_user: heat-admin} >nova_compute: > children: > Compute: {} > vars: {ansible_ssh_user: heat-admin} >logrotate_crond: > children: > Compute: {} > Controller: {} > vars: {ansible_ssh_user: heat-admin} >neutron_ovs_agent: > children: > Compute: {} > Controller: {} > vars: {ansible_ssh_user: heat-admin} >haproxy: > children: > Controller: {} > vars: {ansible_ssh_user: heat-admin} >sshd: > children: > Compute: {} > Controller: {} > vars: {ansible_ssh_user: heat-admin} >mysql: > children: > Controller: {} > vars: {ansible_ssh_user: heat-admin} >ceilometer_agent_compute: > children: > Compute: {} > vars: {ansible_ssh_user: heat-admin} >nova_libvirt: > children: > Compute: {} > vars: {ansible_ssh_user: heat-admin} >rabbitmq: > children: > Controller: {} > vars: {ansible_ssh_user: heat-admin} >tuned: > children: > Compute: {} > Controller: {} > vars: {ansible_ssh_user: heat-admin} >panko_api: > children: > Controller: {} > vars: {ansible_ssh_user: heat-admin} >horizon: > children: > Controller: {} > vars: {ansible_ssh_user: heat-admin} >neutron_api: > children: > Controller: {} > vars: {ansible_ssh_user: heat-admin} >ca_certs: > children: > Compute: {} > Controller: {} > vars: {ansible_ssh_user: heat-admin} >heat_api_cfn: > children: > Controller: {} > vars: {ansible_ssh_user: heat-admin} >docker: > children: > Compute: {} > Controller: {} > vars: {ansible_ssh_user: heat-admin} >nova_vnc_proxy: > children: > Controller: {} > vars: {ansible_ssh_user: heat-admin} >clustercheck: > children: > Controller: {} > vars: {ansible_ssh_user: heat-admin} >_meta: > hostvars: {}
You cannot view the attachment while viewing its details because your browser does not support IFRAMEs.
View the attachment on a separate page
.
View Attachment As Raw
Actions:
View
Attachments on
bug 1592556
:
1452724
| 1452942