From 5ef998a74f055b596a35ab4400149c536d89bb84 Mon Sep 17 00:00:00 2001 From: Wesley Hayutin Date: Sat, 23 Jan 2016 15:46:58 -0500 Subject: [PATCH 001/137] setting the number of cpu's for the undercloud was missing? * add setting the number of vcpus for the undercloud virt instance back into the code base Change-Id: Ie3ca683322fe9f24be7631d9d0a346776a90070c --- playbooks/installer/rdo-manager/templates/virt-setup-env.j2 | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/playbooks/installer/rdo-manager/templates/virt-setup-env.j2 b/playbooks/installer/rdo-manager/templates/virt-setup-env.j2 index 3245f0bad..eb170f9dc 100644 --- a/playbooks/installer/rdo-manager/templates/virt-setup-env.j2 +++ b/playbooks/installer/rdo-manager/templates/virt-setup-env.j2 @@ -43,6 +43,10 @@ export NODE_CPU={{ installer.nodes.node_cpu | default('1') }} export UNDERCLOUD_NODE_MEM={{ installer.nodes.undercloud_node_mem | default('4096') }} {%endif %} +{% if installer.nodes.undercloud_node_cpu is defined %} +export UNDERCLOUD_NODE_CPU={{ installer.nodes.undercloud_node_cpu | default('1') }} +{%endif %} + {% if product.full_version == "7-director" and installer.network.isolation != "none" %} export TESTENV_ARGS=" --baremetal-bridge-names 'brbm' --vlan-trunk-ids='10 20 30 40 50'" {%endif %} From ced4813851379af0c6216e528a86d6f701d73ec2 Mon Sep 17 00:00:00 2001 From: Luigi Toscano Date: Mon, 25 Jan 2016 16:07:52 +0100 Subject: [PATCH 002/137] Adjust "small" and "medium" flavor on qeos7/rhos-jenkins The small flavor "2" is too small on qeos7 for example for RHEL 7.2 base images (which require a 10GB disk). So bump to "3", smaller value with enough disk space. At the same time, raise the requirements for medium to "4", which has a bigger space, closer to the old configuration. This could require a bigger change to flavor definitions (see CENTRALCI-1189). 
Change-Id: Iafa43a5d7e1684e18d82f265bfc8dad890210482 --- .../openstack/site/qeos7/tenant/rhos-qe-ci.yml | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/settings/provisioner/openstack/site/qeos7/tenant/rhos-qe-ci.yml b/settings/provisioner/openstack/site/qeos7/tenant/rhos-qe-ci.yml index 20624be12..5b04bc896 100644 --- a/settings/provisioner/openstack/site/qeos7/tenant/rhos-qe-ci.yml +++ b/settings/provisioner/openstack/site/qeos7/tenant/rhos-qe-ci.yml @@ -36,8 +36,15 @@ provisioner: allocation_pool_end: 172.31.1.100 flavor: - small: 2 - medium: 3 + # The list of flavor should be rechecked; right now, 3 is the smaller + # with a disk big enough for relevant images. 4 is a bit bigger, so + # use it for medium, and keep it for large, as it quite matches the + # values for the old qeos. Other flavor should be cleaned up (also + # because they are mostly unused right now). + # The recheck could lead to a bigger change to flavor definitions + # (see CENTRALCI-1189) + small: 3 + medium: 4 large: 4 large_testing: c6e0ad85-81a8-4fbb-a2d9-b0abac52f79b large_ephemeral: a89c1587-aab2-49c2-a60d-4d19ea40bdbc From f0805a3ee96313f1e38635868742fc2c2e37f825 Mon Sep 17 00:00:00 2001 From: Ronelle Landy Date: Thu, 21 Jan 2016 17:35:24 -0500 Subject: [PATCH 003/137] Replaces bulk node introspection with node-by-node introspection Intermittent failures are happening with bulk introspection. This commit replaces bulk introspection with node-by-node introspection where the status of each node can be checked and the introspection can be rerun on failure. 
Change-Id: Ic3fc746d439bf821f4d03d287cbd0182f00c9034 --- .../installer/rdo-manager/overcloud/run.yml | 35 ++++++++++++++++++- settings/installer/rdo_manager.yml | 1 + .../rdo_manager/introspection_method/bulk.yml | 2 ++ .../introspection_method/node_by_node.yml | 2 ++ 4 files changed, 39 insertions(+), 1 deletion(-) create mode 100644 settings/installer/rdo_manager/introspection_method/bulk.yml create mode 100644 settings/installer/rdo_manager/introspection_method/node_by_node.yml diff --git a/playbooks/installer/rdo-manager/overcloud/run.yml b/playbooks/installer/rdo-manager/overcloud/run.yml index a5bbc82c2..a0d284851 100644 --- a/playbooks/installer/rdo-manager/overcloud/run.yml +++ b/playbooks/installer/rdo-manager/overcloud/run.yml @@ -34,10 +34,43 @@ with_items: ironic_node_list_uuid.stdout_lines when: (hw_env is defined) and (hw_env.disk_root_device_size is defined) and product.full_version == '8-director' - - name: introspect nodes + - name: get full list of node UUIDs + shell: > + source {{ instack_user_home }}/stackrc; + ironic node-list | grep 'power' | awk '{print $2}' + register: ironic_node_full_list_uuid + + - name: start bulk introspection shell: > source {{ instack_user_home }}/stackrc; openstack baremetal introspection bulk start; + when: installer.introspection_method == 'bulk' + + - name: introspect node by node + shell: > + source {{ instack_user_home }}/stackrc; + ironic node-set-maintenance {{ item }} true; + openstack baremetal introspection start {{ item }}; + export STATUS=$(openstack baremetal introspection status {{ item }} | grep 'finished'); + while [[ $STATUS != *"True"* ]]; do + echo "Waiting for instrospection to complete."; + sleep 180; + export STATUS=$(openstack baremetal introspection status {{ item }} | grep 'finished'); + done; + openstack baremetal introspection status {{ item }} | grep 'error' + register: introspect_status + retries: 3 + delay: 5 + until: introspect_status.stdout.find("None") != -1 + with_items: 
ironic_node_full_list_uuid.stdout_lines + when: installer.introspection_method == 'node_by_node' + + - name: set maintenance status to false + shell: > + source {{ instack_user_home }}/stackrc; + ironic node-set-maintenance {{ item }} False + with_items: ironic_node_full_list_uuid.stdout_lines + when: installer.introspection_method == 'node_by_node' - name: check instrospections status register: introspection_result diff --git a/settings/installer/rdo_manager.yml b/settings/installer/rdo_manager.yml index 0a3e55719..3f6b35a30 100644 --- a/settings/installer/rdo_manager.yml +++ b/settings/installer/rdo_manager.yml @@ -60,3 +60,4 @@ defaults: tempest_skip_file: none updates: none custom_deploy: none + introspection_method: bulk diff --git a/settings/installer/rdo_manager/introspection_method/bulk.yml b/settings/installer/rdo_manager/introspection_method/bulk.yml new file mode 100644 index 000000000..09ca68635 --- /dev/null +++ b/settings/installer/rdo_manager/introspection_method/bulk.yml @@ -0,0 +1,2 @@ +installer: + introspection_method: bulk diff --git a/settings/installer/rdo_manager/introspection_method/node_by_node.yml b/settings/installer/rdo_manager/introspection_method/node_by_node.yml new file mode 100644 index 000000000..834f63260 --- /dev/null +++ b/settings/installer/rdo_manager/introspection_method/node_by_node.yml @@ -0,0 +1,2 @@ +installer: + introspection_method: node_by_node From 0cebacf996e4d02e0a75ae23f02c0029fefe90d8 Mon Sep 17 00:00:00 2001 From: Wesley Hayutin Date: Mon, 25 Jan 2016 16:49:26 -0500 Subject: [PATCH 004/137] build the overcloud images on the virthost bm test https://rhos-jenkins.rhev-ci-vms.eng.rdu2.redhat.com/view/POC/job/whayutin-poc-bm-test-261191/ Change-Id: I54b3016e1047b5c60b0b64bc311a832830113ac3 --- .../rdo-manager/templates/ssh_config.j2 | 7 ++ .../rdo-manager/undercloud/build-images.yml | 68 +++++++++++++++++-- .../installer/rdo-manager/undercloud/run.yml | 6 -- 3 files changed, 70 insertions(+), 11 deletions(-) diff 
--git a/playbooks/installer/rdo-manager/templates/ssh_config.j2 b/playbooks/installer/rdo-manager/templates/ssh_config.j2 index 30cf606ff..eeeae2599 100644 --- a/playbooks/installer/rdo-manager/templates/ssh_config.j2 +++ b/playbooks/installer/rdo-manager/templates/ssh_config.j2 @@ -19,6 +19,13 @@ Host undercloud-from-virthost IdentitiesOnly yes User root StrictHostKeyChecking no + +Host undercloud-from-virthost-as-stack + Hostname {{ hostvars['localhost'].undercloud_ip }} + IdentityFile ~/.ssh/id_rsa + IdentitiesOnly yes + User stack + StrictHostKeyChecking no {%endif %} {% if groups["virthost"] is not defined and hw_env is defined and hw_env.env_type != "ovb_host_cloud" %} diff --git a/playbooks/installer/rdo-manager/undercloud/build-images.yml b/playbooks/installer/rdo-manager/undercloud/build-images.yml index d94d00c38..57658c77a 100644 --- a/playbooks/installer/rdo-manager/undercloud/build-images.yml +++ b/playbooks/installer/rdo-manager/undercloud/build-images.yml @@ -1,9 +1,17 @@ --- -- name: build or import images +- name: setup the undercloud hosts: undercloud + tasks: + - name: Create overcloud_images directory + file: path={{ instack_user_home }}/overcloud_images state=directory + +- name: build images on the virthost + hosts: virthost tasks: - name: setup environment vars - template: src={{ base_dir }}/khaleesi/playbooks/installer/rdo-manager/templates/build-img-env.j2 dest=~/build-img-env mode=0755 + template: src={{ base_dir }}/khaleesi/playbooks/installer/rdo-manager/templates/build-img-env.j2 + dest=~/build-img-env mode=0755 + when: installer.overcloud_images | default('build') == "build" - name: ensure /tmp/svc-map-services is absent file: path=/tmp/svc-map-services state=absent @@ -13,39 +21,89 @@ - name: Contents of build-img-env shell: > cat {{ instack_user_home }}/build-img-env + when: installer.overcloud_images | default('build') == "build" + + - name: Create overcloud_images directory + file: path={{ instack_user_home }}/overcloud_images 
state=directory + when: installer.overcloud_images | default('build') == "build" - name: build all the images shell: > source {{ instack_user_home }}/build-img-env; + pushd {{ instack_user_home }}/overcloud_images; openstack overcloud image build --all > {{ instack_user_home }}/openstack-build-images.log when: installer.overcloud_images | default('build') == "build" + - name: scp the overcloud_images to the undercloud + shell: scp -rv -F ssh.config.ansible {{ instack_user_home }}/overcloud_images/* \ + undercloud-from-virthost-as-stack:{{ instack_user_home }}/overcloud_images/ + when: installer.overcloud_images | default('build') == "build" + +- name: build the images on baremetal + hosts: undercloud:&baremetal + tasks: + - name: setup environment vars + template: src={{ base_dir }}/khaleesi/playbooks/installer/rdo-manager/templates/build-img-env.j2 + dest=~/build-img-env mode=0755 + + - name: ensure /tmp/svc-map-services is absent + file: path=/tmp/svc-map-services state=absent + sudo: yes + when: installer.overcloud_images | default('build') == "build" + + - name: Contents of build-img-env + shell: > + cat {{ instack_user_home }}/build-img-env + when: installer.overcloud_images | default('build') == "build" + + - name: get the guest-image + get_url: > + url="{{ distro.images[distro.name][distro.full_version].remote_file_server }}{{ distro.images[distro.name][distro.full_version].guest_image_name }}" + dest=/home/stack/overcloud_images/{{ distro.images[distro.name][distro.full_version].guest_image_name }} + timeout=360 + + - name: build all the images + shell: > + source {{ instack_user_home }}/build-img-env; + pushd {{ instack_user_home }}/overcloud_images; + openstack overcloud image build --all > {{ instack_user_home }}/openstack-build-images.log + when: installer.overcloud_images | default('build') == "build" + + +- name: import images + hosts: undercloud + tasks: - name: ensure wget is installed yum: name=wget state=latest sudo: yes - name: download the 
pre-built rdo-manager images shell: > - wget --quiet -c -O {{ instack_user_home }}/{{ item }}.tar + pushd {{ instack_user_home }}/overcloud_images; + wget --quiet -c -O {{ instack_user_home }}/overcloud_images/{{ item }}.tar "{{ installer.images.url[product.name][product.full_version][product.build][installer.images.version] }}{{ item }}.tar" with_items: "{{ installer.images[product.full_version].files|list }}" when: installer.overcloud_images is defined and installer.overcloud_images == "import" + - name: prep and upload images into glance hosts: undercloud tasks: - name: untar the overcloud images - shell: tar -xvf "{{ instack_user_home }}/{{ item }}.tar" + shell: > + pushd {{ instack_user_home }}/overcloud_images; + tar -xvf "{{ item }}.tar" with_items: "{{ installer.images[product.full_version].files|list }}" when: installer.overcloud_images is defined and installer.overcloud_images == "import" - name: download the fedora-user image get_url: url="{{ distro.images['fedora']['21'].remote_file_server }}{{ distro.images['fedora']['21'].guest_image_name }}" - dest={{ instack_user_home }}/fedora-user.qcow2 + dest={{ instack_user_home }}/overcloud_images/fedora-user.qcow2 force=no timeout=60 - name: prepare for overcloud by loading the images into glance shell: > source {{ instack_user_home }}/stackrc; + pushd {{ instack_user_home }}/overcloud_images; openstack overcloud image upload diff --git a/playbooks/installer/rdo-manager/undercloud/run.yml b/playbooks/installer/rdo-manager/undercloud/run.yml index 400d33825..ebeb87c98 100644 --- a/playbooks/installer/rdo-manager/undercloud/run.yml +++ b/playbooks/installer/rdo-manager/undercloud/run.yml @@ -2,12 +2,6 @@ - name: install the undercloud packages and get the guest image hosts: undercloud tasks: - - name: get the guest-image - get_url: > - url="{{ distro.images[distro.name][distro.full_version].remote_file_server }}{{ distro.images[distro.name][distro.full_version].guest_image_name }}" - dest=/home/stack/{{ 
distro.images[distro.name][distro.full_version].guest_image_name }} - timeout=360 - - name: install python-rdomanager-oscplugin yum: name=python-rdomanager-oscplugin state=present sudo: yes From 2bef27ad089a6d3423351383200e1cb926baffd0 Mon Sep 17 00:00:00 2001 From: Arie Bregman Date: Mon, 18 Jan 2016 14:18:49 +0200 Subject: [PATCH 005/137] [component] functional and unittest cleanup In current form, functional tester rely on openstack installers (e.g packstack) while it can ( and should ) run against clean environment. If assuming functional used with 'project' installer as pep8 and unittest then, no need to maintain components settings files both in functional and project directories. Unittest playbooks are now using component role in order to create one unified infrastrcture for component testing. Change-Id: I54e8131c6f58cfea1d16d8d9ed94d2d2759c6e61 --- playbooks/tester/unittest/pre.yml | 40 ++----------------- roles/component-test/pre/tasks/pre.yml | 28 ++++++++----- settings/tester/functional.yml | 2 + .../functional/component/neutron-fwaas.yml | 6 --- .../functional/component/neutron-lbaas.yml | 6 --- .../functional/component/neutron-vpnaas.yml | 6 --- .../tester/functional/component/neutron.yml | 6 --- .../component/python-neutronclient.yml | 6 --- 8 files changed, 23 insertions(+), 77 deletions(-) delete mode 100644 settings/tester/functional/component/neutron-fwaas.yml delete mode 100644 settings/tester/functional/component/neutron-lbaas.yml delete mode 100644 settings/tester/functional/component/neutron-vpnaas.yml delete mode 100644 settings/tester/functional/component/neutron.yml delete mode 100644 settings/tester/functional/component/python-neutronclient.yml diff --git a/playbooks/tester/unittest/pre.yml b/playbooks/tester/unittest/pre.yml index a5f33b99c..f6ea02b82 100644 --- a/playbooks/tester/unittest/pre.yml +++ b/playbooks/tester/unittest/pre.yml @@ -1,38 +1,6 @@ --- -- name: Test dependencies - sudo: yes - vars: - test_cfg: "{{ test_env }}" +- 
name: Preparation tasks hosts: controller - tasks: - - name: Install test rpm dependencies - yum: pkg={{ item }} state=latest - with_items: test_cfg.setup.install - when: - test_cfg.setup | default(false) and test_cfg.setup.install | default(false) - - - name: Remove unwanted rpms - yum: pkg={{ item }} state=absent - with_items: test_cfg.setup.remove - when: - test_cfg.setup | default(false) and test_cfg.setup.remove | default(false) - -- name: Install packages to convert and publish tests results - sudo: yes - hosts: controller - tasks: - - name: Install packages to convert subunit stream into junitxml - yum: name={{ item }} state=present - with_items: - - subunit-filters - - python-junitxml - -- name: print test configuration - hosts: controller - tasks: - - name: print component path - debug: var={{ component_path }} - - - name: print test configuration - debug: var=test_env - register: env + gather_facts: yes + roles: + - component-test/pre \ No newline at end of file diff --git a/roles/component-test/pre/tasks/pre.yml b/roles/component-test/pre/tasks/pre.yml index 9b5a737f3..ba571dba8 100644 --- a/roles/component-test/pre/tasks/pre.yml +++ b/roles/component-test/pre/tasks/pre.yml @@ -1,9 +1,21 @@ --- +- name: Set test_env + set_fact: test_env="{{ test_config.virt[item]|default(omit) }}" + with_items: + - "{{ major_release }}" + - "{{ full_release }}" + +- name: Set tests path + set_fact: tests_path="{{ item|default(omit) }}" + with_items: + - "{{ tester.component.dir }}" + - "{{ installer.component.dir }}" + - name: compute the directory basename - set_fact: component_basename={{ tester.component.dir.split('/')|last }} + set_fact: component_basename={{ tests_path.split('/')|last }} - name: find the test dependencies file used for the test-run - set_fact: test_deps_file="{{ tester.component.dir + '/' + tester.component.config_file }}" #" + set_fact: test_deps_file="{{ tests_path + '/' + tester.component.config_file }}" #" - name: load config include_vars: 
"{{test_deps_file}}" @@ -16,14 +28,8 @@ - name: set full release set_fact: full_release="{{ ansible_distribution + '-' + ansible_distribution_version }}" -- name: set test_env - set_fact: test_env="{{ test_config.virt[item]|default(omit) }}" - with_items: - - "{{ major_release }}" - - "{{ full_release }}" - - name: rsync tests dir to tester - synchronize: src="{{ tester.component.dir }}" dest="{{ ansible_env.HOME }}/" #" + synchronize: src="{{ tests_path }}" dest="{{ ansible_env.HOME }}/" #" register: result - name: print result @@ -37,8 +43,8 @@ sudo: yes command: "rhos-release {{ product.version.major }} {{ product.repo.rhos_release.extra_args|join(' ') }}" -- name: print tester component dir - debug: var=tester.component.dir +- name: Print component tests path + debug: var=tests_path - name: print HOME dir debug: var=ansible_env.HOME diff --git a/settings/tester/functional.yml b/settings/tester/functional.yml index d6585d47e..f6e66fc86 100644 --- a/settings/tester/functional.yml +++ b/settings/tester/functional.yml @@ -5,6 +5,8 @@ tester: short_name: func component: config_file: jenkins-config.yml + tox_target: dsvm-functional + node: prefix: - !lookup tester.short_name diff --git a/settings/tester/functional/component/neutron-fwaas.yml b/settings/tester/functional/component/neutron-fwaas.yml deleted file mode 100644 index 2295b1149..000000000 --- a/settings/tester/functional/component/neutron-fwaas.yml +++ /dev/null @@ -1,6 +0,0 @@ -tester: - component: - name: neutron-fwaas - short_name: ntrn-fw - dir: !join [ !env WORKSPACE, /neutron-fwaas] - tox_target: dsvm-functional diff --git a/settings/tester/functional/component/neutron-lbaas.yml b/settings/tester/functional/component/neutron-lbaas.yml deleted file mode 100644 index 2213ad459..000000000 --- a/settings/tester/functional/component/neutron-lbaas.yml +++ /dev/null @@ -1,6 +0,0 @@ -tester: - component: - name: neutron-lbaas - short_name: ntrn-lb - dir: !join [ !env WORKSPACE, /neutron-lbaas] - tox_target: 
dsvm-functional diff --git a/settings/tester/functional/component/neutron-vpnaas.yml b/settings/tester/functional/component/neutron-vpnaas.yml deleted file mode 100644 index 871e7f89a..000000000 --- a/settings/tester/functional/component/neutron-vpnaas.yml +++ /dev/null @@ -1,6 +0,0 @@ -tester: - component: - name: neutron-vpnaas - short_name: ntrn-vpn - dir: !join [ !env WORKSPACE, /neutron-vpnaas] - tox_target: dsvm-functional diff --git a/settings/tester/functional/component/neutron.yml b/settings/tester/functional/component/neutron.yml deleted file mode 100644 index a42dbf67a..000000000 --- a/settings/tester/functional/component/neutron.yml +++ /dev/null @@ -1,6 +0,0 @@ -tester: - component: - name: neutron - short_name: ntrn - dir: !join [ !env WORKSPACE, /neutron] - tox_target: dsvm-functional diff --git a/settings/tester/functional/component/python-neutronclient.yml b/settings/tester/functional/component/python-neutronclient.yml deleted file mode 100644 index eefb523c9..000000000 --- a/settings/tester/functional/component/python-neutronclient.yml +++ /dev/null @@ -1,6 +0,0 @@ -tester: - component: - name: python-neutronclient - short_name: py-ntrnclnt - dir: !join [ !env WORKSPACE, /python-neutronclient] - tox_target: functional From 3a3f0c7b52889816764da24269318f2780bca898 Mon Sep 17 00:00:00 2001 From: Luigi Toscano Date: Tue, 26 Jan 2016 15:01:34 +0100 Subject: [PATCH 006/137] Correctly set and read the credentials for test accounts Do not hardcode the username and tenant for the non-admin testing user/tenant. Call the role for user creation for all integration tests (worst case: no user is created). Also, at the same time, properly read the value of admin_password without triggering an evaluation error (this fixes an error on templating that went unfortunately unnoticed). 
Change-Id: Id4b540354006f3091c1b1caa5a79de90fe98d215 --- .../tester/integration/common/upload_image.yml | 6 +++--- playbooks/tester/integration/horizon/pre.yml | 14 +++----------- playbooks/tester/integration/pre.yml | 10 ++++++++++ 3 files changed, 16 insertions(+), 14 deletions(-) diff --git a/playbooks/tester/integration/common/upload_image.yml b/playbooks/tester/integration/common/upload_image.yml index bc6a353a7..21e8cd55d 100644 --- a/playbooks/tester/integration/common/upload_image.yml +++ b/playbooks/tester/integration/common/upload_image.yml @@ -4,9 +4,9 @@ gather_facts: no sudo: no vars: - - demo_username: demo - - demo_password: "{{ hostvars[controller_name].demo_password | default('redhat') }}" - - demo_tenant_name: demo + - demo_username: "{{ tester.accounts[0].username | default('demo') }}" + - demo_password: "{{ tester.accounts[0].password | default('redhat') }}" + - demo_tenant_name: "{{ tester.accounts[0].tenant_name | default('demo') }}" - controller_name: "{{ provisioner.nodes.controller.name }}" - controller_ip: "{{ hostvars[controller_name].ansible_default_ipv4.address }}" tasks: diff --git a/playbooks/tester/integration/horizon/pre.yml b/playbooks/tester/integration/horizon/pre.yml index d235c1967..37cc34a52 100644 --- a/playbooks/tester/integration/horizon/pre.yml +++ b/playbooks/tester/integration/horizon/pre.yml @@ -1,14 +1,4 @@ --- -- name: Prepare the environment (users and tenant) - hosts: controller - sudo: no - gather_facts: yes - vars: - controller_auth_url: "http://{{ ansible_default_ipv4.address }}:35357/v2.0/" - admin_password: "{{ admin_password | default('redhat') }}" - roles: - - openstack/create_users - - include: ../common/demo_tenant.yml - include: ../common/upload_image.yml @@ -73,7 +63,8 @@ controller_name: "{{ provisioner.nodes.controller.name }}" horizon_tests: admin_password: "{{ hostvars[controller_name].admin_password | default('redhat') }}" - demo_password: "{{ hostvars[controller_name].demo_password | 
default('redhat') }}" + demo_password: "{{ tester.accounts[0].password | default('redhat') }}" + demo_username: "{{ tester.accounts[0].username | default('demo') }}" tmp_controller_host: "{{ hostvars[controller_name].ansible_default_ipv4.address }}" services_status: enabled_services: "{{ hostvars[controller_name].integration_enabled_services }}" @@ -92,6 +83,7 @@ option={{ item.key }} value={{ item.value }} with_items: + - { section: 'identity', key: 'username', value: "{{ horizon_tests.demo_username }}"} - { section: 'identity', key: 'password', value: "{{ horizon_tests.demo_password }}"} - { section: 'identity', key: 'admin_password', value: "{{ horizon_tests.admin_password }}"} - { section: 'identity', key: 'rh_portal_login', value: "{{ tester.integration.subscription.username }}" } diff --git a/playbooks/tester/integration/pre.yml b/playbooks/tester/integration/pre.yml index f09b1ff7a..9931ae110 100644 --- a/playbooks/tester/integration/pre.yml +++ b/playbooks/tester/integration/pre.yml @@ -41,4 +41,14 @@ pip: name={{ item }} virtualenv=~/{{ tester.venv_dir }} virtualenv_site_packages=yes with_items: tester.pip_packages +- name: Prepare the environment (users and tenant) + hosts: controller + sudo: no + gather_facts: yes + vars: + controller_auth_url: "http://{{ ansible_default_ipv4.address }}:35357/v2.0/" + admin_password: "{{ hostvars[provisioner.nodes.controller.name].admin_password | default('redhat') }}" + roles: + - openstack/create_users + - include: "{{ tester.component }}/pre.yml" From 06fa4d540b98549b4e116aae18085d64a643b720 Mon Sep 17 00:00:00 2001 From: Tomas Rusnak Date: Wed, 18 Nov 2015 15:22:33 +0100 Subject: [PATCH 007/137] fix storage dependencies based on OS version Change-Id: Id65b85bd1f904f698c12c935702b0b943fd5cab1 --- .../installer/packstack/storage/image/backend/ceph.yml | 2 +- .../installer/packstack/storage/volume/backend/ceph.yml | 2 +- .../installer/packstack/storage/volume/backend/gluster.yml | 2 +- 
.../installer/packstack/storage/volume/backend/thinlvm.yml | 7 +++++++ 4 files changed, 10 insertions(+), 3 deletions(-) diff --git a/settings/installer/packstack/storage/image/backend/ceph.yml b/settings/installer/packstack/storage/image/backend/ceph.yml index d32cde301..1f9bdcd1e 100644 --- a/settings/installer/packstack/storage/image/backend/ceph.yml +++ b/settings/installer/packstack/storage/image/backend/ceph.yml @@ -3,7 +3,7 @@ nodes: controller: packages: - - ceph-common + default: ceph-common storage: services: diff --git a/settings/installer/packstack/storage/volume/backend/ceph.yml b/settings/installer/packstack/storage/volume/backend/ceph.yml index b3f3a9458..f6081eb9f 100644 --- a/settings/installer/packstack/storage/volume/backend/ceph.yml +++ b/settings/installer/packstack/storage/volume/backend/ceph.yml @@ -3,7 +3,7 @@ nodes: controller: packages: - - ceph-common + default: ceph-common storage: services: diff --git a/settings/installer/packstack/storage/volume/backend/gluster.yml b/settings/installer/packstack/storage/volume/backend/gluster.yml index 4b22ea12c..1163eb570 100644 --- a/settings/installer/packstack/storage/volume/backend/gluster.yml +++ b/settings/installer/packstack/storage/volume/backend/gluster.yml @@ -3,7 +3,7 @@ nodes: controller: packages: - - glusterfs-fuse + default: glusterfs-fuse storage: services: diff --git a/settings/installer/packstack/storage/volume/backend/thinlvm.yml b/settings/installer/packstack/storage/volume/backend/thinlvm.yml index 6cece867b..6ca300728 100644 --- a/settings/installer/packstack/storage/volume/backend/thinlvm.yml +++ b/settings/installer/packstack/storage/volume/backend/thinlvm.yml @@ -1,5 +1,11 @@ --- !extends:common/shared.yml +nodes: + controller: + packages: + default: targetcli + '6.0': scsi-target-utils + storage: services: - cinder @@ -17,6 +23,7 @@ storage: rhos-6-thinlvm: volume_driver: "cinder.volume.drivers.lvm.LVMISCSIDriver" lvm_type: "thin" + iscsi_helper: "lioadm" "5.0": 
*cinder_cfg_old "6.0": From 343a168a750c9aeba91d42eecf51c95163228d99 Mon Sep 17 00:00:00 2001 From: Tomas Rusnak Date: Mon, 2 Nov 2015 09:49:12 +0100 Subject: [PATCH 008/137] fix xtreamio_fc volume_driver Change-Id: I3adf4ad6e0873ef6a91b88b3d3f47878dcc2b839 --- .../installer/packstack/storage/volume/backend/xtremio_fc.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/settings/installer/packstack/storage/volume/backend/xtremio_fc.yml b/settings/installer/packstack/storage/volume/backend/xtremio_fc.yml index 89979f569..2a68cca85 100644 --- a/settings/installer/packstack/storage/volume/backend/xtremio_fc.yml +++ b/settings/installer/packstack/storage/volume/backend/xtremio_fc.yml @@ -12,7 +12,7 @@ storage: "icehouse": &cinder_cfg DEFAULT: - volume_driver: "cinder.volume.drivers.emc.xtremio.XtremIOISCSIDriver" + volume_driver: "cinder.volume.drivers.emc.xtremio.XtremIOFibreChannelDriver" san_ip: !lookup private.storage.volume.backend.xtremio.san_ip san_login: !lookup private.storage.volume.backend.xtremio.san_login san_password: !lookup private.storage.volume.backend.xtremio.san_password From ffcef934f9811740007830be81ec145fb4c48b66 Mon Sep 17 00:00:00 2001 From: Luigi Toscano Date: Fri, 22 Jan 2016 16:21:15 +0100 Subject: [PATCH 009/137] Assign floating IP for both static and dynamic networks Previously, only dynamic network had an assigned floating IP from the public network 'public_net_name' The case of network with only one public network seems to be covered by using use_floating_ip, but not the case of static tenant networks with multiple public networks. This change should address the latter. 
Change-Id: Ibdc7665e29309330454c0b2f7a513a36868360ba --- playbooks/provisioner/openstack/cleanup.yml | 10 +++++++++- playbooks/provisioner/openstack/main.yml | 9 ++++++++- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/playbooks/provisioner/openstack/cleanup.yml b/playbooks/provisioner/openstack/cleanup.yml index 3e0e0d527..e927444fb 100644 --- a/playbooks/provisioner/openstack/cleanup.yml +++ b/playbooks/provisioner/openstack/cleanup.yml @@ -7,8 +7,16 @@ - group_by: key=net_prov when: provisioner.network.dynamic_net is defined and provisioner.network.dynamic_net +- name: Check the nodes which need a floating IP from a specific network + hosts: localhost + gather_facts: no + sudo: no + tasks: + - group_by: key=net_add_floatingip + when: provisioner.network.public_net_name is defined + - name: Cleanup Networks - hosts: net_prov + hosts: net_add_floatingip gather_facts: no tasks: - name: Delete Floating IPs diff --git a/playbooks/provisioner/openstack/main.yml b/playbooks/provisioner/openstack/main.yml index db370f15f..81fb9c74e 100644 --- a/playbooks/provisioner/openstack/main.yml +++ b/playbooks/provisioner/openstack/main.yml @@ -7,6 +7,13 @@ - group_by: key=net_prov when: provisioner.network.dynamic_net is defined and provisioner.network.dynamic_net +- name: Check the nodes which need a floating IP from a specific network + hosts: localhost + gather_facts: no + sudo: no + tasks: + - group_by: key=net_add_floatingip + when: provisioner.network.public_net_name is defined - name: Create networks hosts: net_prov @@ -121,7 +128,7 @@ with_items: created_nodes.results - name: Add Floating IPs - hosts: net_prov + hosts: net_add_floatingip tasks: - name: assign floating ip to instances quantum_floating_ip: From c4a8d549dd82ea16d5cba1c7db762febe9343193 Mon Sep 17 00:00:00 2001 From: Arie Bregman Date: Tue, 26 Jan 2016 22:33:49 +0200 Subject: [PATCH 010/137] Fix component testing test_config should be used only after config loaded. 
tests path should also be set by either 'project' installer or API tester. Change-Id: I9ab8dd0e2d2e3f1bf352cc073848ff8de9d650a3 --- playbooks/installer/project/post.yml | 3 +++ playbooks/tester/api/pre.yml | 7 +++++++ roles/component-test/pre/tasks/pre.yml | 20 +++++++------------- 3 files changed, 17 insertions(+), 13 deletions(-) diff --git a/playbooks/installer/project/post.yml b/playbooks/installer/project/post.yml index 0c9c59299..47b821209 100644 --- a/playbooks/installer/project/post.yml +++ b/playbooks/installer/project/post.yml @@ -1,3 +1,6 @@ --- - name: Component post steps hosts: controller + tasks: + - name: Set tests path + set_fact: tests_path="{{ installer.component.dir }}" diff --git a/playbooks/tester/api/pre.yml b/playbooks/tester/api/pre.yml index 30df8573a..171f73d87 100644 --- a/playbooks/tester/api/pre.yml +++ b/playbooks/tester/api/pre.yml @@ -1,4 +1,11 @@ --- +- name: Set path for tests + hosts: controller + gather_facts: yes + tasks: + - name: Set tests path + set_fact: tests_path="{{ tester.component.dir }}" + - name: Run pre tasks hosts: controller gather_facts: yes diff --git a/roles/component-test/pre/tasks/pre.yml b/roles/component-test/pre/tasks/pre.yml index ba571dba8..24fed9be1 100644 --- a/roles/component-test/pre/tasks/pre.yml +++ b/roles/component-test/pre/tasks/pre.yml @@ -1,16 +1,4 @@ --- -- name: Set test_env - set_fact: test_env="{{ test_config.virt[item]|default(omit) }}" - with_items: - - "{{ major_release }}" - - "{{ full_release }}" - -- name: Set tests path - set_fact: tests_path="{{ item|default(omit) }}" - with_items: - - "{{ tester.component.dir }}" - - "{{ installer.component.dir }}" - - name: compute the directory basename set_fact: component_basename={{ tests_path.split('/')|last }} @@ -18,7 +6,7 @@ set_fact: test_deps_file="{{ tests_path + '/' + tester.component.config_file }}" #" - name: load config - include_vars: "{{test_deps_file}}" + include_vars: "{{ test_deps_file }}" register: result #TODO(abregman): add 
major and minor version in distro settings @@ -28,6 +16,12 @@ - name: set full release set_fact: full_release="{{ ansible_distribution + '-' + ansible_distribution_version }}" +- name: Set test_env + set_fact: test_env="{{ test_config.virt[item]|default(omit) }}" + with_items: + - "{{ major_release }}" + - "{{ full_release }}" + - name: rsync tests dir to tester synchronize: src="{{ tests_path }}" dest="{{ ansible_env.HOME }}/" #" register: result From 00ea49b42e98541168392fea6171acea37a6e55a Mon Sep 17 00:00:00 2001 From: Wesley Hayutin Date: Tue, 26 Jan 2016 13:30:41 -0500 Subject: [PATCH 011/137] add some debugging around building the images to the console Change-Id: Id8a192c28e4ac1f6876c778be94a98609d8410c2 --- .../rdo-manager/undercloud/build-images.yml | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/playbooks/installer/rdo-manager/undercloud/build-images.yml b/playbooks/installer/rdo-manager/undercloud/build-images.yml index 57658c77a..daac41908 100644 --- a/playbooks/installer/rdo-manager/undercloud/build-images.yml +++ b/playbooks/installer/rdo-manager/undercloud/build-images.yml @@ -34,8 +34,16 @@ openstack overcloud image build --all > {{ instack_user_home }}/openstack-build-images.log when: installer.overcloud_images | default('build') == "build" + - name: expose errors durring DIB build + shell: cat openstack-build-images.log | grep -v liberror | grep -v libgpg-error | grep -A 1 -B 1 error + when: installer.overcloud_images | default('build') == "build" + + - name: list the files in overcloud_images + command: ls -la {{ instack_user_home }}/overcloud_images/ + when: installer.overcloud_images | default('build') == "build" + - name: scp the overcloud_images to the undercloud - shell: scp -rv -F ssh.config.ansible {{ instack_user_home }}/overcloud_images/* \ + shell: scp -r -F ssh.config.ansible {{ instack_user_home }}/overcloud_images/* \ undercloud-from-virthost-as-stack:{{ instack_user_home }}/overcloud_images/ when: 
installer.overcloud_images | default('build') == "build" @@ -102,6 +110,9 @@ force=no timeout=60 + - name: list the files in overcloud_images + command: ls -la {{ instack_user_home }}/overcloud_images/ + - name: prepare for overcloud by loading the images into glance shell: > source {{ instack_user_home }}/stackrc; From eb48a8d68a58f33184888ae8db5e2798d9bbb587 Mon Sep 17 00:00:00 2001 From: Daniel Mellado Date: Wed, 20 Jan 2016 11:13:44 +0100 Subject: [PATCH 012/137] Add workaround for rhbz1299563 rhbz1299563 - Nova notifications are not configured for ceilometer https://bugzilla.redhat.com/show_bug.cgi?id=1299563 It'll be applied only if openstack-ceilometer-compute service is present. Change-Id: I5a035f54b499f4efe59811efe9e1a36d66f70be6 Partial-Bug: BZ/1299563 --- playbooks/installer/packstack/post.yml | 48 ++++++++++++++++++++++++++ settings/product/rhos/version/7.0.yml | 3 ++ 2 files changed, 51 insertions(+) diff --git a/playbooks/installer/packstack/post.yml b/playbooks/installer/packstack/post.yml index cc18fac40..95a4a68ed 100644 --- a/playbooks/installer/packstack/post.yml +++ b/playbooks/installer/packstack/post.yml @@ -115,6 +115,54 @@ - name: Restart neutron service to apply changes in floating ip pool shell: openstack-service restart neutron +- name: Check if RHBZ1299563 ceilometer nova notifications are enabled + hosts: compute + gather_facts: no + sudo: yes + tasks: + - name: Check if openstack-ceilometer-compute service exists + shell: systemctl is-active openstack-ceilometer-compute + register: ceilometer_status + ignore_errors: yes + + - group_by: key=workaround_rhbz1299563 + when: workarounds.rhbz1299563 is defined and ceilometer_status.stdout == 'active' + +- name: "Workaround RHBZ1299563: Configure ceilometer nova notifications" + hosts: workaround_rhbz1299563 + gather_facts: no + sudo: yes + tasks: + - name: Set instace_usage_audit in nova.conf + ini_file: dest=/etc/nova/nova.conf + section=DEFAULT + option=instance_usage_audit + value=True + 
+ - name: Set instace_usage_audit_period in nova.conf + ini_file: dest=/etc/nova/nova.conf + section=DEFAULT + option=instance_usage_audit_period + value=hour + + - name: Set notify_on_state_change in nova.conf + ini_file: dest=/etc/nova/nova.conf + section=DEFAULT + option=notify_on_state_change + value=vm_and_task_state + + - name: Set notification_driver in nova.conf + ini_file: dest=/etc/nova/nova.conf + section=DEFAULT + option=notification_driver + value=messagingv2 + + - name: Restart openstack-ceilometer-compute + service: name=openstack-ceilometer-compute state=restarted + + - name: Restart nova-compute + service: name=openstack-nova-compute state=restarted + - name: Post install for Neutron server hosts: controller gather_facts: no diff --git a/settings/product/rhos/version/7.0.yml b/settings/product/rhos/version/7.0.yml index 2b70d8e1a..6e9cb6c32 100644 --- a/settings/product/rhos/version/7.0.yml +++ b/settings/product/rhos/version/7.0.yml @@ -4,3 +4,6 @@ product: major: 7 minor: 0 code_name: kilo + +workarounds: + rhbz1299563: {} From 3bfce231827b2dbe85ed387499059a6bf4c265e8 Mon Sep 17 00:00:00 2001 From: Martin Pavlasek Date: Mon, 25 Jan 2016 17:53:24 +0100 Subject: [PATCH 013/137] int.t.: Separate demo credentials to per-component snippet I've moved demo credentials from general integration.yml to per-component snippet to be able use different credentials for different component Change-Id: I5cab29c5b3d38de4732d04b95d6e74d8a5bc59b3 --- settings/tester/integration.yml | 4 ---- settings/tester/integration/component/horizon.yml | 4 ++++ 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/settings/tester/integration.yml b/settings/tester/integration.yml index 0d8cdf41b..999e90960 100644 --- a/settings/tester/integration.yml +++ b/settings/tester/integration.yml @@ -1,7 +1,3 @@ --- !extends:common.yml tester: type: integration - accounts: - - username: 'demo' - tenant_name: 'demo' - password: 'secrete' diff --git 
a/settings/tester/integration/component/horizon.yml b/settings/tester/integration/component/horizon.yml index b6be4db50..30da5e1b5 100644 --- a/settings/tester/integration/component/horizon.yml +++ b/settings/tester/integration/component/horizon.yml @@ -56,3 +56,7 @@ tester: - selenium==2.45.0 - nose - testtools + accounts: + - username: 'demo' + tenant_name: 'demo' + password: 'redhat' From 068283847ee51191f817420ef5e38a4974384f1b Mon Sep 17 00:00:00 2001 From: Sagi Shnaidman Date: Tue, 26 Jan 2016 18:20:32 +0200 Subject: [PATCH 014/137] Increase ping timeout to 5 min in tempest config The default ping timeout in tempest is 120 sec which is short for overloaded hardware. Increasing it to 5 minutes so the vm will have enough time to be up. Change-Id: I032627ed4aea4829695348ab827eeb79ed4096bc --- playbooks/post-deploy/rdo-manager/overcloud-test.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/playbooks/post-deploy/rdo-manager/overcloud-test.yml b/playbooks/post-deploy/rdo-manager/overcloud-test.yml index 9f0b6c4a7..d94beca2f 100644 --- a/playbooks/post-deploy/rdo-manager/overcloud-test.yml +++ b/playbooks/post-deploy/rdo-manager/overcloud-test.yml @@ -56,7 +56,8 @@ identity.admin_password $OS_PASSWORD \ network.tenant_network_cidr 192.168.0.0/24 \ object-storage.operator_role swiftoperator \ - orchestration.stack_owner_role heat_stack_owner + orchestration.stack_owner_role heat_stack_owner \ + validation.ping_timeout 300 when: installer.tempest.test_regex is defined and installer.tempest.test_regex != "tempest\.scenario\.test_minimum_basic" From f84fadc641d6ccbdcb6ee4d1023eee34eb16bdf7 Mon Sep 17 00:00:00 2001 From: Wesley Hayutin Date: Wed, 27 Jan 2016 12:17:07 -0500 Subject: [PATCH 015/137] copy the overcloud image build logs to the undercloud for collection Change-Id: Ieba2123059b5584f200e12f6665661cbc565ddca --- playbooks/installer/rdo-manager/undercloud/build-images.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git 
a/playbooks/installer/rdo-manager/undercloud/build-images.yml b/playbooks/installer/rdo-manager/undercloud/build-images.yml index daac41908..532de59b8 100644 --- a/playbooks/installer/rdo-manager/undercloud/build-images.yml +++ b/playbooks/installer/rdo-manager/undercloud/build-images.yml @@ -47,6 +47,11 @@ undercloud-from-virthost-as-stack:{{ instack_user_home }}/overcloud_images/ when: installer.overcloud_images | default('build') == "build" + - name: scp the openstack-build-images.log file to the undercloud + shell: scp -r -F ssh.config.ansible {{ instack_user_home }}/openstack-build-images.log \ + undercloud-from-virthost-as-stack:{{ instack_user_home }}/ + when: installer.overcloud_images | default('build') == "build" + - name: build the images on baremetal hosts: undercloud:&baremetal tasks: From 854fbd8a47b46ab143a5560ad6de1c2b618cce91 Mon Sep 17 00:00:00 2001 From: Yair Fried Date: Wed, 27 Jan 2016 11:42:28 +0200 Subject: [PATCH 016/137] Prevent cloud-init from reverting hostname Move hostname control roles and invoke from provisioner/openstack as this is the only place we should have issues. On hosts with cloud-init (like openstack provisioner), when hostname is changed from the guest-image cli, cloud-init would revert hostnames upon reboot. This would cause agent's names to change on openstack DB. 
Change-Id: I045394d0e903f136a3ffa9f2da7a98fd45cd3391 --- playbooks/installer/packstack/pre.yml | 20 -------------- playbooks/provisioner/openstack/main.yml | 7 +++++ roles/system/set_hostname/tasks/main.yml | 33 ++++++++++++++++++++++++ 3 files changed, 40 insertions(+), 20 deletions(-) create mode 100644 roles/system/set_hostname/tasks/main.yml diff --git a/playbooks/installer/packstack/pre.yml b/playbooks/installer/packstack/pre.yml index 33f0d884b..684822044 100644 --- a/playbooks/installer/packstack/pre.yml +++ b/playbooks/installer/packstack/pre.yml @@ -1,24 +1,4 @@ --- -- name: Ensure hostname is configured properly - hosts: openstack_nodes - gather_facts: yes - sudo: yes - tasks: - - name: Configure hostname - hostname: name="{{ hostvars[inventory_hostname].inventory_hostname }}" - - - name: Ensure hostname is in /etc/hosts - lineinfile: - dest: /etc/hosts - regexp: '.*{{ inventory_hostname }}$' - line: "{{ hostvars[inventory_hostname].ansible_default_ipv4.address }} {{inventory_hostname}}" - state: present - when: hostvars[inventory_hostname].ansible_default_ipv4.address is defined - - - name: restart systemd-hostnamed - service: name=systemd-hostnamed state=restarted - when: ansible_distribution_version|int > 6 - - name: Create ssh key if one does not exist hosts: controller gather_facts: no diff --git a/playbooks/provisioner/openstack/main.yml b/playbooks/provisioner/openstack/main.yml index 81fb9c74e..808af70f4 100644 --- a/playbooks/provisioner/openstack/main.yml +++ b/playbooks/provisioner/openstack/main.yml @@ -163,6 +163,13 @@ sudo: no delegate_to: localhost +- name: Ensure hostname is configured properly + hosts: openstack_nodes + gather_facts: yes + sudo: yes + roles: + - system/set_hostname + - name: Update network interfaces on nodes - OpenStack hosts: openstack_nodes gather_facts: yes diff --git a/roles/system/set_hostname/tasks/main.yml b/roles/system/set_hostname/tasks/main.yml new file mode 100644 index 000000000..9c6ad8e8d --- /dev/null +++ 
b/roles/system/set_hostname/tasks/main.yml @@ -0,0 +1,33 @@ +--- +- name: Configure hostname + hostname: + name: "{{ inventory_hostname }}" + register: newhostname + +- name: Ensure hostname is in /etc/hosts + lineinfile: + dest: /etc/hosts + regexp: '.*{{ inventory_hostname }}$' + line: "{{ ansible_default_ipv4.address }} {{inventory_hostname}}" + state: present + when: ansible_default_ipv4.address is defined + +- name: check for cloud.cfg + stat: + path: /etc/cloud/cloud.cfg + register: cloudcfg + when: newhostname|changed + +- name: Prevent cloud-init from controlling hostname + lineinfile: + dest: /etc/cloud/cloud.cfg + regexp: "^preserve_hostname:" + line: "preserve_hostname: true" + when: newhostname|changed and cloudcfg.stat.exists + +- name: restart systemd-hostnamed + service: + name: systemd-hostnamed + state: restarted + when: ansible_distribution_version|int > 6 + From e010e49b6011eea881433c37a305959eea36a344 Mon Sep 17 00:00:00 2001 From: Yair Fried Date: Wed, 23 Dec 2015 11:44:24 +0200 Subject: [PATCH 017/137] Introduce the rhos-release module. goal - handle all rhos-release operations and process output for tagging build-mark default: "state=pinned release=RELEASE" -> rhos-release -P RELEASE rolling release (unpinned latest): state=rolling release=RELEASE" -> rhos-release RELEASE todo(yfried): need a better name than "rolling" (but not latest) "state=absent" -> rhos-release -x repo-version control: "version=PUDDLE" -> "rhos-release -p PUDDLE ..." control the target directory for repo files: "rhos-release: [state=absent] dest=DEST" -> "rhos-release [-x] -t DEST" override the default RHEL version: "distro" -> "rhos-release -r DISTRO" Switch between puddle and poodle repos. 
"repo_type: puddle/poodle" Change-Id: Ic93318448e67605533decfe2a40dfc2513fe4ec5 --- library/rhos-release.py | 241 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 241 insertions(+) create mode 100644 library/rhos-release.py diff --git a/library/rhos-release.py b/library/rhos-release.py new file mode 100644 index 000000000..fba05293f --- /dev/null +++ b/library/rhos-release.py @@ -0,0 +1,241 @@ +#!/usr/bin/python + +# (c) 2014, Red Hat, Inc. +# Written by Yair Fried +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + +from os import listdir +from os.path import isfile, join + +DOCUMENTATION = ''' +--- +module: rhos-release +description: + - Add/remove RHEL-OSP repo files on RHEL systems +options: + state: + description: + - Whether to add (C(pinned), C(rolling)), or remove (C(absent)) repo files. + If C(pinned) will grab the latest available version but pin the puddle + version (dereference 'latest' links to prevent content from changing). + If C(rolling) will grab latest in "rolling-release" and keep all links + pointing to latest version. 
+ choices: ['pinned', 'rolling', 'absent'] + default: pinned + release: + description: + - release name to find + dest: + description: + - target directory for repo files + default: "/etc/yum.repos.d" + distro: + description: + - override the default RHEL version + repo_type: + description: + - Controls the repo type C(puddle) or C(poodle) + choices: ['puddle', 'poodle'] + default: puddle + version: + description: + - Specific puddle/poodle selection. + This can be a known-symlink (Y1, Z1, GA, etc.), or + a puddle date stamp in the form of YYYY-MM-DD.X + + +notes: + - requires rhos-release version 1.0.23 +requirements: [ rhos-release ] +''' + +Examples = ''' +- name: Remove all RHEL-OSP repo files. + rhos-release: state=absent + +- name: Add latest RHEL-OSP repo files for for RHEL-OSP 7 and pin version. + rhos-release: release=7 + +- name: Add latest RHEL-OSP repo files for for RHEL-OSPd 7 and pin version. + rhos-release: release=7_director + +- name: Add latest RHEL-OSP repo files for for RHEL-OSP 7 unpinned (rolling release). + rhos-release: release=7 state=rolling + +- name: Add latest RHEL-OSP repo files for for RHEL-OSPd 7 unpinned (rolling release). + rhos-release: release=7_director state=rolling + +''' + + +REPODST = "/etc/yum.repos.d" + + +def get_repo_list(repodst): + return [f for f in listdir(repodst) if isfile(join(repodst, f)) and + f.startswith('rhos-release-') and f.endswith(".repo")] + + +def _remove_repos(module, base_cmd): + """ Remove RHEL-OSP repos files""" + + repodst = REPODST + cmd = [base_cmd, '-x'] + + if module.params["dest"]: + repodst = module.params["dest"] + cmd.extend(["-t", module.params["dest"]]) + + repo_files = get_repo_list(repodst) + if repo_files: + + rc, out, err = module.run_command(cmd) + if rc == "127": + module.fail_json(msg='Requires rhos-release installed. 
%s: %s' % (cmd, err)) + elif rc: + module.fail_json(msg='Error: %s: %s' % (cmd, err)) + empty_repo_files = get_repo_list(repodst) + if empty_repo_files: + module.fail_json(msg="Failed to remove files: %s" % empty_repo_files) + module.exit_json(changed=True, deleted_files=repo_files) + else: + module.exit_json(changed=False, msg="No repo files found") + + +def _parse_output(module, stdout): + """Parse rhos-release stdout. + + lines starting with "Installed": + list of repo files created. + verify all files are created in the same directory. + + lines starting with "# rhos-release": + Installed channel details + release=release number (should match "release" input), + version=version tag of release, + repo_type="poodle"/"puddle", + channel=ospd/core, + verify no more than 2 channels installed - core and/or ospd + + :return: dict( + repodir=absolute path of directory where repo files were created, + files=list of repo files created (filter output duplications), + releases=list of channels (see channel details) installed, + stdout=standard output of rhos-release, + ) + """ + file_lines = [line for line in stdout.splitlines() if line.startswith("Installed")] + + def installed(line): + pattern = re.compile(r'(?PInstalled: )(?P\S+)') + match = pattern.search(line) + if not match: + module.fail_json("Failed to parse line %s" % line) + filename = os.path.abspath(match.group("filename")) + return dict( + file=os.path.basename(filename), + repodir=os.path.dirname(filename) + ) + + filenames = map(installed, file_lines) + dirs = set(f["repodir"] for f in filenames) + if len(dirs) > 1: + module.fail_json("Found repo files in multiple directories %s" % dirs) + repodir = dirs.pop() + filenames = set(f["file"] for f in filenames) + + release_lines = [line for line in stdout.splitlines() if line.startswith("# rhos-release ")] + + def released(line): + pattern = re.compile(r'(?P# rhos-release )' + r'(?P\d+)\s*' + r'(?P-director)?\s*' + r'(?P-d)?\s*' + r'-p (?P\S+)' + ) + match = 
pattern.search(line) + if not match: + module.fail_json("Failed to parse line %s" % line) + return dict( + release=match.group("release"), + version=match.group("version"), + repo_type="poodle" if match.group("poodle") else "puddle", + channel="ospd" if match.group("director") else "core", + ) + + installed_releases = map(released, release_lines) + if len(installed_releases) > 2 or (len(installed_releases) == 2 and + set(r["channel"] for r in installed_releases) != {"ospd", "core"}): + module.fail_json(msg="Can't handle more than 2 channels. 1 core, 1 ospd. Found %s" % installed_releases) + + return dict( + repodir=repodir, + files=list(filenames), + releases=installed_releases, + stdout=stdout.splitlines() + ) + + +def _get_latest_repos(module, base_cmd, state, release): + """ Add RHEL-OSP latest repos """ + + if not release: + module.fail_json(msg="Missing release number for '%s' state" % state) + cmd = [base_cmd, release] + if state == "pinned": + cmd.append('-P') + if module.params["dest"]: + cmd.extend(["-t", module.params["dest"]]) + if module.params["distro"]: + cmd.extend(["-r", module.params["distro"]]) + if module.params["repo_type"] == "poodle": + cmd.append("-d") + if module.params["version"]: + cmd.extend(["-p", module.params["version"]]) + + rc, out, err = module.run_command(cmd) + if rc == "127": + module.fail_json(msg='Requires rhos-release installed. 
%s: %s' % (cmd, err)) + elif rc: + module.fail_json(msg='Error: %s: %s' % (cmd, err)) + summary = _parse_output(module, out) + module.exit_json(changed=True, **summary) + + +def main(): + """ Main """ + module = AnsibleModule( + argument_spec = dict( + state=dict(default="pinned", choices=['absent', 'pinned', 'rolling'], required=False), + release=dict(required=True), + dest=dict(default=None, required=False), + distro=dict(default=None, required=False), + repo_type=dict(default="puddle", choices=['puddle', 'poodle'], required=False), + version=dict(default=None, required=False) + ) + ) + state = module.params["state"] + release = module.params["release"] + + base_cmd = "rhos-release" + if state == "absent": + _remove_repos(module, base_cmd) + else: + _get_latest_repos(module, base_cmd, state, release) + +# import module snippets +from ansible.module_utils.basic import * +if __name__ == '__main__': + main() From d09513532966582891f843629d4d0ddd1bccdfae Mon Sep 17 00:00:00 2001 From: Steve Linabery Date: Thu, 28 Jan 2016 11:24:50 -0600 Subject: [PATCH 018/137] Add full path for overcloud images build log This command was missing the full path, and this change makes it consistent with the rest of the plays in this playbook. 
Change-Id: I76b5a9f76abde30e7863eb389920b1021cb9ed94 --- playbooks/installer/rdo-manager/undercloud/build-images.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/playbooks/installer/rdo-manager/undercloud/build-images.yml b/playbooks/installer/rdo-manager/undercloud/build-images.yml index 532de59b8..94c3fb7c6 100644 --- a/playbooks/installer/rdo-manager/undercloud/build-images.yml +++ b/playbooks/installer/rdo-manager/undercloud/build-images.yml @@ -35,7 +35,7 @@ when: installer.overcloud_images | default('build') == "build" - name: expose errors durring DIB build - shell: cat openstack-build-images.log | grep -v liberror | grep -v libgpg-error | grep -A 1 -B 1 error + shell: cat {{ instack_user_home }}/openstack-build-images.log | grep -v liberror | grep -v libgpg-error | grep -A 1 -B 1 error when: installer.overcloud_images | default('build') == "build" - name: list the files in overcloud_images From 85a29ad873802eef6fcd4e346f6356f8a1c04c4c Mon Sep 17 00:00:00 2001 From: Steve Linabery Date: Thu, 28 Jan 2016 12:44:07 -0600 Subject: [PATCH 019/137] Ignore rc of grep command when filtering overcloud image build log If there are no matches for the grep expressions in this play, then it will halt the execution of the playbook. This change ignores the exit status of grep. 
Change-Id: I64957075de1513e625b5336b742877cd814736e8 --- playbooks/installer/rdo-manager/undercloud/build-images.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/playbooks/installer/rdo-manager/undercloud/build-images.yml b/playbooks/installer/rdo-manager/undercloud/build-images.yml index 94c3fb7c6..87a5e0462 100644 --- a/playbooks/installer/rdo-manager/undercloud/build-images.yml +++ b/playbooks/installer/rdo-manager/undercloud/build-images.yml @@ -36,6 +36,7 @@ - name: expose errors durring DIB build shell: cat {{ instack_user_home }}/openstack-build-images.log | grep -v liberror | grep -v libgpg-error | grep -A 1 -B 1 error + ignore_errors: true when: installer.overcloud_images | default('build') == "build" - name: list the files in overcloud_images From e7b13c98ccaf69d83b4664d63b2a51df7cae4299 Mon Sep 17 00:00:00 2001 From: Steve Linabery Date: Thu, 28 Jan 2016 14:36:47 -0600 Subject: [PATCH 020/137] DIB_LOCAL_IMAGE was specified twice with two different settings Change-Id: I11aa7d231c9d5c21ebd3b1ade79578fe52f521e8 --- playbooks/installer/rdo-manager/templates/build-img-env.j2 | 1 - 1 file changed, 1 deletion(-) diff --git a/playbooks/installer/rdo-manager/templates/build-img-env.j2 b/playbooks/installer/rdo-manager/templates/build-img-env.j2 index 141de84bb..c13871d3d 100644 --- a/playbooks/installer/rdo-manager/templates/build-img-env.j2 +++ b/playbooks/installer/rdo-manager/templates/build-img-env.j2 @@ -2,7 +2,6 @@ export DIB_LOCAL_IMAGE={{ distro.images[distro.name][distro.full_version].guest_ {% if product.repo_type is defined and product.repo_type in ["poodle", "puddle"] %} export DIB_YUM_REPO_CONF="{{installer.dib_dir}}/rhos-release-{{product.repo.core_product_version}}-director.repo {{installer.dib_dir}}/rhos-release-{{product.repo.core_product_version}}.repo {{installer.dib_dir}}/rhos-release-rhel-{{distro.full_version}}.repo" -export DIB_LOCAL_IMAGE={{ distro.images[distro.name][ansible_distribution_version].guest_image_name }} export 
USE_DELOREAN_TRUNK=0 export NODE_DIST=rhel7 export RUN_RHOS_RELEASE=1 From 6b102b2602d52793cf7043079906175ff2a6b4a7 Mon Sep 17 00:00:00 2001 From: Wesley Hayutin Date: Fri, 29 Jan 2016 08:02:31 -0500 Subject: [PATCH 021/137] clean out the ospd-7 tempest skip file. we have a fairly clean run w/o anything in the skip file for ospd-7 The clean run had 3 compute, 3 controller, 3 ceph Change-Id: I39974d40d657d34cce8da9926c8529f70c977da7 --- .../rdoci-rhos-7-director-rdo-manager | 32 ------------------- 1 file changed, 32 deletions(-) diff --git a/playbooks/post-deploy/rdo-manager/files/tempest_skip/rdoci-rhos-7-director-rdo-manager b/playbooks/post-deploy/rdo-manager/files/tempest_skip/rdoci-rhos-7-director-rdo-manager index c4dc18db0..e69de29bb 100644 --- a/playbooks/post-deploy/rdo-manager/files/tempest_skip/rdoci-rhos-7-director-rdo-manager +++ b/playbooks/post-deploy/rdo-manager/files/tempest_skip/rdoci-rhos-7-director-rdo-manager @@ -1,32 +0,0 @@ -# rhbz1253709 --tempest.api.compute.certificates.test_certificates.CertificatesV2TestJSON.test_create_root_certificate --tempest.api.compute.certificates.test_certificates.CertificatesV2TestJSON.test_get_root_certificate -# rhbz1253765 --tempest.api.object_storage.test_container_staticweb.StaticWebTest.test_web_index --tempest.api.object_storage.test_container_staticweb.StaticWebTest --tempest.api.object_storage.test_object_slo.ObjectSloTest.test_delete_large_object --tempest.api.object_storage.test_object_slo.ObjectSloTest.test_list_large_object_metadata --tempest.api.object_storage.test_object_slo.ObjectSloTest.test_retrieve_large_object --tempest.api.object_storage.test_object_slo.ObjectSloTest.test_upload_manifest --tempest.api.object_storage.test_object_version.ContainerTest.test_versioned_container --tempest.api.orchestration.stacks.test_swift_resources.SwiftResourcesTestJSON.test_acl --tempest.api.orchestration.stacks.test_swift_resources.SwiftResourcesTestJSON.test_metadata -# rhbz1254938 
--tempest.api.volume.admin.test_volumes_backup.VolumesBackupsV1Test.test_volume_backup_create_get_detailed_list_restore_delete --tempest.api.volume.admin.test_volumes_backup.VolumesBackupsV2Test.test_volume_backup_create_get_detailed_list_restore_delete --tempest.api.volume.test_volumes_snapshots.VolumesV1SnapshotTestJSON.test_volume_from_snapshot --tempest.api.volume.test_volumes_snapshots.VolumesV2SnapshotTestJSON.test_volume_from_snapshot -# rhbz1266947 --tempest.api.identity.admin.v3 -# rhbz1274308 --tempest.api.object_storage.test_container_services.ContainerTest.test_create_container --tempest.api.object_storage.test_object_services.ObjectTest.test_update_object_metadata -# rhbz1240816 --tempest.scenario.test_volume_boot_pattern -# rhbz1295556 --tempest.api.volume.test_volumes_get -# rhbz1295561 --tempest.api.image.v2.test_images.BasicOperationsImagesTest.test_update_image -# rhbz1295565 --tempest.api.network.test_ports.PortsTestJSON.test_create_port_in_allowed_allocation_pools --tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_port_in_allowed_allocation_pools From 1ec2cd568a2e8360cae5be20683b2c631f8e0838 Mon Sep 17 00:00:00 2001 From: wes hayutin Date: Thu, 28 Jan 2016 20:33:04 +0000 Subject: [PATCH 022/137] add ha topology to rdo-manager 3 controller 3 compute 3 ceph Change-Id: Ia3af857f14c86c0fca4438c3406b5346f7600754 --- .../installer/rdo_manager/topology/ha.yml | 32 +++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 settings/installer/rdo_manager/topology/ha.yml diff --git a/settings/installer/rdo_manager/topology/ha.yml b/settings/installer/rdo_manager/topology/ha.yml new file mode 100644 index 000000000..1693ee386 --- /dev/null +++ b/settings/installer/rdo_manager/topology/ha.yml @@ -0,0 +1,32 @@ +installer: + topology_name: minimal + network_restart: True + nodes: + node_count: 9 + controller: + remote_user: heat-admin + nova_list_type: controller + flavor: baremetal + scale: 3 + tester: + remote_user: root + compute: 
+ type: Compute + nova_list_type: compute + flavor: baremetal + scale: 3 + blockstorage: + type: Cinder-Storage + nova_list_type: cinderstorage + flavor: baremetal + scale: 0 + swiftstorage: + type: Swift-Storage + nova_list_type: swiftstorage + flavor: baremetal + scale: 0 + cephstorage: + type: Ceph-Storage + nova_list_type: cephstorage + flavor: baremetal + scale: 3 From 09e17df1ee9e12d7e7c5297ff1916847bf7b90bf Mon Sep 17 00:00:00 2001 From: Luigi Toscano Date: Thu, 28 Jan 2016 11:04:55 +0100 Subject: [PATCH 023/137] Use pip and virtualenv only if required Fix the horizon test runner to source the virtualenv if it is really required (some packages from pip). Change-Id: I62d08e10c501ecd99aae015e5432bf7a91570466 --- playbooks/tester/integration/horizon/run.yml | 4 +++- playbooks/tester/integration/pre.yml | 4 ++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/playbooks/tester/integration/horizon/run.yml b/playbooks/tester/integration/horizon/run.yml index 614142b37..5cbcfb750 100644 --- a/playbooks/tester/integration/horizon/run.yml +++ b/playbooks/tester/integration/horizon/run.yml @@ -11,7 +11,9 @@ BROWSER_NAME: "{{ lookup('env', 'BROWSER_NAME') }}" BROWSER_VERSION: "{{ lookup('env', 'BROWSER_VERSION') }}" BROWSER_PLATFORM: "{{ lookup('env', 'BROWSER_PLATFORM') }}" - shell: source ~/{{ tester.venv_dir }}/bin/activate && nosetests -v -a "{{ tester.integration.tests_tag }}" --with-xunit --xunit-file=horizon.xml openstack_dashboard/test/integration_tests/tests chdir=~/{{ tester.dir }} + shell: | + [ -d ~/{{ tester.venv_dir }} ] && source ~/{{ tester.venv_dir }}/bin/activate + nosetests -v -a "{{ tester.integration.tests_tag }}" --with-xunit --xunit-file=horizon.xml openstack_dashboard/test/integration_tests/tests chdir=~/{{ tester.dir }} ignore_errors: True async: 21600 poll: 30 \ No newline at end of file diff --git a/playbooks/tester/integration/pre.yml b/playbooks/tester/integration/pre.yml index 9931ae110..9ca821d4c 100644 --- 
a/playbooks/tester/integration/pre.yml +++ b/playbooks/tester/integration/pre.yml @@ -20,6 +20,9 @@ - yum: name={{ item }} state=present with_items: tester.packages + - yum: name=python-virtualenv state=present + when: "tester.pip_packages is defined and tester.pip_packages|length > 0" + - name: Prepare repository with tests hosts: tester sudo: no @@ -40,6 +43,7 @@ - name: Install pip test requirements pip: name={{ item }} virtualenv=~/{{ tester.venv_dir }} virtualenv_site_packages=yes with_items: tester.pip_packages + when: "tester.pip_packages is defined and tester.pip_packages|length > 0" - name: Prepare the environment (users and tenant) hosts: controller From e5d186b3e6090e7fdbe91ba1bc5874bd265bb3fe Mon Sep 17 00:00:00 2001 From: Jon Schlueter Date: Thu, 28 Jan 2016 11:34:20 -0500 Subject: [PATCH 024/137] [rhos-release] Correct usage of rhos-release -x since 1.0 version of rhos-release the syntax for -x (delete) changed. "rhos-release -x" takes no parameters Change-Id: Id19195377a1f5730e4829b7a544141479b0c5a29 --- playbooks/installer/project/pre.yml | 2 +- roles/linux/rhel/rhos/tasks/main.yml | 18 +++++++++--------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/playbooks/installer/project/pre.yml b/playbooks/installer/project/pre.yml index 250704ca7..e37f3a6d4 100644 --- a/playbooks/installer/project/pre.yml +++ b/playbooks/installer/project/pre.yml @@ -47,7 +47,7 @@ changed_when: "shell_result == 0" - name: Create the RHOS poodle repository - shell: "rhos-release -x {{ product.version.major }}{{ installer_host_repo | default('')}}; rhos-release -d {{ product.version.major }}" + shell: "rhos-release -x; rhos-release -d {{ product.version.major }}" when: product.repo.type is defined and product.repo.type in ['poodle'] - name: Print installed repositores diff --git a/roles/linux/rhel/rhos/tasks/main.yml b/roles/linux/rhel/rhos/tasks/main.yml index 6275befc1..5edd69fee 100644 --- a/roles/linux/rhel/rhos/tasks/main.yml +++ 
b/roles/linux/rhel/rhos/tasks/main.yml @@ -55,7 +55,7 @@ - name: Enable RHSM shell: > - rhos-release -x {{ product.version.major }}{{ installer_host_repo | default('')}}; + rhos-release -x; rm -Rf /etc/yum.repos.d/rhos-release.repo; subscription-manager register --username {{ distro.rhel.subscription.username }} --password {{ distro.rhel.subscription.password }}; subscription-manager subscribe --pool {{ distro.rhel.subscription.pool }}; @@ -93,20 +93,20 @@ # new advanced repos - name: Create the RHOS Advanced repository - shell: "rhos-release -x {{ product.version.major }}; rhos-release {{ product.version.major }}a" + shell: "rhos-release -x" when: product.repo_type == 'advanced' # poodle repos - name: Create the RHOS poodle repository - shell: "rhos-release -x {{ product.version.major }}{{ installer_host_repo | default('')}}; rhos-release -d {{ product.version.major }}{{ installer_host_repo | default('')}}" + shell: "rhos-release -x; rhos-release -d {{ product.version.major }}{{ installer_host_repo | default('')}}" when: (product.repo_type in ['poodle'] and installer is defined and installer.name not in ['instack', 'rdo-manager']) - name: Create the OSP-Director poodle repository - shell: "rhos-release -x {{ product.full_version }}{{ installer_host_repo | default('')}}; rhos-release -d {{ product.full_version }}{{ installer_host_repo | default('')}}" + shell: "rhos-release -x; rhos-release -d {{ product.full_version }}{{ installer_host_repo | default('')}}" when: (product.repo_type in ['poodle'] and installer is defined and installer.name in ['instack', 'rdo-manager']) - name: Create the RHOS Advanced poodle repository - shell: "rhos-release -x {{ product.full_version }}; rhos-release -d {{ product.full_version }}a" + shell: "rhos-release -x; rhos-release -d {{ product.full_version }}a" when: product.repo_type == 'poodle_advanced' - name: Create the COPR repos required for component tests @@ -114,19 +114,19 @@ when: (test.type.name is defined and (test.type.name 
== 'unit-test' or test.type.name == 'pep8-test') and ansible_distribution_version|int >= 6) - name: Change poodle version for repos in rhos-release - shell: "rhos-release -x {{ product.version.major }}; rhos-release {{ product.version.major }} -d -p {{ product.repo.poodle_pin_version }}" + shell: "rhos-release -x; rhos-release {{ product.version.major }} -d -p {{ product.repo.poodle_pin_version }}" when: (product.repo.poodle_pin_version is defined and product.repo.poodle_pin_version != 'latest|GA' and product.repo_type == 'poodle' and installer_host_repo | default('') == '') - name: Change poodle version for repos in rhos-release for OFI installer host - shell: "rhos-release -x {{ product.version.major }}{{ installer_host_repo | default('')}}; rhos-release {{ product.version.major }}{{ installer_host_repo | default('')}} -d -p {{ product.repo.installer_poodle_pin_version }}" + shell: "rhos-release -x; rhos-release {{ product.version.major }}{{ installer_host_repo | default('')}} -d -p {{ product.repo.installer_poodle_pin_version }}" when: (product.repo.installer_poodle_pin_version is defined and product.repo.installer_poodle_pin_version != 'latest|GA' and product.repo_type == 'poodle' and installer is defined and installer.name == "foreman" and installer_host_repo | default('') != '') - name: Change poodle version for repos in rhos-release for GA -> Latest Poodle - shell: "rhos-release -x {{ product.version.major }}; rhos-release {{ product.version.major }} -p {{ product.repo.poodle_pin_version }}" + shell: "rhos-release -x; rhos-release {{ product.version.major }} -p {{ product.repo.poodle_pin_version }}" when: (product.repo.poodle_pin_version is defined and product.repo.poodle_pin_version == 'GA' and product.repo_type == 'poodle' and installer_host_repo | default('') == '') - name: Change poodle version for repos in rhos-release for OFI installer host and GA-> latest Poodle - shell: "rhos-release -x {{ product.version.major }}{{ installer_host_repo | 
default('')}}; rhos-release {{ product.version.major }}{{ installer_host_repo | default('')}} -d -p {{ product.repo.installer_poodle_pin_version }}" + shell: "rhos-release -x; rhos-release {{ product.version.major }}{{ installer_host_repo | default('')}} -d -p {{ product.repo.installer_poodle_pin_version }}" when: (product.repo.installer_poodle_pin_version is defined and product.repo.installer_poodle_pin_version == 'latest|GA' and product.repo_type == 'poodle' and installer is defined and installer.name == "foreman" and installer_host_repo | default('') != '') # copr repos From 776808130c176a2e69e098283cc3924471a76731 Mon Sep 17 00:00:00 2001 From: Mathieu Bultel Date: Tue, 26 Jan 2016 11:37:23 +0100 Subject: [PATCH 025/137] Add GA parameter for osp 7.2 Change-Id: I0ec51bb22d3f2e4999586bb3843299d9b5405d6a --- settings/installer/rdo_manager/images/import.yml | 1 + .../product/rhos/version/7_director/build/ga_72.yml | 13 +++++++++++++ 2 files changed, 14 insertions(+) create mode 100644 settings/product/rhos/version/7_director/build/ga_72.yml diff --git a/settings/installer/rdo_manager/images/import.yml b/settings/installer/rdo_manager/images/import.yml index caf649e81..39e7a251b 100644 --- a/settings/installer/rdo_manager/images/import.yml +++ b/settings/installer/rdo_manager/images/import.yml @@ -21,6 +21,7 @@ installer: ga: '7.0': !lookup private.installer.images.rhos.7_director.GA.7.0 '7.1': !lookup private.installer.images.rhos.7_director.GA.7.1 + '7.2': !lookup private.installer.images.rhos.7_director.GA.7.2 '8-director': latest: '8.0': !lookup private.installer.images.rhos.8_director.latest.8.0 diff --git a/settings/product/rhos/version/7_director/build/ga_72.yml b/settings/product/rhos/version/7_director/build/ga_72.yml new file mode 100644 index 000000000..d5809464d --- /dev/null +++ b/settings/product/rhos/version/7_director/build/ga_72.yml @@ -0,0 +1,13 @@ +--- +product: + build: ga + build_version: ga-7.2 + repo: + puddle_pin_version: 'Z2' + 
poodle_pin_version: 'Z2' + core_product_version: 7 + puddle_director_pin_version: 'Y3' + +installer: + images: + version: '7.2' From ba67df189df51b06ea079829c004ca8955f7753d Mon Sep 17 00:00:00 2001 From: Adriano Petrich Date: Wed, 2 Dec 2015 10:36:36 +0000 Subject: [PATCH 026/137] Adding a cookbok on how to use the build-rpm Change-Id: I743210ac945fbcfdf5562bfe95680d1a1f57df77 --- doc/cookbook.rst | 118 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 118 insertions(+) diff --git a/doc/cookbook.rst b/doc/cookbook.rst index 992450500..adc3ce453 100644 --- a/doc/cookbook.rst +++ b/doc/cookbook.rst @@ -143,3 +143,121 @@ Cleanup After you finished your work, you can simply remove the created instances by:: ansible-playbook -vv --extra-vars @ksgen_settings.yml -i hosts playbooks/cleanup.yml + + +Building rpms +------------- +You can use khaleesi to build rpms for you. + +If you want to test manually a rpm with a patch from gerrit you can use the khaleesi infrastructure to do that. + +Setup Configuration: +```````````````````` +What you will need: + +Ansible 1.9 installed I would recomend on a virtualenv:: + + virtualenv foobar + source foobar/bin/activate + pip install ansible==1.9.4 + + +``rdopkg`` is what is going to do the heavy lifting + + https://github.com/redhat-openstack/rdopkg + +.. 
Note:: The yum version is a bit old so it is better to install from source + +Like this:: + + git clone https://github.com/redhat-openstack/rdopkg + cd rdopkg + python setup.py develop --user + +You will aslo need a ``rhpkg`` or a ``fedpkg`` those can be obtained from yum or dnf:: + + yum install -y rhpkg + +or:: + + yum install -y fedpkg + +In khaleesi will build the package locally (on a /tmp/tmp.patch_rpm_* directory) but in +order to do that it needs a file called ``hosts_local`` on your khaleesi folder + +The ``hosts_local`` should have this content:: + + [local] + localhost ansible_connection=local + +ksgen_settings needed +````````````````````` + +Once you've got that you need to setup what gerrit patch you want to test:: + + + export GERRIT_BRANCH= + export GERRIT_REFSPEC= + export EXECUTOR_NUMBER=0; #needed for now + + +Then you'll need to load this structure into your ``ksgen_settings.yml``:: + + patch: + upstream: + name: "upstream-" + url: "https://git.openstack.org/openstack/" + gerrit: + name: "gerrit-" + url: "" + branch: "{{ lookup('env', 'GERRIT_BRANCH') }}" + refspec: "{{ lookup('env', 'GERRIT_REFSPEC') }}" + dist_git: + name: "openstack-" + url: "" + use_director: False + +There's two ways to do that: + +Either set the values via extra-vars:: + + ksgen --config-dir settings \ + generate \ + --distro=rhel-7.1 \ + --product=rhos \ + --product-version=7.0 + --extra-vars patch.upstream.name=upstream- \ + --extra-vars patch.upstream.url=https://git.openstack.org/openstack/ \ + --extra-vars patch.gerrit.name=gerrit- \ + --extra-vars patch.gerrit.url= \ + --extra-vars patch.gerrit.branch=$GERRIT_BRANCH \ + --extra-vars patch.gerrit.refspec=$GERRIT_REFSPEC \ + --extra-vars patch.dist_git.name=openstack- \ + --extra-vars patch.dist_git.url= \ + --extra-vars @../khaleesi-settings/settings/product/rhos/private_settings/redhat_internal.yml \ + ksgen_settings.yml + +Or if khaleesi already has the settings for package you are trying to build on 
khaleesi/settings/rpm/.yml you can do this second method:: + + ksgen --config-dir settings \ + generate \ + --distro=rhel-7.1 \ + --product=rhos \ + --product-version=7.0 + --rpm= + --extra-vars @../khaleesi-settings/settings/product/rhos/private_settings/redhat_internal.yml \ + ksgen_settings.yml + +.. Note:: At this time this second method works only for instack-undercloud, ironic, tripleo-heat-templates and python-rdomanager-oscplugin + + +Playbook usage +`````````````` + +Then just call the playbook with that ksgen_settings:: + + ansible-playbook -vv --extra-vars @ksgen_settings.yml -i local_hosts playbooks/build_gate_rpm.yml + +When the playbook is done the generated rpms will be on the ``generated_rpms`` of your ``khaleesi`` directory + + From 1df012d2554efd521d62afd75245918803b9955f Mon Sep 17 00:00:00 2001 From: Wesley Hayutin Date: Wed, 13 Jan 2016 07:57:08 -0500 Subject: [PATCH 027/137] breakup the undercloud playbooks into logical steps The goal w/ this patch is to start breaking down playbooks into independent and idempotent steps as much as possible. The breakdown of the undercloud should follow the tripleo docs as much as possible. Note, environment-setup, undercloud etc, we're trying to reuse the same terms used in the docs. The one step that can not be idempotent is instack-virt-setup, until replaced by rdo a cleanup will have to be executed before instack-virt-setup. Installing the undercloud, and building/importing images should be idempotent and unique steps that can be executed after instack-virt-setup (or other provisioner) has run. In this review you will find that there is room for additional virt provisioners, so that instack-virt-setup does *not* have to be the sole way to stand up the virt nodes for the undercloud and overcloud. The scope of this change should mainly be around reorganizing the playbooks, removing uneeded code and updating playbooks to the best practices guidelines. 
Baremetal testing https://rhos-jenkins.rhev-ci-vms.eng.rdu2.redhat.com/view/POC/job/whayutin-bm-test-259645/ Depends-On: I58ca8d41b0b7d7b1e9b109f10e3dcfb09dc632a0@codeng Change-Id: I66e31fccc2c9742844061a6e2c9b0bd6210ac200 --- playbooks/gate.yml | 3 + playbooks/installer/rdo-manager/README.txt | 26 +- .../rdo-manager/environment-setup.yml | 2 + .../rdo-manager/environment-setup/README.txt | 7 + .../environment-setup/baremetal/main.yml | 3 + .../environment-setup/baremetal/run.yml | 122 +++++++ .../rdo-manager/environment-setup/gate.yml | 2 + .../rdo-manager/environment-setup/main.yml | 3 + .../environment-setup/virthost/gate.yml | 31 ++ .../virthost/instack-virt-setup/README.txt | 6 + .../virthost/instack-virt-setup/gate.yml | 25 ++ .../virthost/instack-virt-setup/main.yml | 3 + .../virthost/instack-virt-setup/post.yml | 7 + .../virthost/instack-virt-setup/run.yml | 203 ++++++++++++ .../environment-setup/virthost/main.yml | 12 + playbooks/installer/rdo-manager/gate.yml | 5 + playbooks/installer/rdo-manager/images.yml | 2 + .../installer/rdo-manager/images/README.txt | 5 + .../installer/rdo-manager/images/main.yml | 2 + .../build-images.yml => images/run.yml} | 0 ..._undercloud.yml => instack-virt-setup.yml} | 2 +- .../rdo-manager/install-undercloud.yml | 2 + playbooks/installer/rdo-manager/main.yml | 2 + .../rdo-manager/undercloud/README.txt | 3 + .../installer/rdo-manager/undercloud/gate.yml | 42 +++ .../installer/rdo-manager/undercloud/main.yml | 15 +- .../rdo-manager/undercloud/post-baremetal.yml | 45 +++ .../rdo-manager/undercloud/post-virthost.yml | 10 + .../installer/rdo-manager/undercloud/post.yml | 45 +++ .../rdo-manager/undercloud/pre-baremetal.yml | 270 +++++++--------- .../rdo-manager/undercloud/pre-virthost.yml | 303 +----------------- .../installer/rdo-manager/undercloud/pre.yml | 26 ++ .../rdo-manager/undercloud/repo-rdo.yml | 96 ------ .../installer/rdo-manager/undercloud/run.yml | 262 --------------- .../installer/rdo-manager/user/README.txt | 4 + 
playbooks/installer/rdo-manager/user/main.yml | 46 +++ .../rdo-manager/yum_repos/README.txt | 4 + .../rdo-manager/yum_repos/repo-rdo.yml | 65 ++++ .../{undercloud => yum_repos}/repo-rhos.yml | 11 - 39 files changed, 882 insertions(+), 840 deletions(-) create mode 100644 playbooks/gate.yml create mode 100644 playbooks/installer/rdo-manager/environment-setup.yml create mode 100644 playbooks/installer/rdo-manager/environment-setup/README.txt create mode 100644 playbooks/installer/rdo-manager/environment-setup/baremetal/main.yml create mode 100644 playbooks/installer/rdo-manager/environment-setup/baremetal/run.yml create mode 100644 playbooks/installer/rdo-manager/environment-setup/gate.yml create mode 100644 playbooks/installer/rdo-manager/environment-setup/main.yml create mode 100644 playbooks/installer/rdo-manager/environment-setup/virthost/gate.yml create mode 100644 playbooks/installer/rdo-manager/environment-setup/virthost/instack-virt-setup/README.txt create mode 100644 playbooks/installer/rdo-manager/environment-setup/virthost/instack-virt-setup/gate.yml create mode 100644 playbooks/installer/rdo-manager/environment-setup/virthost/instack-virt-setup/main.yml create mode 100644 playbooks/installer/rdo-manager/environment-setup/virthost/instack-virt-setup/post.yml create mode 100644 playbooks/installer/rdo-manager/environment-setup/virthost/instack-virt-setup/run.yml create mode 100644 playbooks/installer/rdo-manager/environment-setup/virthost/main.yml create mode 100644 playbooks/installer/rdo-manager/gate.yml create mode 100644 playbooks/installer/rdo-manager/images.yml create mode 100644 playbooks/installer/rdo-manager/images/README.txt create mode 100644 playbooks/installer/rdo-manager/images/main.yml rename playbooks/installer/rdo-manager/{undercloud/build-images.yml => images/run.yml} (100%) rename playbooks/installer/rdo-manager/{install_undercloud.yml => instack-virt-setup.yml} (66%) create mode 100644 
playbooks/installer/rdo-manager/install-undercloud.yml create mode 100644 playbooks/installer/rdo-manager/undercloud/README.txt create mode 100644 playbooks/installer/rdo-manager/undercloud/gate.yml create mode 100644 playbooks/installer/rdo-manager/undercloud/post-baremetal.yml create mode 100644 playbooks/installer/rdo-manager/undercloud/post-virthost.yml create mode 100644 playbooks/installer/rdo-manager/undercloud/post.yml create mode 100644 playbooks/installer/rdo-manager/undercloud/pre.yml delete mode 100644 playbooks/installer/rdo-manager/undercloud/repo-rdo.yml create mode 100644 playbooks/installer/rdo-manager/user/README.txt create mode 100644 playbooks/installer/rdo-manager/user/main.yml create mode 100644 playbooks/installer/rdo-manager/yum_repos/README.txt create mode 100644 playbooks/installer/rdo-manager/yum_repos/repo-rdo.yml rename playbooks/installer/rdo-manager/{undercloud => yum_repos}/repo-rhos.yml (93%) diff --git a/playbooks/gate.yml b/playbooks/gate.yml new file mode 100644 index 000000000..e9eec627e --- /dev/null +++ b/playbooks/gate.yml @@ -0,0 +1,3 @@ +--- +- include: provision.yml +- include: installer/{{ installer.type }}/gate.yml diff --git a/playbooks/installer/rdo-manager/README.txt b/playbooks/installer/rdo-manager/README.txt index 8c3c1df08..2d5011427 100644 --- a/playbooks/installer/rdo-manager/README.txt +++ b/playbooks/installer/rdo-manager/README.txt @@ -1,10 +1,24 @@ +See http://docs.openstack.org/developer/tripleo-docs/ for details about tripleo/rdo-manager See http://khaleesi.readthedocs.org/en/master/cookbook.html for a quickstart -To *only* cleanup a virthost: -ansible-playbook -vv --extra-vars @ksgen_settings.yml -i local_hosts playbooks/installer/rdo-manager/cleanup_virthost.yml +The ansible playbooks under rdo-manager should follow the install documentation as described in the tripleo documentation as +closely as possible. 
-To *only* install the undercloud: -ansible-playbook -vv --extra-vars @ksgen_settings.yml -i local_hosts playbooks/installer/rdo-manager/install_undercloud.yml +If you are interested in using instack virtual provisioning (instack-virt-setup) -To *only* deploy the overcloud -ansible-playbook -vv --extra-vars @ksgen_settings.yml -i hosts playbooks/installer/rdo-manager/overcloud/main.yml + To *only* cleanup a virthost: + ansible-playbook -vv --extra-vars @ksgen_settings.yml -i local_hosts playbooks/installer/rdo-manager/cleanup_virthost.yml + + To *only* use instack-virt-setup to provision virt undercloud and overcloud nodes + ansible-playbook -vv --extra-vars @ksgen_settings.yml -i local_hosts playbooks/installer/rdo-manager/instack-virt-setup.yml + +If you are using baremetal or using libvirt w/o instack-virt-setup + + To *only* prepare your environment: + ansible-playbook -vv --extra-vars @ksgen_settings.yml -i local_hosts playbooks/installer/rdo-manager/environment-setup.yml + + To *only* install the undercloud: + ansible-playbook -vv --extra-vars @ksgen_settings.yml -i local_hosts playbooks/installer/rdo-manager/install_undercloud.yml + + To *only* deploy the overcloud + ansible-playbook -vv --extra-vars @ksgen_settings.yml -i hosts playbooks/installer/rdo-manager/overcloud/main.yml diff --git a/playbooks/installer/rdo-manager/environment-setup.yml b/playbooks/installer/rdo-manager/environment-setup.yml new file mode 100644 index 000000000..2df6788ec --- /dev/null +++ b/playbooks/installer/rdo-manager/environment-setup.yml @@ -0,0 +1,2 @@ +--- +- include: environment-setup/main.yml diff --git a/playbooks/installer/rdo-manager/environment-setup/README.txt b/playbooks/installer/rdo-manager/environment-setup/README.txt new file mode 100644 index 000000000..49b4498c4 --- /dev/null +++ b/playbooks/installer/rdo-manager/environment-setup/README.txt @@ -0,0 +1,7 @@ +This directory environment-setup is the correct location for tools that setup the undercloud and 
overcloud nodes by outside scripts or instack-virt-setup. It is recommended if using khaleesi to provision the undercloud and overcloud nodes that the playbooks/provisioner directory is used to provision the nodes while any post provision steps move here. + +Current supported environment setup types.. +- baremetal +- virthost + +http://docs.openstack.org/developer/tripleo-docs/environments/environments.html diff --git a/playbooks/installer/rdo-manager/environment-setup/baremetal/main.yml b/playbooks/installer/rdo-manager/environment-setup/baremetal/main.yml new file mode 100644 index 000000000..a1bad33e3 --- /dev/null +++ b/playbooks/installer/rdo-manager/environment-setup/baremetal/main.yml @@ -0,0 +1,3 @@ +--- +- include: "{{base_dir}}/khaleesi/playbooks/installer/rdo-manager/user/main.yml host=undercloud" +- include: run.yml diff --git a/playbooks/installer/rdo-manager/environment-setup/baremetal/run.yml b/playbooks/installer/rdo-manager/environment-setup/baremetal/run.yml new file mode 100644 index 000000000..c3cd7a289 --- /dev/null +++ b/playbooks/installer/rdo-manager/environment-setup/baremetal/run.yml @@ -0,0 +1,122 @@ +--- +- name: Ensure baremetal host has no yum repos installed + hosts: undercloud + vars: + - ansible_ssh_user: root + tasks: + - name: clean release rpms + yum: name={{ item }} state=absent + with_items: + - rdo-release* + - epel-release + - rhos-release + + - name: remove any yum repos not owned by rpm + shell: rm -Rf /etc/yum.repos.d/{{ item }} + with_items: + - beaker-* + +#this include calls playbooks that setup the appropriate yum repos on the undercloud +- include: "{{base_dir}}/khaleesi/playbooks/installer/rdo-manager/yum_repos/repo-{{ product.name }}.yml repo_host=undercloud" + +- name: Update packages on the host + hosts: undercloud + vars: + - ansible_ssh_user: root + tasks: + - name: repolist + command: yum -d 7 repolist + + - name: update all packages + yum: name=* state=latest + +- name: Enable ip forwarding + hosts: undercloud 
+ vars: + - ansible_ssh_user: root + tasks: + - name: enabling ip forwarding + sysctl: name="net.ipv4.ip_forward" value=1 sysctl_set=yes reload=yes + when: hw_env.ip_forwarding is defined and hw_env.ip_forwarding == 'true' + +- name: Configure the baremetal undercloud + hosts: undercloud + tasks: + - name: check if instackenv.json exists in root + sudo_user: root + sudo: yes + stat: path="/root/instackenv.json" + register: instackenv_json_root + + - name: copy instackenv.json from root if it exists there + sudo_user: root + sudo: yes + shell: cp /root/instackenv.json {{ instack_user_home }}/instackenv.json + when: instackenv_json_root.stat.exists == True + + - name: get instackenv.json + synchronize: src={{base_dir}}/khaleesi-settings/hardware_environments/{{hw_env.env_type}}/instackenv.json dest={{ instack_user_home }}/instackenv.json + when: instackenv_json_root.stat.exists == False + + - name: chown instackenv.json + sudo_user: root + sudo: yes + file: path={{ instack_user_home }}/instackenv.json owner=stack group=stack + + - name: install ipmitool + sudo_user: root + sudo: yes + yum: name={{ item }} state=latest + with_items: + - OpenIPMI + - OpenIPMI-tools + + - name: install sshpass - DRACS + sudo_user: root + sudo: yes + yum: name=sshpass state=latest + when: hw_env.remote_mgmt == "dracs" + + - name: start IMPI service + shell: > + sudo chkconfig ipmi on; + sudo service ipmi start + + - name: get tools to validate instackenv.json/nodes.json + git: > + repo="https://github.com/rthallisey/clapper.git" + dest="{{instack_user_home}}/clapper" + + - name: validate instackenv.json + shell: > + chdir={{instack_user_home}} + python clapper/instackenv-validator.py -f {{ instack_user_home }}/instackenv.json + register: instackenv_validator_output + + - name: fail if instackenv.json fails validation + fail: msg="instackenv.json didn't validate." 
+ when: instackenv_validator_output.stdout.find("SUCCESS") == -1 + + - name: get number of overcloud nodes + shell: > + export IP_LENGTH=(`cat {{ instack_user_home }}/instackenv.json | grep -o 'pm_addr.*' | cut -f2- -d':' | wc -l`); + echo $(($IP_LENGTH)) + register: node_length + + - name: power off node boxes - IPMI + shell: > + export IP=(`cat {{ instack_user_home }}/instackenv.json | grep -o 'pm_addr.*' | cut -f2- -d':' | sed 's/[},\"]//g'`); + export USER=(`cat {{ instack_user_home }}/instackenv.json | grep -o 'pm_user.*' | cut -f2- -d':' |rev | cut -c 2- | rev | sed 's/[},\"]//g'`); + export PASSWORD=(`cat {{ instack_user_home }}/instackenv.json | grep -o 'pm_password.*' | cut -f2- -d':' |rev | cut -c 2- | rev | sed 's/[},\"]//g'`); + ipmitool -I lanplus -H ${IP[item]} -U ${USER[item]} -P ${PASSWORD[item]} power off + with_sequence: count="{{node_length.stdout}}" + when: hw_env.remote_mgmt == "ipmi" + + - name: power off node boxes - DRACS + shell: > + export IP=(`cat {{ instack_user_home }}/instackenv.json | grep -o 'pm_addr.*' | cut -f2- -d':' | sed 's/[},\"]//g'`); + export USER=(`cat {{ instack_user_home }}/instackenv.json | grep -o 'pm_user.*' | cut -f2- -d':' |rev | cut -c 2- | rev | sed 's/[},\"]//g'`); + export PASSWORD=(`cat {{ instack_user_home }}/instackenv.json | grep -o 'pm_password.*' | cut -f2- -d':' |rev | cut -c 2- | rev | sed 's/[},\"]//g'`); + sshpass -p ${PASSWORD[item]} ssh -o "StrictHostKeyChecking=no" ${USER[item]}@${IP[item]} "racadm serveraction powerdown" + with_sequence: count="{{node_length.stdout}}" + when: hw_env.remote_mgmt == "dracs" diff --git a/playbooks/installer/rdo-manager/environment-setup/gate.yml b/playbooks/installer/rdo-manager/environment-setup/gate.yml new file mode 100644 index 000000000..a52d23210 --- /dev/null +++ b/playbooks/installer/rdo-manager/environment-setup/gate.yml @@ -0,0 +1,2 @@ +--- +- include: virthost/gate.yml diff --git a/playbooks/installer/rdo-manager/environment-setup/main.yml 
b/playbooks/installer/rdo-manager/environment-setup/main.yml new file mode 100644 index 000000000..aacd60670 --- /dev/null +++ b/playbooks/installer/rdo-manager/environment-setup/main.yml @@ -0,0 +1,3 @@ +--- +- include: "{{ installer.env.type }}/main.yml" +- include: "{{base_dir}}/khaleesi/playbooks/installer/rdo-manager/yum_repos/repo-{{ product.name }}.yml repo_host=undercloud" diff --git a/playbooks/installer/rdo-manager/environment-setup/virthost/gate.yml b/playbooks/installer/rdo-manager/environment-setup/virthost/gate.yml new file mode 100644 index 000000000..240be499e --- /dev/null +++ b/playbooks/installer/rdo-manager/environment-setup/virthost/gate.yml @@ -0,0 +1,31 @@ +--- +- name: clean up rdo-manager virthost + hosts: virthost + vars: + - ansible_ssh_user: root + roles: + - { role: cleanup_nodes/rdo-manager, + when: (installer.type == "rdo-manager" and provisioner.type == "manual") + } + +- include: "{{base_dir}}/khaleesi/playbooks/installer/rdo-manager/user/main.yml host=virthost" +- include: "{{base_dir}}/khaleesi/playbooks/installer/rdo-manager/yum_repos/repo-{{ product.name }}.yml repo_host=virthost" +- include: instack-virt-setup/gate.yml + +- name: setup the gating repo on the undercloud + hosts: virthost + tasks: + - name: set the permissions on the rpms + sudo: yes + file: path={{ generated_rpms_dir }} + recurse=yes + owner={{ provisioner.remote_user }} + group={{ provisioner.remote_user }} + mode=0755 + + - name: copy gating_repo package + shell: > + scp -F ssh.config.ansible {{ generated_rpms_dir }}/*.rpm undercloud-from-virthost:{{ instack_user_home }}/ + when: gating_repo is defined + +- include: "{{base_dir}}/khaleesi/playbooks/installer/rdo-manager/yum_repos/repo-{{ product.name }}.yml repo_host=undercloud" diff --git a/playbooks/installer/rdo-manager/environment-setup/virthost/instack-virt-setup/README.txt b/playbooks/installer/rdo-manager/environment-setup/virthost/instack-virt-setup/README.txt new file mode 100644 index 
000000000..e4225b712 --- /dev/null +++ b/playbooks/installer/rdo-manager/environment-setup/virthost/instack-virt-setup/README.txt @@ -0,0 +1,6 @@ +This playbook follows the documentation from tripleo as closely as possible + +This is one of many ways to prepare your undercloud environment and nodes for the overcloud. + +You can find the related documentation here: +http://docs.openstack.org/developer/tripleo-docs/environments/environments.html#preparing-the-virtual-environment-automated diff --git a/playbooks/installer/rdo-manager/environment-setup/virthost/instack-virt-setup/gate.yml b/playbooks/installer/rdo-manager/environment-setup/virthost/instack-virt-setup/gate.yml new file mode 100644 index 000000000..4f98c55f0 --- /dev/null +++ b/playbooks/installer/rdo-manager/environment-setup/virthost/instack-virt-setup/gate.yml @@ -0,0 +1,25 @@ +- name: Copy the gating package + hosts: virthost + vars: + - ansible_ssh_user: root + tasks: + - name: make temp directory + command: mktemp -d + register: temp_dir + + - name: set fact generated_rpms_dir + set_fact: generated_rpms_dir={{ temp_dir.stdout }} + + - name: copy downstream rpm package + copy: src={{ item }} dest={{ generated_rpms_dir }} + with_fileglob: + - "{{ lookup('env', 'PWD') }}/generated_rpms/*.rpm" + when: gating_repo is defined + + - name: install the generated rpm + sudo: yes + shell: "yum localinstall -y {{ generated_rpms_dir }}/*.rpm" + when: gating_repo is defined + +- include: run.yml +- include: post.yml diff --git a/playbooks/installer/rdo-manager/environment-setup/virthost/instack-virt-setup/main.yml b/playbooks/installer/rdo-manager/environment-setup/virthost/instack-virt-setup/main.yml new file mode 100644 index 000000000..f7b272cd0 --- /dev/null +++ b/playbooks/installer/rdo-manager/environment-setup/virthost/instack-virt-setup/main.yml @@ -0,0 +1,3 @@ +- include: "{{base_dir}}/khaleesi/playbooks/installer/rdo-manager/yum_repos/repo-{{ product.name }}.yml repo_host=virthost" +- include: run.yml 
+- include: post.yml diff --git a/playbooks/installer/rdo-manager/environment-setup/virthost/instack-virt-setup/post.yml b/playbooks/installer/rdo-manager/environment-setup/virthost/instack-virt-setup/post.yml new file mode 100644 index 000000000..c7bf264b1 --- /dev/null +++ b/playbooks/installer/rdo-manager/environment-setup/virthost/instack-virt-setup/post.yml @@ -0,0 +1,7 @@ +--- +- name: copy the guest image to the undercloud + hosts: virthost + tasks: + - name: upload the guest-image on the undercloud + command: scp -F ssh.config.ansible {{instack_user_home}}/{{ distro.images[distro.name][distro.full_version].guest_image_name }} undercloud-from-virthost:{{ instack_user_home }}/ + diff --git a/playbooks/installer/rdo-manager/environment-setup/virthost/instack-virt-setup/run.yml b/playbooks/installer/rdo-manager/environment-setup/virthost/instack-virt-setup/run.yml new file mode 100644 index 000000000..073c88012 --- /dev/null +++ b/playbooks/installer/rdo-manager/environment-setup/virthost/instack-virt-setup/run.yml @@ -0,0 +1,203 @@ +--- +- name: setup the virt host + hosts: virthost + tasks: + - name: set fact stack user home + set_fact: instack_user_home=/home/{{ provisioner.remote_user }} + + - name: get the guest-image + sudo: yes + get_url: > + url="{{ distro.images[distro.name][distro.full_version].remote_file_server }}{{ distro.images[distro.name][distro.full_version].guest_image_name }}" + dest=/root/{{ distro.images[distro.name][distro.full_version].guest_image_name }} + + - name: copy the guest-image in stack user home + sudo: yes + command: cp /root/{{ distro.images[distro.name][distro.full_version].guest_image_name }} {{instack_user_home}}/{{ distro.images[distro.name][distro.full_version].guest_image_name }} + + - name: set the right permissions for the guest-image + sudo: yes + file: > + path={{instack_user_home}}/{{ distro.images[distro.name][distro.full_version].guest_image_name }} + owner={{ provisioner.remote_user }} + group={{ 
provisioner.remote_user }} + + - name: install yum-plugin-priorities for rdo-manager + sudo: yes + yum: name={{item}} state=present + with_items: + - yum-plugin-priorities + when: product.name == "rdo" + + - name: install rdo-manager-deps + sudo: yes + yum: name={{item}} state=present + with_items: + - python-tripleoclient + when: product.name == "rdo" or product.full_version == "8-director" + + - name: install python-rdomanager-oscplugin + sudo: yes + yum: name=python-rdomanager-oscplugin state=present + + - name: setup environment vars + template: src={{ base_dir }}/khaleesi/playbooks/installer/rdo-manager/templates/virt-setup-env.j2 dest=~/virt-setup-env mode=0755 + + - name: Contents of virt-setup-env + shell: > + cat {{ instack_user_home }}/virt-setup-env + + - name: Patch instack-virt-setup to ensure dhcp.leases is not used to determine ip (workaround https://review.openstack.org/#/c/232584) + sudo: yes + lineinfile: + dest: /usr/bin/instack-virt-setup + regexp: "/var/lib/libvirt/dnsmasq/default.leases" + line: " IP=$(ip n | grep $(tripleo get-vm-mac $UNDERCLOUD_VM_NAME) | awk '{print $1;}')" + when: workarounds.enabled is defined and workarounds.enabled|bool + + - name: run instack-virt-setup + shell: > + source {{ instack_user_home }}/virt-setup-env; + instack-virt-setup > {{ instack_user_home }}/instack-virt-setup.log; + register: instack_virt_setup_result + ignore_errors: yes + + - name: destroy default pool + sudo: yes + command: virsh pool-destroy default + ignore_errors: true + when: "instack_virt_setup_result.rc !=0" + + - name: update libvirtd unix_sock_group + sudo: yes + lineinfile: + dest: /etc/libvirt/libvirtd.conf + regexp: ^unix_sock_group + line: 'unix_sock_group = "{{ provisioner.remote_user }}"' + when: "instack_virt_setup_result.rc !=0" + + - name: remove libvirt qemu capabilities cache + sudo: yes + command: rm -Rf /var/cache/libvirt/qemu/capabilities/ + when: "instack_virt_setup_result.rc != 0" + # more workaround for the SATA error 
RHBZ#1195882 + + - name: restart libvirtd + sudo: yes + service: name=libvirtd state=restarted + when: "instack_virt_setup_result.rc != 0" + + - name: inspect virsh capabilities + command: 'virsh capabilities' + when: "instack_virt_setup_result.rc != 0" + + - name: stop virbr0 + sudo: yes + command: ip link set virbr0 down + ignore_errors: true + when: "instack_virt_setup_result.rc != 0" + + - name: delete libvirt bridge virbr0 + sudo: yes + command: brctl delbr virbr0 + ignore_errors: true + when: "instack_virt_setup_result.rc != 0" + + - name: start default libvirt network + sudo: yes + command: virsh net-start default + ignore_errors: true + when: "instack_virt_setup_result.rc != 0" + + - name: retry run instack-virt-setup + shell: > + virsh undefine instack; + source {{ instack_user_home }}/virt-setup-env; + instack-virt-setup > {{ instack_user_home }}/instack-virt-setup-retry.log; + when: "instack_virt_setup_result.rc !=0" + + - name: print out all the VMs + shell: > + sudo virsh list --all + + - name: get undercloud vm ip address + shell: > + export PATH='/usr/local/bin:/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/home/stack/bin'; + ip n | grep $(tripleo get-vm-mac instack) | awk '{print $1;}' + when: undercloud_ip is not defined + register: undercloud_vm_ip_result + + - name: set_fact for undercloud ip + set_fact: undercloud_ip={{ undercloud_vm_ip_result.stdout }} + +- name: set the undercloud ip as a fact + hosts: localhost + tasks: + - name: set_fact for undercloud ip + set_fact: undercloud_ip={{ hostvars['host0'].undercloud_ip }} + + - name: debug undercloud_ip + debug: var=hostvars['localhost'].undercloud_ip + +- name: add the host to the ansible inventory and setup ssh keys + hosts: virthost + tasks: + - name: wait until ssh is available on undercloud node + wait_for: + host={{ hostvars['localhost'].undercloud_ip }} + state=started + port=22 + delay=15 + timeout=300 + + - name: add undercloud host + add_host: + name=undercloud + groups=undercloud + 
ansible_ssh_host=undercloud + ansible_fqdn=undercloud + ansible_ssh_user="{{ provisioner.remote_user }}" + ansible_ssh_private_key_file="{{ provisioner.key_file }}" + gating_repo="{{ gating_repo is defined and gating_repo }}" + + - name: setup ssh config + template: src={{ base_dir }}/khaleesi/playbooks/installer/rdo-manager/templates/ssh_config.j2 dest=~/ssh.config.ansible mode=0755 + + - name: copy ssh_config back to the slave + fetch: src=~/ssh.config.ansible dest="{{ base_dir }}/khaleesi/ssh.config.ansible" flat=yes + + - name: copy id_rsa key back to the slave + fetch: src=~/.ssh/id_rsa dest="{{ base_dir }}/khaleesi/id_rsa_virt_host" flat=yes + + - name: copy undercloud root user authorized_keys to stack user + shell: 'ssh -F ssh.config.ansible undercloud-from-virthost "cp /root/.ssh/authorized_keys /home/stack/.ssh/"' + + - name: chown authorized_keys for stack user + shell: 'ssh -F ssh.config.ansible undercloud-from-virthost "chown stack:stack /home/stack/.ssh/authorized_keys"' + + +- name: regenerate the inventory file after adding hosts + hosts: localhost + tasks: + - name: create inventory from template + template: + dest: "{{ lookup('env', 'PWD') }}/{{ tmp.node_prefix }}hosts" + src: "{{ base_dir }}/khaleesi/playbooks/provisioner/templates/inventory.j2" + + - name: symlink inventory to a static name + file: + dest: "{{ lookup('env', 'PWD') }}/hosts" + state: link + src: "{{ lookup('env', 'PWD') }}/{{ tmp.node_prefix }}hosts" + +- name: test host connection + hosts: all:!localhost + tasks: + - name: test ssh + command: hostname + + - name: check distro + command: cat /etc/redhat-release + + - name: set fact stack user home + set_fact: instack_user_home=/home/{{ provisioner.remote_user }} diff --git a/playbooks/installer/rdo-manager/environment-setup/virthost/main.yml b/playbooks/installer/rdo-manager/environment-setup/virthost/main.yml new file mode 100644 index 000000000..21cd51e6d --- /dev/null +++ 
b/playbooks/installer/rdo-manager/environment-setup/virthost/main.yml @@ -0,0 +1,12 @@ +--- +- name: clean up rdo-manager virthost + hosts: virthost + vars: + - ansible_ssh_user: root + roles: + - { role: cleanup_nodes/rdo-manager, + when: (installer.type == "rdo-manager" and provisioner.type == "manual") + } + +- include: "{{base_dir}}/khaleesi/playbooks/installer/rdo-manager/user/main.yml host=virthost" +- include: instack-virt-setup/main.yml diff --git a/playbooks/installer/rdo-manager/gate.yml b/playbooks/installer/rdo-manager/gate.yml new file mode 100644 index 000000000..3d554bf2f --- /dev/null +++ b/playbooks/installer/rdo-manager/gate.yml @@ -0,0 +1,5 @@ +--- +- include: environment-setup/gate.yml +- include: undercloud/gate.yml +- include: images/main.yml +- include: overcloud/main.yml diff --git a/playbooks/installer/rdo-manager/images.yml b/playbooks/installer/rdo-manager/images.yml new file mode 100644 index 000000000..90c447bb0 --- /dev/null +++ b/playbooks/installer/rdo-manager/images.yml @@ -0,0 +1,2 @@ +--- +- include: "{{base_dir}}/khaleesi/playbooks/installer/rdo-manager/images/main.yml" diff --git a/playbooks/installer/rdo-manager/images/README.txt b/playbooks/installer/rdo-manager/images/README.txt new file mode 100644 index 000000000..0e27685ef --- /dev/null +++ b/playbooks/installer/rdo-manager/images/README.txt @@ -0,0 +1,5 @@ +This playbook follows the documentation from tripleo as closely as possible + +http://docs.openstack.org/developer/tripleo-docs/basic_deployment/basic_deployment_cli.html#get-images +http://docs.openstack.org/developer/tripleo-docs/basic_deployment/basic_deployment_cli.html#upload-images + diff --git a/playbooks/installer/rdo-manager/images/main.yml b/playbooks/installer/rdo-manager/images/main.yml new file mode 100644 index 000000000..f01787f39 --- /dev/null +++ b/playbooks/installer/rdo-manager/images/main.yml @@ -0,0 +1,2 @@ +--- +- include: run.yml diff --git 
a/playbooks/installer/rdo-manager/undercloud/build-images.yml b/playbooks/installer/rdo-manager/images/run.yml similarity index 100% rename from playbooks/installer/rdo-manager/undercloud/build-images.yml rename to playbooks/installer/rdo-manager/images/run.yml diff --git a/playbooks/installer/rdo-manager/install_undercloud.yml b/playbooks/installer/rdo-manager/instack-virt-setup.yml similarity index 66% rename from playbooks/installer/rdo-manager/install_undercloud.yml rename to playbooks/installer/rdo-manager/instack-virt-setup.yml index dc9a01940..0fa5ad383 100644 --- a/playbooks/installer/rdo-manager/install_undercloud.yml +++ b/playbooks/installer/rdo-manager/instack-virt-setup.yml @@ -1,3 +1,3 @@ --- - include: "{{base_dir}}/khaleesi/playbooks/provisioner/manual/main.yml" -- include: undercloud/main.yml +- include: environment-setup/main.yml diff --git a/playbooks/installer/rdo-manager/install-undercloud.yml b/playbooks/installer/rdo-manager/install-undercloud.yml new file mode 100644 index 000000000..d80df2b84 --- /dev/null +++ b/playbooks/installer/rdo-manager/install-undercloud.yml @@ -0,0 +1,2 @@ +--- +- include: undercloud/main.yml diff --git a/playbooks/installer/rdo-manager/main.yml b/playbooks/installer/rdo-manager/main.yml index 65d07dfbd..5d609744f 100644 --- a/playbooks/installer/rdo-manager/main.yml +++ b/playbooks/installer/rdo-manager/main.yml @@ -1,3 +1,5 @@ --- +- include: environment-setup/main.yml - include: undercloud/main.yml +- include: images/main.yml - include: overcloud/main.yml diff --git a/playbooks/installer/rdo-manager/undercloud/README.txt b/playbooks/installer/rdo-manager/undercloud/README.txt new file mode 100644 index 000000000..cf27d1638 --- /dev/null +++ b/playbooks/installer/rdo-manager/undercloud/README.txt @@ -0,0 +1,3 @@ +This playbook follows the documentation from tripleo as closely as possible + +http://docs.openstack.org/developer/tripleo-docs/installation/installation.html#installing-the-undercloud diff --git 
a/playbooks/installer/rdo-manager/undercloud/gate.yml b/playbooks/installer/rdo-manager/undercloud/gate.yml new file mode 100644 index 000000000..577333e86 --- /dev/null +++ b/playbooks/installer/rdo-manager/undercloud/gate.yml @@ -0,0 +1,42 @@ +--- +- name: Group all hosts in gate if we are gating using delorean + hosts: all + tasks: + - group_by: key=gate-delorean + when: use_delorean is defined and use_delorean + +- name: Run Delorean + hosts: virthost:&gate-delorean + roles: + - delorean + +- name: Create local repo for delorean rpms + hosts: undercloud:&gate-delorean + roles: + - delorean_rpms + +- name: Group all hosts in gate if we are gating + hosts: all + tasks: + - group_by: key=gate-install-rpm + when: gating_repo is defined + +- name: Install the custom rpm when gating + hosts: undercloud:&gate-install-rpm + sudo: yes + tasks: + - name: install the gating_repo rpm we previously built + shell: yum -y install /home/stack/*.rpm + +- name: Update all packages + hosts: undercloud:&gate-delorean + tasks: + - yum: name=* state=latest + sudo: yes + +- include: pre.yml +- include: "pre-{{ installer.env.type }}.yml" +- include: run.yml +- include: "post-{{ installer.env.type }}.yml" +- include: post.yml + diff --git a/playbooks/installer/rdo-manager/undercloud/main.yml b/playbooks/installer/rdo-manager/undercloud/main.yml index 285b27561..63cca9ce3 100644 --- a/playbooks/installer/rdo-manager/undercloud/main.yml +++ b/playbooks/installer/rdo-manager/undercloud/main.yml @@ -1,14 +1,7 @@ --- -- name: clean up rdo-manager virthost - hosts: virthost - vars: - - ansible_ssh_user: root - roles: - - { role: cleanup_nodes/rdo-manager, - when: (installer.type == "rdo-manager" and provisioner.type == "manual") - } - -- include: pre-{{ installer.env.type }}.yml +- include: pre.yml +- include: "pre-{{ installer.env.type }}.yml" - include: run.yml -- include: build-images.yml +- include: "post-{{ installer.env.type }}.yml" +- include: post.yml diff --git 
a/playbooks/installer/rdo-manager/undercloud/post-baremetal.yml b/playbooks/installer/rdo-manager/undercloud/post-baremetal.yml new file mode 100644 index 000000000..c44e36d4d --- /dev/null +++ b/playbooks/installer/rdo-manager/undercloud/post-baremetal.yml @@ -0,0 +1,45 @@ +--- +- name: Execute vendor-specific setup for baremetal environment + hosts: undercloud:&baremetal + tasks: + - name: copy vendor-specific setup file + synchronize: > + src={{base_dir}}/khaleesi-settings/hardware_environments/{{hw_env.env_type}}/vendor_specific_setup dest={{ instack_user_home }}/vendor_specific_setup + delegate_to: local + when: hw_env.env_type != 'ovb_host_cloud' + + - name: copy over vendor-specific setup file (quintupleo_host_cloud) + local_action: command rsync --delay-updates -F --compress --archive --rsh "ssh -i {{ provisioner.key_file }} -F {{base_dir}}/khaleesi/ssh.config.ansible -S none -o StrictHostKeyChecking=no" {{base_dir}}/khaleesi-settings/hardware_environments/{{hw_env.env_type}}/vendor_specific_setup undercloud:{{ instack_user_home }}/vendor_specific_setup + when: hw_env.env_type == 'ovb_host_cloud' + + - name: execute vendor-specific setup + shell: > + chmod 755 {{ instack_user_home }}/vendor_specific_setup; + {{ instack_user_home }}/vendor_specific_setup + +- name: Set ironic to control the power state + hosts: undercloud:&baremetal + tasks: + - name: get power state from /etc/ironic/ironic.conf (workaround for bz 1246641) + sudo: yes + shell: > + sudo cat /etc/ironic/ironic.conf | grep 'force_power_state_during_sync=False' + when: workarounds.enabled is defined and workarounds.enabled|bool + + - name: allow ironic to control the power state (workaround for bz 1246641) + sudo: yes + shell: > + sed -i 's/force_power_state_during_sync=False/force_power_state_during_sync=True/g' /etc/ironic/ironic.conf + when: workarounds.enabled is defined and workarounds.enabled|bool + + - name: get power state from /etc/ironic/ironic.conf (workaround for bz 1246641) + sudo: 
yes + shell: > + sudo cat /etc/ironic/ironic.conf | grep 'force_power_state_during_sync=True' + when: workarounds.enabled is defined and workarounds.enabled|bool + + - name: restart openstack-ironic-conductor (workaround for bz 1246641) + sudo: yes + shell: > + systemctl restart openstack-ironic-conductor + when: workarounds.enabled is defined and workarounds.enabled|bool diff --git a/playbooks/installer/rdo-manager/undercloud/post-virthost.yml b/playbooks/installer/rdo-manager/undercloud/post-virthost.yml new file mode 100644 index 000000000..a264bf1fb --- /dev/null +++ b/playbooks/installer/rdo-manager/undercloud/post-virthost.yml @@ -0,0 +1,10 @@ +--- +- name: setup networking on virt for network isolation + hosts: undercloud:&virthost + tasks: + - name: net-iso virt setup vlans + shell: > + source {{ instack_user_home }}/stackrc; + sudo ovs-vsctl add-port br-ctlplane vlan10 tag=10 -- set interface vlan10 type=internal; + sudo ip l set dev vlan10 up; sudo ip addr add 172.16.23.251/24 dev vlan10; + when: installer.network.isolation == 'single_nic_vlans' diff --git a/playbooks/installer/rdo-manager/undercloud/post.yml b/playbooks/installer/rdo-manager/undercloud/post.yml new file mode 100644 index 000000000..9df7a6dac --- /dev/null +++ b/playbooks/installer/rdo-manager/undercloud/post.yml @@ -0,0 +1,45 @@ +--- +- name: undercloud post install workarounds + hosts: undercloud + tasks: + - name: disable haproxy check (workaround bug bz 1246525) + sudo: yes + replace: dest=/etc/haproxy/haproxy.cfg regexp='(listen ironic\n.*\n.*)\n.*option httpchk GET \/' replace='\1' + when: workarounds.enabled is defined and workarounds.enabled|bool and ha_config_file.stat.exists + + - name: restart haproxy service (workaround bug bz 1246525) + sudo: yes + command: systemctl restart haproxy + when: workarounds.enabled is defined and workarounds.enabled|bool and ha_config_file.stat.exists + + - name: increase stack_action_timeout to 4 hours (workaround for bz 1243365) + sudo: yes + 
command: openstack-config --set /etc/heat/heat.conf DEFAULT stack_action_timeout 14400 + when: workarounds.enabled is defined and workarounds.enabled|bool + + - name: restart openstack-heat-engine (workaround for bz 1243365) + sudo: yes + command: systemctl restart openstack-heat-engine + when: workarounds.enabled is defined and workarounds.enabled|bool + + - name: check if haproxy is present (workaround bug bz 1246525) + stat: path=/etc/haproxy/haproxy.cfg + register: ha_config_file + + - name: disable haproxy check (workaround bug bz 1246525) + sudo: yes + replace: dest=/etc/haproxy/haproxy.cfg regexp='(listen ironic\n.*\n.*)\n.*option httpchk GET \/' replace='\1' + when: workarounds.enabled is defined and workarounds.enabled|bool and ha_config_file.stat.exists + + - name: restart haproxy service (workaround bug bz 1246525) + sudo: yes + command: systemctl restart haproxy + when: workarounds.enabled is defined and workarounds.enabled|bool and ha_config_file.stat.exists + +- name: update neutron values for undercloud + hosts: undercloud + tasks: + - name: update neutron quota to unlimited + shell: > + source {{ instack_user_home }}/stackrc; + neutron quota-update --port -1; diff --git a/playbooks/installer/rdo-manager/undercloud/pre-baremetal.yml b/playbooks/installer/rdo-manager/undercloud/pre-baremetal.yml index 1465740b8..5ab77e744 100644 --- a/playbooks/installer/rdo-manager/undercloud/pre-baremetal.yml +++ b/playbooks/installer/rdo-manager/undercloud/pre-baremetal.yml @@ -1,159 +1,115 @@ --- -- name: Update packages on the host - hosts: undercloud - vars: - - ansible_ssh_user: root +- name: Customize the answer file for baremetal deployment + hosts: undercloud:&baremetal tasks: - - name: repolist - command: yum -d 7 repolist - - - name: update all packages - yum: name=* state=latest - -- name: Create the stack user on the undercloud - hosts: undercloud - vars: - - ansible_ssh_user: root - tasks: - - name: delete user (workaround for BZ 1284717) - user: 
name="{{ provisioner.remote_user }}" state=absent remove=yes force=yes - tags: workaround - - - name: inspect user removal - shell: > - ls /home | grep "{{ provisioner.remote_user }}"; - cat /etc/passwd | grep "{{ provisioner.remote_user }}"; - cat /etc/group | grep "{{ provisioner.remote_user }}"; - cat /etc/shadow | grep "{{ provisioner.remote_user }}"; - ls /var/spool/mail | grep "{{ provisioner.remote_user }}"; - register: result - failed_when: result.rc != 1 - - - name: create user - user: name="{{ provisioner.remote_user }}" state=present password=stack - - - name: copy the .bash_profile file - command: cp /root/.bash_profile /home/{{ provisioner.remote_user }}/ - - - name: create .ssh dir - file: path=/home/{{ provisioner.remote_user }}/.ssh mode=0700 owner=stack group=stack state=directory - - - name: copy the authorized_keys file - command: cp /root/.ssh/authorized_keys /home/{{ provisioner.remote_user }}/.ssh/ - - - name: set file permissions on authorized_hosts - file: path=/home/{{ provisioner.remote_user }}/.ssh/authorized_keys mode=0600 owner=stack group=stack - - - name: copy ssh keys - command: cp /root/.ssh/id_rsa /home/{{ provisioner.remote_user }}/.ssh/ - when: hw_env.env_type == 'ovb_host_cloud' - - - name: copy ssh pub keys - command: cp /root/.ssh/id_rsa.pub /home/{{ provisioner.remote_user }}/.ssh/ - when: hw_env.env_type == 'ovb_host_cloud' - - - name: set permission on keys - file: path=/home/{{ provisioner.remote_user }}/.ssh/id_rsa mode=0600 owner=stack group=stack - when: hw_env.env_type == 'ovb_host_cloud' - - - name: set permission on pub keys - file: path=/home/{{ provisioner.remote_user }}/.ssh/id_rsa.pub mode=0644 owner=stack group=stack - when: hw_env.env_type == 'ovb_host_cloud' - - - name: add user to sudoers - lineinfile: dest=/etc/sudoers line="stack ALL=(root) NOPASSWD:ALL" - - - name: set fact for the stack user home - set_fact: instack_user_home=/home/{{ provisioner.remote_user }} - - - name: enabling ip forwarding - 
lineinfile: dest=/etc/sysctl.conf line='net.ipv4.ip_forward = 1' insertafter=EOF state=present - when: hw_env.ip_forwarding is defined and hw_env.ip_forwarding == 'true' - - - name: check ip forwarding - shell: sysctl -p /etc/sysctl.conf - when: hw_env.ip_forwarding is defined and hw_env.ip_forwarding == 'true' - -- include: repo-{{ product.name }}.yml repo_host=undercloud - -- name: Configure the baremetal undercloud - hosts: undercloud - tasks: - - name: check if instackenv.json exists in root - stat: path="/root/instackenv.json" - register: instackenv_json_root - sudo_user: root - sudo: yes - - - name: copy instackenv.json from root if it exists there - shell: cp /root/instackenv.json {{ instack_user_home }}/instackenv.json - when: instackenv_json_root.stat.exists == True - sudo_user: root - sudo: yes - - - name: get instackenv.json - synchronize: src={{base_dir}}/khaleesi-settings/hardware_environments/{{hw_env.env_type}}/instackenv.json dest={{ instack_user_home }}/instackenv.json - when: instackenv_json_root.stat.exists == False - - - name: chown instackenv.json - file: path={{ instack_user_home }}/instackenv.json owner=stack group=stack - sudo_user: root - sudo: yes - - - name: install ipmitool - yum: name={{ item }} state=latest - with_items: - - OpenIPMI - - OpenIPMI-tools - sudo_user: root - sudo: yes - - - name: install sshpass - DRACS - yum: name=sshpass state=latest - sudo_user: root - sudo: yes - when: hw_env.remote_mgmt == "dracs" - - - name: start IMPI service - shell: > - sudo chkconfig ipmi on; - sudo service ipmi start - - - name: get tools to validate instackenv.json/nodes.json - git: > - repo="https://github.com/rthallisey/clapper.git" - dest="{{instack_user_home}}/clapper" - - - name: validate instackenv.json - shell: > - chdir={{instack_user_home}} - python clapper/instackenv-validator.py -f {{ instack_user_home }}/instackenv.json - register: instackenv_validator_output - - - name: fail if instackenv.json fails validation - fail: 
msg="instackenv.json didn't validate." - when: instackenv_validator_output.stdout.find("SUCCESS") == -1 - - - name: get number of overcloud nodes - shell: > - export IP_LENGTH=(`cat {{ instack_user_home }}/instackenv.json | grep -o 'pm_addr.*' | cut -f2- -d':' | wc -l`); - echo $(($IP_LENGTH)) - register: node_length - - - name: power off node boxes - IPMI - shell: > - export IP=(`cat {{ instack_user_home }}/instackenv.json | grep -o 'pm_addr.*' | cut -f2- -d':' | sed 's/[},\"]//g'`); - export USER=(`cat {{ instack_user_home }}/instackenv.json | grep -o 'pm_user.*' | cut -f2- -d':' |rev | cut -c 2- | rev | sed 's/[},\"]//g'`); - export PASSWORD=(`cat {{ instack_user_home }}/instackenv.json | grep -o 'pm_password.*' | cut -f2- -d':' |rev | cut -c 2- | rev | sed 's/[},\"]//g'`); - ipmitool -I lanplus -H ${IP[item]} -U ${USER[item]} -P ${PASSWORD[item]} power off - with_sequence: count="{{node_length.stdout}}" - when: hw_env.remote_mgmt == "ipmi" - - - name: power off node boxes - DRACS - shell: > - export IP=(`cat {{ instack_user_home }}/instackenv.json | grep -o 'pm_addr.*' | cut -f2- -d':' | sed 's/[},\"]//g'`); - export USER=(`cat {{ instack_user_home }}/instackenv.json | grep -o 'pm_user.*' | cut -f2- -d':' |rev | cut -c 2- | rev | sed 's/[},\"]//g'`); - export PASSWORD=(`cat {{ instack_user_home }}/instackenv.json | grep -o 'pm_password.*' | cut -f2- -d':' |rev | cut -c 2- | rev | sed 's/[},\"]//g'`); - sshpass -p ${PASSWORD[item]} ssh -o "StrictHostKeyChecking=no" ${USER[item]}@${IP[item]} "racadm serveraction powerdown" - with_sequence: count="{{node_length.stdout}}" - when: hw_env.remote_mgmt == "dracs" + - name: check if answers file exists + stat: path="/usr/share/instack-undercloud/instack.answers.sample" + register: answers_file_present + + - name: check if conf file exists + stat: path="/usr/share/instack-undercloud/undercloud.conf.sample" + register: conf_file_present + + - name: fail if there is no answers file and no conf file + fail: msg="Neither a 
conf file nor an answers file exists" + when: answers_file_present.stat.exists == False and conf_file_present.stat.exists == False + + - name: copy baremetal answers file + shell: cp /usr/share/instack-undercloud/instack.answers.sample {{ instack_user_home }}/instack.answers + when: answers_file_present.stat.exists == True + + - name: edit instack.answers file - local_interface + lineinfile: dest={{ instack_user_home }}/instack.answers regexp=^LOCAL_INTERFACE line=LOCAL_INTERFACE={{ hw_env.answers_local_interface }} + when: answers_file_present.stat.exists == True + + - name: edit instack.answers file - network + replace: dest={{ instack_user_home }}/instack.answers regexp='192.0.2' replace={{ hw_env.network }} + when: hw_env.network is defined and answers_file_present.stat.exists == True + + - name: edit instack.answers file - network gateway + lineinfile: dest={{ instack_user_home }}/instack.answers regexp=^NETWORK_GATEWAY line=NETWORK_GATEWAY={{ hw_env.network_gateway }} + when: answers_file_present.stat.exists == True + + - name: copy baremetal conf file + shell: cp /usr/share/instack-undercloud/undercloud.conf.sample {{ instack_user_home }}/undercloud.conf + when: conf_file_present.stat.exists == True + + - name: edit undercloud.conf file - local_interface + lineinfile: dest={{ instack_user_home }}/undercloud.conf regexp=^#local_interface line=local_interface={{ hw_env.answers_local_interface }} + when: conf_file_present.stat.exists == True + + - name: edit undercloud.conf file - dhcp_start + lineinfile: dest={{ instack_user_home }}/undercloud.conf regexp=^#dhcp_start line=dhcp_start={{ hw_env.dhcp_start }} + when: conf_file_present.stat.exists == True and hw_env.dhcp_start is defined + + - name: edit undercloud.conf file - dhcp_end + lineinfile: dest={{ instack_user_home }}/undercloud.conf regexp=^#dhcp_end line=dhcp_end={{ hw_env.dhcp_end }} + when: conf_file_present.stat.exists == True and hw_env.dhcp_end is defined + + - name: edit undercloud.conf file - 
discovery_iprange + lineinfile: dest={{ instack_user_home }}/undercloud.conf regexp=^#discovery_iprange line=discovery_iprange={{ hw_env.discovery_iprange }} + when: conf_file_present.stat.exists == True and hw_env.discovery_iprange is defined + + - name: edit undercloud.conf file - network_gateway + lineinfile: dest={{ instack_user_home }}/undercloud.conf regexp=^#network_gateway line=network_gateway={{ hw_env.undercloud_network_gateway }} + when: conf_file_present.stat.exists == True and hw_env.undercloud_network_gateway is defined + + - name: edit undercloud.conf file - local_ip + lineinfile: dest={{ instack_user_home }}/undercloud.conf regexp=^#local_ip line=local_ip={{ hw_env.undercloud_local_ip }} + when: conf_file_present.stat.exists == True and hw_env.undercloud_local_ip is defined + + - name: edit undercloud.conf file - undercloud_public_vip + lineinfile: dest={{ instack_user_home }}/undercloud.conf regexp=^#undercloud_public_vip line=undercloud_public_vip={{ hw_env.undercloud_public_vip }} + when: conf_file_present.stat.exists == True and hw_env.undercloud_public_vip is defined + + - name: edit undercloud.conf file - undercloud_admin_vip + lineinfile: dest={{ instack_user_home }}/undercloud.conf regexp=^#undercloud_admin_vip line=undercloud_admin_vip={{ hw_env.undercloud_admin_vip }} + when: conf_file_present.stat.exists == True and hw_env.undercloud_admin_vip is defined + + - name: edit undercloud.conf file - network + shell: > + sed -i 's/192.0.2/{{ hw_env.network }}/g' {{ instack_user_home }}/undercloud.conf; + sed -i '/{{ hw_env.network }}/s/#//g' {{ instack_user_home }}/undercloud.conf + when: hw_env.network is defined and conf_file_present.stat.exists == True + + - name: register short hostname + shell: "hostname -s" + register: short_hostname + + - name: register full hostname + shell: "cat /etc/hostname" + register: full_hostname + + - name: set the hostname + sudo: yes + shell: > + hostnamectl set-hostname {{ full_hostname.stdout }}; + 
hostnamectl set-hostname --transient {{ full_hostname.stdout }} + + - name: Set /etc/hostname for those that need it + sudo: yes + lineinfile: > + dest=/etc/hosts + line="127.0.1.1 {{ short_hostname.stdout }} {{ full_hostname.stdout }}" + + - name: get domain from /etc/resolv.conf + sudo: yes + shell: cat /etc/resolv.conf | grep search | sed -n -e 's/^.*search //p' + register: search_domain + + - name: add short and full hostname to /etc/hosts + sudo: yes + shell: "sed -i 's/localhost4.localdomain4/localhost4.localdomain4 {{ short_hostname.stdout }} {{ full_hostname.stdout }} {{ short_hostname.stdout }}.{{ search_domain.stdout }}/g' /etc/hosts" + + - name: add images and templates folders + shell: > + mkdir {{ instack_user_home }}/images; + mkdir {{ instack_user_home }}/templates + when: hw_env.env_type == 'scale_lab' + + - name: copy instackenv.json to nodes.json + shell: cp {{ instack_user_home }}/instackenv.json {{ instack_user_home }}/nodes.json + + - name: installing python-six (workaround) + sudo: yes + yum: name=python-six state=present diff --git a/playbooks/installer/rdo-manager/undercloud/pre-virthost.yml b/playbooks/installer/rdo-manager/undercloud/pre-virthost.yml index fb55d9819..cef8d2123 100644 --- a/playbooks/installer/rdo-manager/undercloud/pre-virthost.yml +++ b/playbooks/installer/rdo-manager/undercloud/pre-virthost.yml @@ -1,301 +1,12 @@ --- -- name: Create the stack user on the virthost - hosts: virthost +- name: Update packages on the host + hosts: undercloud vars: - - ansible_ssh_user: root + - ansible_ssh_user: root tasks: - - name: create user - user: name="{{ provisioner.remote_user }}" state=present password=stack + - name: repolist + command: yum -d 7 repolist - - name: copy the .bash_profile file - command: cp /root/.bash_profile /home/{{ provisioner.remote_user }}/ + - name: update all packages + yum: name=* state=latest - - name: set file permissions on .bash_profile - file: path=/home/{{ provisioner.remote_user }}/.bash_profile 
mode=0755 owner={{ provisioner.remote_user }} group={{ provisioner.remote_user }} - - - name: create .ssh dir - file: path=/home/{{ provisioner.remote_user }}/.ssh mode=0700 owner={{ provisioner.remote_user }} group=stack state=directory - - - name: copy the authorized_keys file - command: cp /root/.ssh/authorized_keys /home/{{ provisioner.remote_user }}/.ssh/ - - - name: set file permissions on authorized_hosts - file: path=/home/{{ provisioner.remote_user }}/.ssh/authorized_keys mode=0600 owner={{ provisioner.remote_user }} group={{ provisioner.remote_user }} - - - name: add user to sudoers - lineinfile: dest=/etc/sudoers line="{{ provisioner.remote_user }} ALL=(root) NOPASSWD:ALL" - - - name: set fact for the stack user home - set_fact: instack_user_home=/home/{{ provisioner.remote_user }} - -- include: repo-{{ product.name }}.yml repo_host=virthost - -- name: Copy the gating package - hosts: virthost - tasks: - - name: copy downstream rpm package - copy: src={{ item }} dest=/home/{{ ansible_ssh_user }}/ - with_fileglob: - - "{{ lookup('env', 'PWD') }}/generated_rpms/*.rpm" - when: gating_repo is defined - -- name: setup the virt host - hosts: virthost - tasks: - - name: install the generated rpm - shell: "yum install -y /home/{{ ansible_ssh_user }}/{{gating_repo}}*.rpm" - sudo: yes - when: gating_repo is defined - -- name: setup the virt host - hosts: virthost - tasks: - - name: set fact stack user home - set_fact: instack_user_home=/home/{{ provisioner.remote_user }} - - - name: get the guest-image - sudo: yes - get_url: > - url="{{ distro.images[distro.name][distro.full_version].remote_file_server }}{{ distro.images[distro.name][distro.full_version].guest_image_name }}" - dest=/root/{{ distro.images[distro.name][distro.full_version].guest_image_name }} - - - name: copy the guest-image in stack user home - sudo: yes - command: cp /root/{{ distro.images[distro.name][distro.full_version].guest_image_name }} {{instack_user_home}}/{{ 
distro.images[distro.name][distro.full_version].guest_image_name }} - - - name: set the right permissions for the guest-image - sudo: yes - file: > - path={{instack_user_home}}/{{ distro.images[distro.name][distro.full_version].guest_image_name }} - owner={{ provisioner.remote_user }} - group={{ provisioner.remote_user }} - - - name: install yum-plugin-priorities for rdo-manager - yum: name={{item}} state=present - sudo: yes - with_items: - - yum-plugin-priorities - when: product.name == "rdo" - - - name: install rdo-manager-deps - yum: name={{item}} state=present - sudo: yes - with_items: - - python-tripleoclient - when: product.name == "rdo" or product.full_version == "8-director" - - - name: install python-rdomanager-oscplugin - yum: name=python-rdomanager-oscplugin state=present - sudo: yes - - - name: setup environment vars - template: src={{ base_dir }}/khaleesi/playbooks/installer/rdo-manager/templates/virt-setup-env.j2 dest=~/virt-setup-env mode=0755 - - - name: Contents of virt-setup-env - shell: > - cat {{ instack_user_home }}/virt-setup-env - - - name: Patch instack-virt-setup to ensure dhcp.leases is not used to determine ip (workaround https://review.openstack.org/#/c/232584) - sudo: yes - lineinfile: - dest=/usr/bin/instack-virt-setup - regexp="/var/lib/libvirt/dnsmasq/default.leases" - line=" IP=$(ip n | grep $(tripleo get-vm-mac $UNDERCLOUD_VM_NAME) | awk '{print $1;}')" - when: workarounds.enabled is defined and workarounds.enabled|bool - - - name: run instack-virt-setup - shell: > - source {{ instack_user_home }}/virt-setup-env; - instack-virt-setup > {{ instack_user_home }}/instack-virt-setup.log; - register: instack_virt_setup_result - ignore_errors: yes - - - name: destroy default pool - command: virsh pool-destroy default - sudo: yes - ignore_errors: true - when: "instack_virt_setup_result.rc !=0" - - - name: update libvirtd unix_sock_group - lineinfile: dest=/etc/libvirt/libvirtd.conf - regexp=^unix_sock_group - line='unix_sock_group = "{{ 
provisioner.remote_user }}"' - when: "instack_virt_setup_result.rc !=0" - sudo: yes - - - name: remove libvirt qemu capabilities cache - command: rm -Rf /var/cache/libvirt/qemu/capabilities/ - sudo: yes - when: "instack_virt_setup_result.rc != 0" - # more workaround for the SATA error RHBZ#1195882 - - - name: restart libvirtd - service: name=libvirtd state=restarted - sudo: yes - when: "instack_virt_setup_result.rc != 0" - - - name: inspect virsh capabilities - command: 'virsh capabilities' - when: "instack_virt_setup_result.rc != 0" - - - name: stop virbr0 - command: ip link set virbr0 down - sudo: yes - ignore_errors: true - when: "instack_virt_setup_result.rc != 0" - - - name: delete libvirt bridge virbr0 - command: brctl delbr virbr0 - sudo: yes - ignore_errors: true - when: "instack_virt_setup_result.rc != 0" - - - name: start default libvirt network - command: virsh net-start default - sudo: yes - ignore_errors: true - when: "instack_virt_setup_result.rc != 0" - - - name: delete instack domain before re-try of instack-virt-setup - command: virsh undefine instack - sudo: yes - ignore_errors: true - when: "instack_virt_setup_result.rc !=0" - - - name: retry run instack-virt-setup - shell: > - source {{ instack_user_home }}/virt-setup-env; - instack-virt-setup > {{ instack_user_home }}/instack-virt-setup-retry.log; - when: "instack_virt_setup_result.rc !=0" - - - name: print out all the VMs - shell: > - sudo virsh list --all - - - name: get undercloud vm ip address - shell: > - export PATH='/usr/local/bin:/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/home/stack/bin'; - ip n | grep $(tripleo get-vm-mac instack) | awk '{print $1;}' - when: undercloud_ip is not defined - register: undercloud_vm_ip_result - - - name: set_fact for undercloud ip - set_fact: undercloud_ip={{ undercloud_vm_ip_result.stdout }} - -- name: setup the virt host - hosts: localhost - tasks: - - name: set_fact for undercloud ip - set_fact: undercloud_ip={{ hostvars['host0'].undercloud_ip }} - - - 
name: debug undercloud_ip - debug: var=hostvars['localhost'].undercloud_ip - -- name: setup the virt host - hosts: virthost - tasks: - - name: wait until ssh is available on undercloud node - wait_for: host={{ hostvars['localhost'].undercloud_ip }} - state=started - port=22 - delay=15 - timeout=300 - - - name: add undercloud host - add_host: - name=undercloud - groups=undercloud - ansible_ssh_host=undercloud - ansible_fqdn=undercloud - ansible_ssh_user="{{ provisioner.remote_user }}" - ansible_ssh_private_key_file="{{ provisioner.key_file }}" - gating_repo="{{ gating_repo is defined and gating_repo }}" - - - name: setup ssh config - template: src={{ base_dir }}/khaleesi/playbooks/installer/rdo-manager/templates/ssh_config.j2 dest=~/ssh.config.ansible mode=0755 - - - name: copy ssh_config back to the slave - fetch: src=~/ssh.config.ansible dest="{{ base_dir }}/khaleesi/ssh.config.ansible" flat=yes - - - name: copy id_rsa key back to the slave - fetch: src=~/.ssh/id_rsa dest="{{ base_dir }}/khaleesi/id_rsa_virt_host" flat=yes - - - name: copy undercloud root user authorized_keys to stack user - shell: 'ssh -F ssh.config.ansible undercloud-from-virthost "cp /root/.ssh/authorized_keys /home/stack/.ssh/"' - - - name: chown authorized_keys for stack user - shell: 'ssh -F ssh.config.ansible undercloud-from-virthost "chown stack:stack /home/stack/.ssh/authorized_keys"' - - - name: copy gating_repo package - shell: > - scp -F ssh.config.ansible /home/{{ ansible_ssh_user }}/{{ gating_repo }}*.rpm undercloud-from-virthost:{{ instack_user_home }}/ - when: gating_repo is defined - -- name: regenerate the inventory file after adding hosts - hosts: localhost - tasks: - - name: create inventory from template - template: - dest: "{{ lookup('env', 'PWD') }}/{{ tmp.node_prefix }}hosts" - src: "{{ base_dir }}/khaleesi/playbooks/provisioner/templates/inventory.j2" - - - name: symlink inventory to a static name - file: - dest: "{{ lookup('env', 'PWD') }}/hosts" - state: link - src: "{{ 
lookup('env', 'PWD') }}/{{ tmp.node_prefix }}hosts" - -- name: copy the guest image to the undercloud - hosts: virthost - tasks: - - name: upload the guest-image on the undercloud - command: scp -F ssh.config.ansible {{instack_user_home}}/{{ distro.images[distro.name][distro.full_version].guest_image_name }} undercloud-from-virthost:{{ instack_user_home }}/ - -- name: test host connection - hosts: all:!localhost - tasks: - - name: test ssh - command: hostname - - - name: check distro - command: cat /etc/redhat-release - - - name: set fact stack user home - set_fact: instack_user_home=/home/{{ provisioner.remote_user }} - -- include: repo-{{ product.name }}.yml repo_host=undercloud - -- name: Group all hosts in gate if we are gating using delorean - hosts: all - tasks: - - group_by: key=gate-delorean - when: use_delorean is defined and use_delorean - -- name: Run Delorean - hosts: virthost:&gate-delorean - roles: - - delorean - -- name: Create local repo for delorean rpms - hosts: undercloud:&gate-delorean - roles: - - delorean_rpms - -- name: Update all packages - hosts: undercloud:&gate-delorean - tasks: - - yum: name=* state=latest - sudo: yes - -- name: Group all hosts in gate if we are gating - hosts: all - tasks: - - group_by: key=gate-install-rpm - when: gating_repo is defined - -- name: Install the custom rpm when gating - hosts: undercloud:&gate-install-rpm - sudo: yes - tasks: - - name: install the gating_repo rpm we previously built - shell: yum -y install /home/stack/{{ gating_repo }}*.rpm diff --git a/playbooks/installer/rdo-manager/undercloud/pre.yml b/playbooks/installer/rdo-manager/undercloud/pre.yml new file mode 100644 index 000000000..ce00e273a --- /dev/null +++ b/playbooks/installer/rdo-manager/undercloud/pre.yml @@ -0,0 +1,26 @@ +--- +- name: install the undercloud packages + hosts: undercloud + tasks: + - name: install yum-plugin-priorities rdo-manager + sudo: yes + yum: name={{item}} state=present + with_items: + - yum-plugin-priorities + 
when: product.name == "rdo" + + - name: install rdo-manager-deps + sudo: yes + yum: name={{item}} state=present + with_items: + - python-tripleoclient + when: product.name == "rdo" or product.full_version == "8-director" + + - name: install python-rdomanager-oscplugin + sudo: yes + yum: name=python-rdomanager-oscplugin state=present + when: product.full_version == "7-director" + + - name: install python-passlib + sudo: yes + yum: name=python-passlib state=present diff --git a/playbooks/installer/rdo-manager/undercloud/repo-rdo.yml b/playbooks/installer/rdo-manager/undercloud/repo-rdo.yml deleted file mode 100644 index a6ddc09ed..000000000 --- a/playbooks/installer/rdo-manager/undercloud/repo-rdo.yml +++ /dev/null @@ -1,96 +0,0 @@ ---- -- include: "{{ base_dir }}/khaleesi/playbooks/group_by.yml ansible_ssh_user=root" - -- name: Setup openstack repos - hosts: "{{ repo_host }}" - vars: - - ansible_ssh_user: root - - product_override_version: 7-director - gather_facts: yes - tasks: - - name: clean release rpms - yum: name={{ item }} state=absent - with_items: - - rdo-release* - - epel-release - - rhos-release - - - name: remove any yum repos not owned by rpm - shell: rm -Rf /etc/yum.repos.d/{{ item }} - with_items: - - beaker-* - - - name: Install release tool on machine - command: "yum localinstall -y {{ product.rpmrepo[ansible_distribution] }}" - when: product.repo_type is defined and product.repo_type == 'production' - - #remove this step when rdo and rhos diverge - - name: Install extra release tool on machine - command: "yum localinstall -y {{ product.rpmrepo_override[ansible_distribution] }}" - when: product_override_version is defined and product.repo_type_override == 'rhos-release' - - #remove this step when rdo and rhos diverge - - name: Execute rhos-release for rdo-manager (rdo) - command: "rhos-release {{ product_override_version }}" - when: product_override_version is defined and product.repo_type_override == 'rhos-release' - - - name: Install epel release 
- command: "yum localinstall -y {{ distro.epel_release }}" - - - name: yum clean all - command: yum clean all - -- name: RHEL RDO prep - hosts: "{{ repo_host }}:&RedHat" - vars: - - ansible_ssh_user: root - roles: - # enable this role when rdo and rhos officially diverge - #- { role: linux/rhel/rdo } - - { role: product/rdo/rhel } - -- name: CentOS RDO prep - hosts: "{{ repo_host }}:&CentOS" - vars: - - ansible_ssh_user: root - roles: - - { role: linux/centos } - - { role: product/rdo/rhel } - -- name: Linux common prep (Collect performance data, etc.) - hosts: "{{ repo_host }}" - vars: - - ansible_ssh_user: root - roles: - - { role: linux-common } - -- name: Update packages on the host - hosts: "{{ repo_host }}" - vars: - - ansible_ssh_user: root - tasks: - - name: repolist - command: yum -d 7 repolist - - - name: update all packages - yum: name=* state=latest - when: yum_update | bool - - - name: Find if a new kernel was installed - shell: find /boot/ -anewer /proc/1/stat -name 'initramfs*' | egrep ".*" - register: new_kernel - ignore_errors: True - when: "'{{ repo_host }}' == 'virthost'" - - - name: reboot host - sudo: no - local_action: - wait_for_ssh - reboot_first=true - host="{{ ansible_ssh_host }}" - user="root" - ssh_opts="-F {{ base_dir }}/khaleesi/ssh.config.ansible" - key="{{ ansible_ssh_private_key_file }}" - timeout=900 - sudo=false - when: "'{{ repo_host }}' == 'virthost' and new_kernel.rc == 0" diff --git a/playbooks/installer/rdo-manager/undercloud/run.yml b/playbooks/installer/rdo-manager/undercloud/run.yml index ebeb87c98..4a675c409 100644 --- a/playbooks/installer/rdo-manager/undercloud/run.yml +++ b/playbooks/installer/rdo-manager/undercloud/run.yml @@ -1,149 +1,4 @@ --- -- name: install the undercloud packages and get the guest image - hosts: undercloud - tasks: - - name: install python-rdomanager-oscplugin - yum: name=python-rdomanager-oscplugin state=present - sudo: yes - - - name: install yum-plugin-priorities rdo-manager - yum: 
name={{item}} state=present - sudo: yes - with_items: - - yum-plugin-priorities - when: product.name == "rdo" - - - name: install rdo-manager-deps - yum: name={{item}} state=present - sudo: yes - with_items: - - python-tripleoclient - when: product.name == "rdo" or product.full_version == "8-director" - - - name: install python-rdomanager-oscplugin - yum: name=python-rdomanager-oscplugin state=present - sudo: yes - - - name: install python-passlib - yum: name=python-passlib state=present - sudo: yes - - -- name: Customize the answer file for baremetal deployment - hosts: undercloud:&baremetal - tasks: - - name: check if answers file exists - stat: path="/usr/share/instack-undercloud/instack.answers.sample" - register: answers_file_present - - - name: check if conf file exists - stat: path="/usr/share/instack-undercloud/undercloud.conf.sample" - register: conf_file_present - - - name: fail if there is no answers file and no conf file - fail: msg="Neither a conf file nor an answers file exists" - when: answers_file_present.stat.exists == False and conf_file_present.stat.exists == False - - - name: copy baremetal answers file - shell: cp /usr/share/instack-undercloud/instack.answers.sample {{ instack_user_home }}/instack.answers - when: answers_file_present.stat.exists == True - - - name: edit instack.answers file - local_interface - lineinfile: dest={{ instack_user_home }}/instack.answers regexp=^LOCAL_INTERFACE line=LOCAL_INTERFACE={{ hw_env.answers_local_interface }} - when: answers_file_present.stat.exists == True - - - name: edit instack.answers file - network - replace: dest={{ instack_user_home }}/instack.answers regexp='192.0.2' replace={{ hw_env.network }} - when: hw_env.network is defined and answers_file_present.stat.exists == True - - - name: edit instack.answers file - network gateway - lineinfile: dest={{ instack_user_home }}/instack.answers regexp=^NETWORK_GATEWAY line=NETWORK_GATEWAY={{ hw_env.network_gateway }} - when: answers_file_present.stat.exists 
== True - - - name: copy baremetal conf file - shell: cp /usr/share/instack-undercloud/undercloud.conf.sample {{ instack_user_home }}/undercloud.conf - when: conf_file_present.stat.exists == True - - - name: edit undercloud.conf file - local_interface - lineinfile: dest={{ instack_user_home }}/undercloud.conf regexp=^#local_interface line=local_interface={{ hw_env.answers_local_interface }} - when: conf_file_present.stat.exists == True - - - name: edit undercloud.conf file - dhcp_start - lineinfile: dest={{ instack_user_home }}/undercloud.conf regexp=^#dhcp_start line=dhcp_start={{ hw_env.dhcp_start }} - when: conf_file_present.stat.exists == True and hw_env.dhcp_start is defined - - - name: edit undercloud.conf file - dhcp_end - lineinfile: dest={{ instack_user_home }}/undercloud.conf regexp=^#dhcp_end line=dhcp_end={{ hw_env.dhcp_end }} - when: conf_file_present.stat.exists == True and hw_env.dhcp_end is defined - - - name: edit undercloud.conf file - discovery_iprange - lineinfile: dest={{ instack_user_home }}/undercloud.conf regexp=^#discovery_iprange line=discovery_iprange={{ hw_env.discovery_iprange }} - when: conf_file_present.stat.exists == True and hw_env.discovery_iprange is defined - - - name: edit undercloud.conf file - network_gateway - lineinfile: dest={{ instack_user_home }}/undercloud.conf regexp=^#network_gateway line=network_gateway={{ hw_env.undercloud_network_gateway }} - when: conf_file_present.stat.exists == True and hw_env.undercloud_network_gateway is defined - - - name: edit undercloud.conf file - local_ip - lineinfile: dest={{ instack_user_home }}/undercloud.conf regexp=^#local_ip line=local_ip={{ hw_env.undercloud_local_ip }} - when: conf_file_present.stat.exists == True and hw_env.undercloud_local_ip is defined - - - name: edit undercloud.conf file - undercloud_public_vip - lineinfile: dest={{ instack_user_home }}/undercloud.conf regexp=^#undercloud_public_vip line=undercloud_public_vip={{ hw_env.undercloud_public_vip }} - when: 
conf_file_present.stat.exists == True and hw_env.undercloud_public_vip is defined - - - name: edit undercloud.conf file - undercloud_admin_vip - lineinfile: dest={{ instack_user_home }}/undercloud.conf regexp=^#undercloud_admin_vip line=undercloud_admin_vip={{ hw_env.undercloud_admin_vip }} - when: conf_file_present.stat.exists == True and hw_env.undercloud_admin_vip is defined - - - name: edit undercloud.conf file - network - shell: > - sed -i 's/192.0.2/{{ hw_env.network }}/g' {{ instack_user_home }}/undercloud.conf; - sed -i '/{{ hw_env.network }}/s/#//g' {{ instack_user_home }}/undercloud.conf - when: hw_env.network is defined and conf_file_present.stat.exists == True - - - name: register short hostname - shell: "hostname -s" - register: short_hostname - - - name: register full hostname - shell: "cat /etc/hostname" - register: full_hostname - - - name: set the hostname - shell: > - hostnamectl set-hostname {{ full_hostname.stdout }}; - hostnamectl set-hostname --transient {{ full_hostname.stdout }} - sudo: yes - - - name: Set /etc/hostname for those that need it - lineinfile: > - dest=/etc/hosts - line="127.0.1.1 {{ short_hostname.stdout }} {{ full_hostname.stdout }}" - sudo: yes - - - name: get domain from /etc/resolv.conf - shell: cat /etc/resolv.conf | grep search | sed -n -e 's/^.*search //p' - register: search_domain - sudo: yes - - - name: add short and full hostname to /etc/hosts - shell: "sed -i 's/localhost4.localdomain4/localhost4.localdomain4 {{ short_hostname.stdout }} {{ full_hostname.stdout }} {{ short_hostname.stdout }}.{{ search_domain.stdout }}/g' /etc/hosts" - sudo: yes - - - name: add images and templates folders - shell: > - mkdir {{ instack_user_home }}/images; - mkdir {{ instack_user_home }}/templates - when: hw_env.env_type == 'scale_lab' - - - name: copy instackenv.json to nodes.json - shell: cp {{ instack_user_home }}/instackenv.json {{ instack_user_home }}/nodes.json - - - name: installing python-six (workaround) - yum: name=python-six 
state=present - sudo: yes - - name: install the undercloud hosts: undercloud tasks: @@ -179,120 +34,3 @@ - name: install the undercloud shell: openstack undercloud install &> {{ instack_user_home }}/undercloud_install_idempotent_check.log -- name: undercloud post install workarounds - hosts: undercloud - tasks: - - name: disable haproxy check (workaround bug bz 1246525) - sudo: yes - replace: dest=/etc/haproxy/haproxy.cfg regexp='(listen ironic\n.*\n.*)\n.*option httpchk GET \/' replace='\1' - when: workarounds.enabled is defined and workarounds.enabled|bool and ha_config_file.stat.exists - - - name: restart haproxy service (workaround bug bz 1246525) - command: systemctl restart haproxy - sudo: yes - when: workarounds.enabled is defined and workarounds.enabled|bool and ha_config_file.stat.exists - - - name: increase stack_action_timeout to 4 hours (workaround for bz 1243365) - command: openstack-config --set /etc/heat/heat.conf DEFAULT stack_action_timeout 14400 - sudo: yes - when: workarounds.enabled is defined and workarounds.enabled|bool - - - name: restart openstack-heat-engine (workaround for bz 1243365) - command: systemctl restart openstack-heat-engine - sudo: yes - when: workarounds.enabled is defined and workarounds.enabled|bool - - - name: check if haproxy is present (workaround bug bz 1246525) - stat: path=/etc/haproxy/haproxy.cfg - register: ha_config_file - - - name: disable haproxy check (workaround bug bz 1246525) - sudo: yes - replace: dest=/etc/haproxy/haproxy.cfg regexp='(listen ironic\n.*\n.*)\n.*option httpchk GET \/' replace='\1' - when: workarounds.enabled is defined and workarounds.enabled|bool and ha_config_file.stat.exists - - - name: restart haproxy service (workaround bug bz 1246525) - command: systemctl restart haproxy - sudo: yes - when: workarounds.enabled is defined and workarounds.enabled|bool and ha_config_file.stat.exists - -- name: Execute vendor-specific setup for baremetal environment - hosts: undercloud:&baremetal - tasks: - - 
name: copy vendor-specific setup file - synchronize: > - src={{base_dir}}/khaleesi-settings/hardware_environments/{{hw_env.env_type}}/vendor_specific_setup dest={{ instack_user_home }}/vendor_specific_setup - delegate_to: local - when: hw_env.env_type != 'ovb_host_cloud' - - - name: copy over vendor-specific setup file (quintupleo_host_cloud) - local_action: command rsync --delay-updates -F --compress --archive --rsh "ssh -i {{ provisioner.key_file }} -F {{base_dir}}/khaleesi/ssh.config.ansible -S none -o StrictHostKeyChecking=no" {{base_dir}}/khaleesi-settings/hardware_environments/{{hw_env.env_type}}/vendor_specific_setup undercloud:{{ instack_user_home }}/vendor_specific_setup - when: hw_env.env_type == 'ovb_host_cloud' - - - name: execute vendor-specific setup - shell: > - chmod 755 {{ instack_user_home }}/vendor_specific_setup; - {{ instack_user_home }}/vendor_specific_setup - -- name: Set ironic to control the power state - hosts: undercloud:&baremetal - tasks: - - name: get power state from /etc/ironic/ironic.conf (workaround for bz 1246641) - sudo: yes - shell: > - sudo cat /etc/ironic/ironic.conf | grep 'force_power_state_during_sync=False' - when: workarounds.enabled is defined and workarounds.enabled|bool - - - name: allow ironic to control the power state (workaround for bz 1246641) - sudo: yes - shell: > - sed -i 's/force_power_state_during_sync=False/force_power_state_during_sync=True/g' /etc/ironic/ironic.conf - when: workarounds.enabled is defined and workarounds.enabled|bool - - - name: get power state from /etc/ironic/ironic.conf (workaround for bz 1246641) - sudo: yes - shell: > - sudo cat /etc/ironic/ironic.conf | grep 'force_power_state_during_sync=True' - when: workarounds.enabled is defined and workarounds.enabled|bool - - - name: restart openstack-ironic-conductor (workaround for bz 1246641) - sudo: yes - shell: > - systemctl restart openstack-ironic-conductor - when: workarounds.enabled is defined and workarounds.enabled|bool - -- name: 
Execute vendor-specific setup for baremetal environment - hosts: undercloud:&baremetal - tasks: - - name: copy vendor-specific setup file - synchronize: > - src={{base_dir}}/khaleesi-settings/hardware_environments/{{hw_env.env_type}}/vendor_specific_setup dest={{ instack_user_home }}/vendor_specific_setup - delegate_to: local - when: hw_env.env_type != 'ovb_host_cloud' - - - name: copy over vendor-specific setup file (quintupleo_host_cloud) - local_action: command rsync --delay-updates -F --compress --archive --rsh "ssh -i {{ provisioner.key_file }} -F {{base_dir}}/khaleesi/ssh.config.ansible -S none -o StrictHostKeyChecking=no" {{base_dir}}/khaleesi-settings/hardware_environments/{{hw_env.env_type}}/vendor_specific_setup undercloud:{{ instack_user_home }}/vendor_specific_setup - when: hw_env.env_type == 'ovb_host_cloud' - - - name: execute vendor-specific setup - shell: > - chmod 755 {{ instack_user_home }}/vendor_specific_setup; - {{ instack_user_home }}/vendor_specific_setup - -- name: setup networking on virt for network isolation - hosts: undercloud:&virthost - tasks: - - name: net-iso virt setup vlans - when: installer.network.isolation == 'single_nic_vlans' - shell: > - source {{ instack_user_home }}/stackrc; - sudo ovs-vsctl add-port br-ctlplane vlan10 tag=10 -- set interface vlan10 type=internal; - sudo ip l set dev vlan10 up; sudo ip addr add 172.16.23.251/24 dev vlan10; - -- name: update neutron values for undercloud - hosts: undercloud - tasks: - - name: update neutron quota to unlimited - shell: > - source {{ instack_user_home }}/stackrc; - neutron quota-update --port -1; diff --git a/playbooks/installer/rdo-manager/user/README.txt b/playbooks/installer/rdo-manager/user/README.txt new file mode 100644 index 000000000..81e18bb2d --- /dev/null +++ b/playbooks/installer/rdo-manager/user/README.txt @@ -0,0 +1,4 @@ +This playbook follows the documentation from tripleo as closely as possible + +The user playbooks have been broken out of the environment setup 
as they are used by multiple environments +http://docs.openstack.org/developer/tripleo-docs/environments/environments.html diff --git a/playbooks/installer/rdo-manager/user/main.yml b/playbooks/installer/rdo-manager/user/main.yml new file mode 100644 index 000000000..c6933f7d2 --- /dev/null +++ b/playbooks/installer/rdo-manager/user/main.yml @@ -0,0 +1,46 @@ +--- +- name: Create the stack user + hosts: "{{ host }}" + vars: + - ansible_ssh_user: root + tasks: + - name: create user + user: name="{{ provisioner.remote_user }}" state=present password=stack + + - name: copy the .bash_profile file + command: cp /root/.bash_profile /home/{{ provisioner.remote_user }}/ + + - name: set file permissions on .bash_profile + file: path=/home/{{ provisioner.remote_user }}/.bash_profile mode=0755 owner={{ provisioner.remote_user }} group={{ provisioner.remote_user }} + + - name: create .ssh dir + file: path=/home/{{ provisioner.remote_user }}/.ssh mode=0700 owner={{ provisioner.remote_user }} group=stack state=directory + + - name: copy the authorized_keys file + command: cp /root/.ssh/authorized_keys /home/{{ provisioner.remote_user }}/.ssh/ + + - name: set file permissions on authorized_hosts + file: path=/home/{{ provisioner.remote_user }}/.ssh/authorized_keys mode=0600 owner={{ provisioner.remote_user }} group={{ provisioner.remote_user }} + + - name: add user to sudoers + lineinfile: dest=/etc/sudoers line="{{ provisioner.remote_user }} ALL=(root) NOPASSWD:ALL" + + - name: set fact for the stack user home + set_fact: instack_user_home=/home/{{ provisioner.remote_user }} + + - name: copy ssh keys + command: cp /root/.ssh/id_rsa /home/{{ provisioner.remote_user }}/.ssh/ + when: hw_env.env_type == 'ovb_host_cloud' + + - name: copy ssh pub keys + command: cp /root/.ssh/id_rsa.pub /home/{{ provisioner.remote_user }}/.ssh/ + when: hw_env.env_type == 'ovb_host_cloud' + + - name: set permission on keys + file: path=/home/{{ provisioner.remote_user }}/.ssh/id_rsa mode=0600 
owner=stack group=stack + when: hw_env.env_type == 'ovb_host_cloud' + + - name: set permission on pub keys + file: path=/home/{{ provisioner.remote_user }}/.ssh/id_rsa.pub mode=0644 owner=stack group=stack + when: hw_env.env_type == 'ovb_host_cloud' + diff --git a/playbooks/installer/rdo-manager/yum_repos/README.txt b/playbooks/installer/rdo-manager/yum_repos/README.txt new file mode 100644 index 000000000..22e111cb6 --- /dev/null +++ b/playbooks/installer/rdo-manager/yum_repos/README.txt @@ -0,0 +1,4 @@ +This playbook follows the documentation from tripleo as closely as possible + +The yum repository playbooks have been broken out of the environment setup as they are used by multiple environments +http://docs.openstack.org/developer/tripleo-docs/environments/environments.html diff --git a/playbooks/installer/rdo-manager/yum_repos/repo-rdo.yml b/playbooks/installer/rdo-manager/yum_repos/repo-rdo.yml new file mode 100644 index 000000000..80f7ada59 --- /dev/null +++ b/playbooks/installer/rdo-manager/yum_repos/repo-rdo.yml @@ -0,0 +1,65 @@ +--- +- include: "{{ base_dir }}/khaleesi/playbooks/group_by.yml ansible_ssh_user=root" + +- name: RHEL RDO prep + hosts: "{{ repo_host }}:&RedHat" + vars: + - ansible_ssh_user: root + roles: + # enable this role when rdo and rhos officially diverge + #- { role: linux/rhel/rdo } + - { role: product/rdo/rhel } + +- name: CentOS RDO prep + hosts: "{{ repo_host }}:&CentOS" + vars: + - ansible_ssh_user: root + roles: + - { role: linux/centos } + - { role: product/rdo/rhel } + +- name: Linux common prep (Collect performance data, etc.) 
+ hosts: "{{ repo_host }}" + vars: + - ansible_ssh_user: root + roles: + - { role: linux-common } + +- name: Enable EPEL + hosts: "{{ repo_host }}" + vars: + - ansible_ssh_user: root + tasks: + - name: Install epel release + command: "yum localinstall -y {{ distro.epel_release }}" + +- name: Update packages on the host + hosts: "{{ repo_host }}" + vars: + - ansible_ssh_user: root + tasks: + - name: repolist + command: yum -d 7 repolist + + - name: update all packages + yum: name=* state=latest + when: yum_update | bool + + - name: Find if a new kernel was installed + shell: find /boot/ -anewer /proc/1/stat -name 'initramfs*' | egrep ".*" + register: new_kernel + ignore_errors: True + when: "'{{ repo_host }}' == 'virthost'" + + - name: reboot host + sudo: no + local_action: + wait_for_ssh + reboot_first=true + host="{{ ansible_ssh_host }}" + user="root" + ssh_opts="-F {{ base_dir }}/khaleesi/ssh.config.ansible" + key="{{ ansible_ssh_private_key_file }}" + timeout=900 + sudo=false + when: "'{{ repo_host }}' == 'virthost' and new_kernel.rc == 0" diff --git a/playbooks/installer/rdo-manager/undercloud/repo-rhos.yml b/playbooks/installer/rdo-manager/yum_repos/repo-rhos.yml similarity index 93% rename from playbooks/installer/rdo-manager/undercloud/repo-rhos.yml rename to playbooks/installer/rdo-manager/yum_repos/repo-rhos.yml index 93118dd72..01e1fd49c 100644 --- a/playbooks/installer/rdo-manager/undercloud/repo-rhos.yml +++ b/playbooks/installer/rdo-manager/yum_repos/repo-rhos.yml @@ -5,19 +5,8 @@ hosts: "{{ repo_host }}:&RedHat" vars: - ansible_ssh_user: root - - product_override_version: 7 gather_facts: yes tasks: - - name: clean release rpms - yum: name={{ item }} state=absent - with_items: - - rhos-release - - - name: remove any yum repos not owned by rpm - shell: rm -Rf /etc/yum.repos.d/{{ item }} - with_items: - - beaker-* - - name: Install release tool on machine command: "yum localinstall -y {{ product.rpm }}" From b91fd73a02c9aca2c5be313015a7849df5d94fd5 Mon 
Sep 17 00:00:00 2001 From: Wesley Hayutin Date: Wed, 27 Jan 2016 09:42:26 -0500 Subject: [PATCH 028/137] add more debug information regarding ironic to the status playbook Change-Id: Ic4e58a4d577c147848d7243af1e861e02afafdc6 --- .../installer/rdo-manager/overcloud/status.yml | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/playbooks/installer/rdo-manager/overcloud/status.yml b/playbooks/installer/rdo-manager/overcloud/status.yml index fff46ad6a..a8a468905 100644 --- a/playbooks/installer/rdo-manager/overcloud/status.yml +++ b/playbooks/installer/rdo-manager/overcloud/status.yml @@ -43,6 +43,13 @@ when: overcloud_deployment_result is defined and overcloud_deployment_result != "0" sudo: yes command: "grep ERROR /var/log/heat/heat-engine.log" + ignore_errors: yes + + - name: grep for errors in the ironic logs + when: overcloud_deployment_result is defined and overcloud_deployment_result != "0" + sudo: yes + command: "cat /var/log/ironic/* | grep -v ERROR_FOR_DIVISION_BY_ZERO | grep ERROR" + ignore_errors: yes - name: show ironic nodes create template template: src={{ base_dir }}/khaleesi/playbooks/installer/rdo-manager/templates/show_nodes.sh dest={{ instack_user_home }}/show_nodes.sh mode=0755 @@ -105,7 +112,7 @@ command: ceph status ignore_errors: yes -- name: dump puppet apply logs into /var/log for collection +- name: dump journal logs into /var/log for collection hosts: overcloud gather_facts: no tasks: @@ -114,6 +121,11 @@ shell: journalctl -u os-collect-config > /var/log/os-collect-config.log ignore_errors: yes + - name: get ironic logs + sudo: yes + shell: journalctl -u openstack-ironic-conductor -u openstack-ironic-api > /var/log/ironic-conductor-api-journal.log + ignore_errors: yes + - name: fail playbook when instack-deploy-overcloud fails hosts: undercloud tasks: From b3f4579178ad375f8bb83fcee558b4f5fcdf4212 Mon Sep 17 00:00:00 2001 From: Harry Rybacki Date: Tue, 19 Jan 2016 10:29:26 -0500 Subject: [PATCH 029/137] Track 
blueprint history in Khaleesi Presently all blueprint/spec discussions regarding Khaleesi happen through email and/or irc discussions. This attempts to more closely align the blueprint/spec development process with upstream OS components. The primary goals are: - Standardize the way in which blueprint/specs are proposed and agreed upon. - Ensure historical context around the development for any given blueprint/spec is available regardless of how long it has been since it was agreed upon or implemented. - Increase accuracy of projected timelines for Khaleesi features. - Encourage development of a 'road map' for Khaleesi. Note: The base templatec was based off of the current heat-spec release (Mitaka) template. Change-Id: I80d4df2656f7aae65736bf05012bfa9d55c98b26 --- blueprints/templates/template.rst | 93 +++++++++++++++++++++++++++++++ 1 file changed, 93 insertions(+) create mode 100644 blueprints/templates/template.rst diff --git a/blueprints/templates/template.rst b/blueprints/templates/template.rst new file mode 100644 index 000000000..5c2320bca --- /dev/null +++ b/blueprints/templates/template.rst @@ -0,0 +1,93 @@ +.. + This work is licensed under a Creative Commons Attribution 3.0 Unported + License. + + http://creativecommons.org/licenses/by/3.0/legalcode + +.. + This template should be in ReSTructured text. The filename in the git + repository should match the launchpad URL, for example a URL of + https://bugzilla.redhat.com/show_bug.cgi?id= should be named + .rst . Please do not delete any of the sections in this + template. If you have nothing to say for a whole section, just write: None + For help with syntax, see http://sphinx-doc.org/rest.html + To test out your formatting, see http://www.tele3.cz/jbar/rest/rest.html + +=========================== +The title of your blueprint +=========================== + +Introduction paragraph -- why are we doing anything? + +Problem description +=================== + +A detailed description of the problem. 
+ +Proposed change +=============== + +Here is where you cover the change you propose to make in detail. How do you +propose to solve this problem? + +If this is one part of a larger effort make it clear where this piece ends. In +other words, what's the scope of this effort? + +Include where in the Khaleesi tree hierarchy this will reside. + +Alternatives +------------ + +This is an optional section, where it does apply we'd just like a demonstration +that some thought has been put into why the proposed approach is the best one. + +Implementation +============== + +Assignee(s) +----------- + +Who is leading the writing of the code? Or is this a blueprint where you're +throwing it out there to see who picks it up? + +If more than one person is working on the implementation, please designate the +primary author and contact. + +Primary assignee: + + TBD: + +Can optionally can list additional ids if they intend on doing +substantial implementation work on this blueprint. + +Milestones +---------- + +Target Milestone for completion: + + TBD: As Khaleesi has no current 'release cycle' it's hard to project time lines and + allocate resources accordingly. This is something we should discuss. + +Work Items +---------- + +Work items or tasks -- break the feature up into the things that need to be +done to implement it. Those parts might end up being done by different people, +but we're mostly trying to understand the time line for implementation. + +- : + +- : + + ... + +- : + +Dependencies +============ + +- Include specific references to specs and/or blueprints in Khaleesi, or in other + projects, that this one either depends on or is related to. + +- Does this feature require any new library dependencies or code otherwise not + included in OpenStack? Or does it depend on a specific version of library? 
From a40dd790f8593c59645df826ab96a04b8d37ad1b Mon Sep 17 00:00:00 2001 From: Yair Fried Date: Thu, 31 Dec 2015 15:17:13 +0200 Subject: [PATCH 030/137] Update packstack installer to user rhos-release module Fix call to set literal which might be unsupported on python 2.6 and el6. Change-Id: If1ac0d3fec4631823a35f6b73b5db5b9c550545c --- library/rhos-release.py | 2 +- playbooks/installer/packstack/repo-rhos.yml | 9 +++++++-- settings/product/rhos/repo/common/common.yml | 1 + settings/product/rhos/repo/poodle.yml | 3 --- 4 files changed, 9 insertions(+), 6 deletions(-) diff --git a/library/rhos-release.py b/library/rhos-release.py index fba05293f..dfa57b80a 100644 --- a/library/rhos-release.py +++ b/library/rhos-release.py @@ -177,7 +177,7 @@ def released(line): installed_releases = map(released, release_lines) if len(installed_releases) > 2 or (len(installed_releases) == 2 and - set(r["channel"] for r in installed_releases) != {"ospd", "core"}): + set(r["channel"] for r in installed_releases) != set(("ospd", "core"))): module.fail_json(msg="Can't handle more than 2 channels. 1 core, 1 ospd. 
Found %s" % installed_releases) return dict( diff --git a/playbooks/installer/packstack/repo-rhos.yml b/playbooks/installer/packstack/repo-rhos.yml index 69ac5db52..3eb0ce841 100644 --- a/playbooks/installer/packstack/repo-rhos.yml +++ b/playbooks/installer/packstack/repo-rhos.yml @@ -14,8 +14,13 @@ - name: Install release tool on machines command: "yum localinstall -y {{ product.rpm }}" - - name: Execute rhos-release for packstack poodle/puddle - command: "rhos-release {{ product.version.major }} {{ product.repo.rhos_release.extra_args|join(' ') }}" + - name: Get RHOS repo files + rhos-release: + release: "{{ product.version.major }}" + repo_type: "{{ product.repo.type }}" + state: "{{ product.repo.state }}" + distro: "{{ product.repo.distro | default(omit) }}" + dest: "{{ product.repo.dest | default(omit) }}" - name: repolist command: yum -d 7 repolist diff --git a/settings/product/rhos/repo/common/common.yml b/settings/product/rhos/repo/common/common.yml index e348a03a8..8bb8a3696 100644 --- a/settings/product/rhos/repo/common/common.yml +++ b/settings/product/rhos/repo/common/common.yml @@ -1,6 +1,7 @@ product: rpm: !lookup private.distro.rhel.rhos_release_rpm repo: + state: pinned release: latest location: bos mirror: download.eng.{{ !lookup product.repo.location }}.redhat.com diff --git a/settings/product/rhos/repo/poodle.yml b/settings/product/rhos/repo/poodle.yml index 83adea54c..c591fffd2 100644 --- a/settings/product/rhos/repo/poodle.yml +++ b/settings/product/rhos/repo/poodle.yml @@ -4,6 +4,3 @@ product: repo: type: poodle short_type: pod - rhos_release: - extra_args: - - "-d" From 38515c8bb817b75ef876274e46d74c60d68c7d28 Mon Sep 17 00:00:00 2001 From: Wesley Hayutin Date: Sun, 31 Jan 2016 08:47:06 -0500 Subject: [PATCH 031/137] update tempest skip list for ospd7/8 Change-Id: I678479d871b593ec238a8a90154e0b101545574d --- .../rdoci-rhos-7-director-rdo-manager | 4 +++ .../rdoci-rhos-8-director-rdo-manager | 28 ------------------- 2 files changed, 4 
insertions(+), 28 deletions(-) diff --git a/playbooks/post-deploy/rdo-manager/files/tempest_skip/rdoci-rhos-7-director-rdo-manager b/playbooks/post-deploy/rdo-manager/files/tempest_skip/rdoci-rhos-7-director-rdo-manager index e69de29bb..dc63e9f64 100644 --- a/playbooks/post-deploy/rdo-manager/files/tempest_skip/rdoci-rhos-7-director-rdo-manager +++ b/playbooks/post-deploy/rdo-manager/files/tempest_skip/rdoci-rhos-7-director-rdo-manager @@ -0,0 +1,4 @@ +# rhbz1266947 +-tempest.api.identity.admin.v3 +# rhbz1295561 +-tempest.api.image.v2.test_images.BasicOperationsImagesTest.test_update_image diff --git a/playbooks/post-deploy/rdo-manager/files/tempest_skip/rdoci-rhos-8-director-rdo-manager b/playbooks/post-deploy/rdo-manager/files/tempest_skip/rdoci-rhos-8-director-rdo-manager index c4dc18db0..dc63e9f64 100644 --- a/playbooks/post-deploy/rdo-manager/files/tempest_skip/rdoci-rhos-8-director-rdo-manager +++ b/playbooks/post-deploy/rdo-manager/files/tempest_skip/rdoci-rhos-8-director-rdo-manager @@ -1,32 +1,4 @@ -# rhbz1253709 --tempest.api.compute.certificates.test_certificates.CertificatesV2TestJSON.test_create_root_certificate --tempest.api.compute.certificates.test_certificates.CertificatesV2TestJSON.test_get_root_certificate -# rhbz1253765 --tempest.api.object_storage.test_container_staticweb.StaticWebTest.test_web_index --tempest.api.object_storage.test_container_staticweb.StaticWebTest --tempest.api.object_storage.test_object_slo.ObjectSloTest.test_delete_large_object --tempest.api.object_storage.test_object_slo.ObjectSloTest.test_list_large_object_metadata --tempest.api.object_storage.test_object_slo.ObjectSloTest.test_retrieve_large_object --tempest.api.object_storage.test_object_slo.ObjectSloTest.test_upload_manifest --tempest.api.object_storage.test_object_version.ContainerTest.test_versioned_container --tempest.api.orchestration.stacks.test_swift_resources.SwiftResourcesTestJSON.test_acl 
--tempest.api.orchestration.stacks.test_swift_resources.SwiftResourcesTestJSON.test_metadata -# rhbz1254938 --tempest.api.volume.admin.test_volumes_backup.VolumesBackupsV1Test.test_volume_backup_create_get_detailed_list_restore_delete --tempest.api.volume.admin.test_volumes_backup.VolumesBackupsV2Test.test_volume_backup_create_get_detailed_list_restore_delete --tempest.api.volume.test_volumes_snapshots.VolumesV1SnapshotTestJSON.test_volume_from_snapshot --tempest.api.volume.test_volumes_snapshots.VolumesV2SnapshotTestJSON.test_volume_from_snapshot # rhbz1266947 -tempest.api.identity.admin.v3 -# rhbz1274308 --tempest.api.object_storage.test_container_services.ContainerTest.test_create_container --tempest.api.object_storage.test_object_services.ObjectTest.test_update_object_metadata -# rhbz1240816 --tempest.scenario.test_volume_boot_pattern -# rhbz1295556 --tempest.api.volume.test_volumes_get # rhbz1295561 -tempest.api.image.v2.test_images.BasicOperationsImagesTest.test_update_image -# rhbz1295565 --tempest.api.network.test_ports.PortsTestJSON.test_create_port_in_allowed_allocation_pools --tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_port_in_allowed_allocation_pools From f3e9d0723446df1e82047c578c8fd2533efcdb00 Mon Sep 17 00:00:00 2001 From: Matt Young Date: Fri, 29 Jan 2016 16:13:05 -0500 Subject: [PATCH 032/137] Update cleanup_virthost.yml to use cleanup role playbooks/installer/rdo-manager/undercloud/cleanup-virthost.yml doesn't exist anymore, it's been relocated to a role. Update this playbook to use the role, to facilitate easily cleaning up a virthost. 
Change-Id: Ic341a93c8ce6576a1ce5d076b4ed95fbdf7fbb10 --- playbooks/installer/rdo-manager/cleanup_virthost.yml | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/playbooks/installer/rdo-manager/cleanup_virthost.yml b/playbooks/installer/rdo-manager/cleanup_virthost.yml index 84b2ed598..7b4e8efe1 100644 --- a/playbooks/installer/rdo-manager/cleanup_virthost.yml +++ b/playbooks/installer/rdo-manager/cleanup_virthost.yml @@ -1,3 +1,10 @@ --- -- include: "{{base_dir}}/khaleesi/playbooks/provision.yml" -- include: "{{base_dir}}/khaleesi/playbooks/installer/rdo-manager/undercloud/cleanup-virthost.yml" +- include: "{{base_dir}}/khaleesi/playbooks/provisioner/manual/main.yml" +- name: clean up rdo-manager virthost + hosts: virthost + vars: + - ansible_ssh_user: root + roles: + - { role: cleanup_nodes/rdo-manager, + when: (installer.type == "rdo-manager" and provisioner.type == "manual") + } From 680e579c90dbb84d35b69d6eca49a245036b6ed4 Mon Sep 17 00:00:00 2001 From: Harry Rybacki Date: Mon, 25 Jan 2016 13:52:27 -0500 Subject: [PATCH 033/137] Adds best practices documentation for Khaleesi Provides Rules and Examples for best practices and coding standards for Khlaeesi Change-Id: Ia51f89a6be6384db0889ddd447006689c5cf1a2b --- .gitignore | 1 + doc/best_practices.rst | 311 +++++++++++++++++++++++++++++++++++++++++ doc/index.rst | 1 + 3 files changed, 313 insertions(+) create mode 100644 doc/best_practices.rst diff --git a/.gitignore b/.gitignore index 7575531ae..5c5b6ac9a 100644 --- a/.gitignore +++ b/.gitignore @@ -34,3 +34,4 @@ instack_hosts doc/_build/ fence_xvm.key vm-host-table +*.swp diff --git a/doc/best_practices.rst b/doc/best_practices.rst new file mode 100644 index 000000000..989e0d1d0 --- /dev/null +++ b/doc/best_practices.rst @@ -0,0 +1,311 @@ +Khaleesi Best Practices Guide +============================= + +The purpose of this guide is to lay out the coding standards and best practices to be applied when +working with Khaleesi. 
These best practices are specific to Khlaeesi but should be in line with +general `Ansible guidelines `_. + +Each section includes: + * A 'Rule' which states the best practice to apply + * Explanations and notable exceptions + * Examples of code applying the rule and, if applicable, examples of where the exceptions would hold + +General Best Practices +---------------------- + +**Rule: Whitespace and indentation** - Use 4 spaces. +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Ensure that you use 4 spaces, not tabs, to separate each level of indentation. + +Examples:: + + # BEST_PRACTICES_APPLIED + - name: set plan values for plan based ceph deployments + shell: > + source {{ instack_user_home }}/stackrc; + openstack management plan set {{ overcloud_uuid }} + -P Controller-1::CinderEnableIscsiBackend=false; + when: installer.deploy.type == 'plan' + + +**Rule: Parameter Format** - Use the YAML dictionary format when 3 or more parameters are being passed. +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +When several parameters are being passed in a module, it is hard to see exactly what value each +parameter is getting. It is preferable to use the Ansible YAML syntax to pass in parameters so +that it is clear what values are being passed for each parameter. + +Examples:: + + # Step with all arguments passed in one line + - name: create .ssh dir + file: path=/home/{{ provisioner.remote_user }}/.ssh mode=0700 owner=stack group=stack state=directory + + # BEST_PRACTICE_APPLIED + - name: create .ssh dir + file: + path: /home/{{ provisioner.remote_user }}/.ssh + mode: 0700 + owner: stack + group: stack + state: directory + + +**Rule: Line Length** - Keep text under 100 characters per line. +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +For ease of readability, keep text to a uniform length of 100 characters or less. 
+ +Examples:: + + # BEST_PRACTICE_APPLIED + - name: set plan values for plan based ceph deployments + shell: > + source {{ instack_user_home }}/stackrc; + source {{ instack_user_home }}/deploy-nodesrc; + openstack management plan set {{ overcloud_uuid }} + -P Controller-1::CinderEnableIscsiBackend=false + -P Controller-1::CinderEnableRbdBackend=true + -P Controller-1::GlanceBackend=rbd + -P Compute-1::NovaEnableRbdBackend=true; + when: installer.deploy.type == 'plan' + + +**Rule: Using Quotes** - Use single quotes. +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Use single quotes throughout playbooks except when double quotes are required +for ``shell`` commands or enclosing ``when`` statements. + +Examples:: + + # BEST_PRACTICE_APPLIED + - name: get floating ip address + register: floating_ip_result + shell: > + source {{ instack_user_home }}/overcloudrc; + neutron floatingip-show '{{ floating_ip.stdout }}' | grep 'ip_address' | sed -e 's/|//g'; + + # EXCEPTION - shell command uses both single and double quotes + - name: copy instackenv.json to root dir + shell: > + 'ssh -t -o "StrictHostKeyChecking=no" {{ provisioner.host_cloud_user }}@{{ floating_ip.stdout }} \ + "sudo cp /home/{{ provisioner.host_cloud_user }}/instackenv.json /root/instackenv.json"' + when: provisioner.host_cloud_user != 'root' + + # EXCEPTION - enclosing a ``when`` statement + - name: copy instackenv.json to root dir + shell: > + 'ssh -t -o "StrictHostKeyChecking=no" {{ provisioner.host_cloud_user }}@{{ floating_ip.stdout }} \ + "sudo cp /home/{{ provisioner.host_cloud_user }}/instackenv.json /root/instackenv.json"' + when: "provisioner.host_cloud_user != {{ user }}" + + +**Rule: Order of Arguments** - Keep argument order consistent within a playbook. +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The order of arguments is:: + + tasks: + - name: + hosts: + sudo: + module: + register: + retries: + delay: + until: + ignore_errors: + with_items: + when: + +.. 
Warning:: While ``name`` is not required, it is an Ansible best practice, and a Khaleesi best + practice, to `name all tasks `_. + +Examples:: + + # BEST_PRACTICE_APPLIED - polling + - name: poll for heat stack-list to go to COMPLETE + shell: > + source {{ instack_user_home }}/stackrc; + heat stack-list; + register: heat_stack_list_result + retries: 10 + delay: 180 + until: heat_stack_list_result.stdout.find("COMPLETE") != -1 + when: node_to_scale is defined + + # BEST_PRACTICE_APPLIED - looping through items + - name: remove any yum repos not owned by rpm + sudo: yes + shell: rm -Rf /etc/yum.repos.d/{{ item }} + ignore_errors: true + with_items: + - beaker-* + + +**Rule: Adding Workarounds** - Create bug reports and flags for all workarounds. +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +More detailed information and examples on working with workarounds in Khaleesi can be found +in the documentation on `Handling Workarounds `_. + + +**Rule: Ansible Modules** - Use Ansible modules over ``shell`` where available. +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The generic ``shell`` module should be used only when there is not a suitable Ansible module +available to do the required steps. Use the ``command`` module when a step requires a single +bash command. 
+ +Examples:: + + # BEST_PRACTICE_APPLIED - using Ansible 'git' module rather than 'shell: git clone' + - name: clone openstack-virtual-baremetal repo + git: + repo=https://github.com/cybertron/openstack-virtual-baremetal/ + dest={{instack_user_home}}/openstack-virtual-baremetal + + # BEST_PRACTICE_APPLIED - using Openstack modules that have checks for redundancy or + # existing elements + - name: setup neutron network for floating ips + register: public_network_uuid_result + quantum_network: + state: present + auth_url: '{{ get_auth_url_result.stdout }}' + login_username: admin + login_password: '{{ get_admin_password_result.stdout }}' + login_tenant_name: admin + name: '{{ installer.network.name }}' + provider_network_type: '{{ hw_env.network_type }}' + provider_physical_network: '{{ hw_env.physical_network }}' + provider_segmentation_id: '{{ hw_env.ExternalNetworkVlanID }}' + router_external: yes + shared: no + + # EXCEPTION - using shell as there are no Ansible modules yet for updating nova quotas + - name: set neutron subnet quota to unlimited + ignore_errors: true + shell: > + source {{ instack_user_home }}/overcloudrc; + neutron quota-update --subnet -1; + neutron quota-update --network -1; + + +**Rule: Scripts** - Use scripts rather than shell for lengthy or complex bash operations. +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Scripts can hide output details and debugging scripts requires the user to look in multiple +directories for the code involved. Consider using scripts over ``shell`` if the step in Ansible +requires multiple lines (more than ten), involves complex logic, or is called more than once. 
+ +Examples:: + + # BEST_PRACTICE_APPLIED - calling Beaker checkout script, + # keeps the complexity of Beaker provisioning in a standalone script + - name: provision beaker machine with kerberos auth + register: beaker_job_status + shell: > + chdir={{base_dir}}/khaleesi-settings + {{base_dir}}/khaleesi-settings/beakerCheckOut.sh + --arch={{ provisioner.beaker_arch }} + --family={{ provisioner.beaker_family }} + --distro={{ provisioner.beaker_distro }} + --variant={{ provisioner.beaker_variant }} + --hostrequire=hostlabcontroller={{ provisioner.host_lab_controller }} + --task=/CoreOS/rhsm/Install/automatjon-keys + --keyvalue=HVM=1 + --ks_meta=ksdevice=link + --whiteboard={{ provisioner.whiteboard_message }} + --job-group={{ provisioner.beaker_group }} + --machine={{ lookup('env', 'BEAKER_MACHINE') }} + --timeout=720; + async: 7200 + poll: 180 + when: provisioner.beaker_password is not defined + + +**Rule - Roles** - Use roles for generic tasks which are applied across installers, provisioners, or testers. +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Roles should be used to avoid code duplication. When using roles, take care to use debug steps and +print appropriate code output to allow users to trace the source of errors since the exact steps +are not visible directly in the playbook run. Please review the `Ansibles official best practices `_ +documentation for more information regarding role structure. + +Examples:: + + # BEST_PRACTICE_APPLIED - validate role that can be used with multiple installers + https://github.com/redhat-openstack/khaleesi/tree/master/roles/validate_openstack + + + +RDO-Manager Specific Best Practices +----------------------------------- + +The following rules apply to RDO-Manager specific playbooks and roles. + + +**Rule: Step Placement** - Place a step under the playbook directory named for where it will be executed. 
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The RDO-Manager related playbooks have the following directory structure:: + + |-- installer + |-- rdo-manager + |-- overcloud + |-- undercloud + | -- post-deploy + |-- rdo-manager + + +These guidelines are used when deciding where to place new steps: + + * ``undercloud`` - any step that can be executed without the overcloud + * ``overcloud`` - any step that is used to deploy the overcloud + * ``post-deploy`` - always a standalone playbook - steps executed once the overcloud is deployed + + +**Rule: Idempotency** - Any step executed post setup should be idempotent. +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +RDO-Manager has some set up steps that cannot be run multiple times without cleaning up the +environment. Any step added after setup should be able to rerun without causing damage. +*Defensive programming* conditions, that check for existence or availability etc. and modify +when or how a step is run, can be added to ensure playbooks remain idempotent. 
+ +Examples:: + + # BEST_PRACTICE_APPLIED - using Ansible modules that check for existing elements + - name: create provisioning network + register: provision_network_uuid_result + quantum_network: + state: present + auth_url: "{{ get_auth_url_result.stdout }}" + login_username: admin + login_password: "{{ get_admin_password_result.stdout }}" + login_tenant_name: admin + name: "{{ tmp.node_prefix }}provision" + + # BEST_PRACTICE_APPLIED - defensive programming, + # ignoring errors from creating a flavor that already exists + - name: create baremetal flavor + shell: > + source {{ instack_user_home }}/overcloudrc; + nova flavor-create baremetal auto 6144 50 2; + ignore_errors: true + + +Applying these Best Practices and Guidelines +-------------------------------------------- + +Before submitting a review for Khaleesi please review your changes to ensure they follow +with the best practices outlined above. + + +Contributing to this Guide +-------------------------- +Additional best practices and suggestions for improvements to the coding standards are welcome. +To contribute to this guide, please review `contribution documentation `_ +and submit a review to `GerritHub `_. 
diff --git a/doc/index.rst b/doc/index.rst index 754c3c380..c10a9a20c 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -13,6 +13,7 @@ Contents: khaleesi community_guidelines + best_practices development ksgen kcli From 46a431d32542dcb4a74ac9443aa9fe99832c08f9 Mon Sep 17 00:00:00 2001 From: Attila Darazs Date: Mon, 1 Feb 2016 17:55:00 +0100 Subject: [PATCH 034/137] Install rdo-release rpm during rdo production jobs Change-Id: I20c8083e89bb43386f729ad3182954963b7c1e3a --- .../installer/rdo-manager/yum_repos/repo-rdo.yml | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/playbooks/installer/rdo-manager/yum_repos/repo-rdo.yml b/playbooks/installer/rdo-manager/yum_repos/repo-rdo.yml index 80f7ada59..484949d29 100644 --- a/playbooks/installer/rdo-manager/yum_repos/repo-rdo.yml +++ b/playbooks/installer/rdo-manager/yum_repos/repo-rdo.yml @@ -33,6 +33,17 @@ - name: Install epel release command: "yum localinstall -y {{ distro.epel_release }}" +- name: Add the RDO release repos + hosts: "{{ repo_host }}" + vars: + - ansible_ssh_user: root + tasks: + - name: Install rdo-release rpm + yum: + name: "{{ product.rpmrepo[ansible_distribution] }}" + state: present + when: product.repo_type == 'production' + - name: Update packages on the host hosts: "{{ repo_host }}" vars: From 8a214a6f5a24dbefbbb34011a58413a0df821130 Mon Sep 17 00:00:00 2001 From: John Trowbridge Date: Wed, 20 Jan 2016 08:59:35 -0500 Subject: [PATCH 035/137] Remove promote multi-job RDO has created a rdo-infra repo[1] to house the promote job jjb, in order to allow for other projects to participate in gating delorean repo promotion without having to go through khaleesi. Khaleesi jobs will still be allowed in the testing phase of the promote job, but will need to consume prebuilt images (overcloud or undercloud). All jobs that run in the testing phase will have to use and absolute timeout of 2 hours. I have also removed myself as an owner of the khaleesi based jobs. 
[1] https://github.com/redhat-openstack/rdo-infra Change-Id: I3c41ffb99680639a2827036aaaf318208939350b --- jenkins-jobs/promote.yml | 163 +-------------------------------------- 1 file changed, 3 insertions(+), 160 deletions(-) diff --git a/jenkins-jobs/promote.yml b/jenkins-jobs/promote.yml index 3c96fd5b9..34e036510 100644 --- a/jenkins-jobs/promote.yml +++ b/jenkins-jobs/promote.yml @@ -13,28 +13,12 @@ - timestamps - workspace-cleanup - timeout: - type: elastic - elastic-percentage: 300 - elastic-default-timeout: 360 - timeout: 360 + type: absolute + timeout: 120 + fail: true publishers: - default-publishers -- defaults: - name: parent-promote-defaults - description: | -

Documentation: http://khaleesi.readthedocs.org/en/master/

- - concurrent: false - node: khaleesi - logrotate: - daysToKeep: 5 - artifactDaysToKeep: 5 - wrappers: - - ansicolor - - timestamps - - workspace-cleanup - - job-template: name: 'packstack-promote-{product}-{product-version}' defaults: rdo-manager-defaults @@ -88,136 +72,11 @@ - ownership: owner: whayutin at redhat.com co-owners: - - trown at redhat.com - adarazs at redhat.com publishers: - default-publishers - tests-publishers -- job-template: - name: 'promote-get-hash' - defaults: script-defaults - builders: - - shell: - !include-raw-escape: - - scripts/centos-liberty.sh - - scripts/promote-get-hash.sh - properties: - - ownership: - owner: whayutin at redhat.com - co-owners: - - trown at redhat.com - -- job-template: - name: 'promote-upload' - defaults: script-defaults - builders: - - shell: - !include-raw-escape: - - scripts/centos-liberty.sh - - scripts/promote-upload-images.sh - properties: - - ownership: - owner: whayutin at redhat.com - co-owners: - - trown at redhat.com - -- job-template: - name: 'promote-execute-promote-centos-liberty' - defaults: script-defaults - builders: - - shell: - !include-raw-escape: - - scripts/centos-liberty.sh - - scripts/promote-execute-promote.sh - properties: - - ownership: - owner: whayutin at redhat.com - co-owners: - - trown at redhat.com - - -- job-template: - name: rdo-delorean-promote-liberty - project-type: multijob - triggers: - - timed: "H */8 * * *" - defaults: parent-promote-defaults - builders: - - phase-get-hash - - phase-test-build - - phase-test-import - - phase-upload - - phase-execute-promote-centos-liberty - properties: - - ownership: - owner: whayutin@redhat.com - -- project: - name: rdo-manager-promote-jobs - jobs: - - rdo-delorean-promote-liberty - -- builder: - name: phase-get-hash - builders: - - multijob: - name: "GET THE LATEST DELOREAN YUM REPOSITORY HASH" - condition: SUCCESSFUL - projects: - - name: promote-get-hash - -- builder: - name: phase-test-build - builders: - - multijob: - name: "INSTALL 
/ TEST (BUILD IMAGES)" - condition: UNSTABLE - projects: - - name: rdo-manager-promote-rdo-liberty-minimal_no_ceph-build_rdo_promote - kill-phase-on: NEVER - property-file: /tmp/delorean_current_hash - - name: packstack-promote-rdo-liberty - kill-phase-on: NEVER - property-file: /tmp/delorean_current_hash - -- builder: - name: phase-test-import - builders: - - multijob: - name: "INSTALL / TEST (IMPORT IMAGES)" - condition: UNSTABLE - projects: - - name: rdo-manager-promote-rdo-liberty-minimal_no_ceph-build - kill-phase-on: NEVER - property-file: /tmp/delorean_current_hash - - name: rdo-manager-promote-rdo-liberty-minimal_no_ceph-import_rdo_overcloud - kill-phase-on: NEVER - property-file: /tmp/delorean_current_hash - - name: rdo-manager-promote-rdo-liberty-minimal_ha_no_ceph-import_rdo_overcloud - kill-phase-on: NEVER - property-file: /tmp/delorean_current_hash - -- builder: - name: phase-upload - builders: - - multijob: - name: "UPLOAD IMAGES TO FILE SERVER" - condition: SUCCESSFUL - projects: - - name: promote-upload - property-file: /tmp/delorean_current_hash - -- builder: - name: phase-execute-promote-centos-liberty - builders: - - multijob: - name: "UPLOAD IMAGES TO FILE SERVER" - condition: SUCCESSFUL - projects: - - name: promote-execute-promote-centos-liberty - property-file: /tmp/delorean_current_hash - - project: name: rdo-promote-jobs installer: rdo_manager @@ -259,19 +118,3 @@ pin: latest jobs: - 'packstack-promote-{product}-{product-version}' - - -- project: - name: promote-get-hash - jobs: - - promote-get-hash - -- project: - name: promote-upload - jobs: - - promote-upload - -- project: - name: promote-execute-promote-centos-liberty - jobs: - - promote-execute-promote-centos-liberty From 40197ce143ffc7d4e8c38a5ed94ca7f4ae713c3c Mon Sep 17 00:00:00 2001 From: Mathieu Bultel Date: Wed, 27 Jan 2016 14:53:35 +0100 Subject: [PATCH 036/137] Change pinned version for GA release Change-Id: I30de8766958190505f23ffc86fabeb69b03e3af6 --- 
settings/product/rhos/version/7_director/build/ga_71.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/settings/product/rhos/version/7_director/build/ga_71.yml b/settings/product/rhos/version/7_director/build/ga_71.yml index bc102751e..6ac12a2fa 100644 --- a/settings/product/rhos/version/7_director/build/ga_71.yml +++ b/settings/product/rhos/version/7_director/build/ga_71.yml @@ -3,10 +3,10 @@ product: build: ga build_version: ga-7.1 repo: - puddle_pin_version: 'GA' - poodle_pin_version: 'GA' + puddle_pin_version: 'Z2' + poodle_pin_version: 'Z2' core_product_version: 7 - puddle_director_pin_version: 'GA' + puddle_director_pin_version: 'Y1' installer: images: From 660ffe0a5994a4f15e0612bd69e744ceb2dc5cb1 Mon Sep 17 00:00:00 2001 From: Wesley Hayutin Date: Fri, 29 Jan 2016 18:13:14 -0500 Subject: [PATCH 037/137] break up the overcloud playbooks into logical steps breakup the overcloud playbooks into logical steps The goal w/ this patch is to start breaking down playbooks into independent and idempotent steps as much as possible. The breakdown of the overcloud should follow the tripleo docs as much as possible. Note, overcloud/register-nodes, overcloud/introspect-nodes is terminology that matches directly with the tripleo documentation. In this change you will find parts of the basic and advanced deployment topics. Another benefit of breaking the deployment playbooks into their corresponding doc steps is that we will be able to add new features to the deployment e.g. "Deploying Manila" or "Configuring Cinder with a netapp backend" with out distrupting the current playbooks. Additionally this enables a user to step through the automation along side the documentation which should allow developers and users to retry steps, plug in development code, or rollback a step if required. 
There are many ways to enable a step by step deployment, after considering the options we have concluded that using a directory structure with a 1-1 mapping to the docs and the yaml description of the doc placed directly in the playbooks offers the greatest transparency to the reader. Change-Id: I99d23a5dacc57163cb5e057c566de5198f941a9f --- .../break-out-overcloud-playbooks.rst | 88 +++++ .../rdo-manager/advanced-profile-matching.yml | 2 + .../rdo-manager/deploy-overcloud-execute.yml | 3 + ...loy-overcloud-prep-template-deployment.yml | 3 + ...ploy-overcloud-prep-tuskar-deployments.yml | 3 + .../installer/rdo-manager/heat-templates.yml | 2 + .../installer/rdo-manager/images/main.yml | 1 + .../installer/rdo-manager/images/run.yml | 7 +- .../installer/rdo-manager/images/upload.yml | 12 + .../rdo-manager/introspect-nodes.yml | 2 + .../installer/rdo-manager/nameserver.yml | 2 + .../openstack-virtual-baremetal/main.yml | 0 .../openstack-virtual-baremetal/run.yml | 44 +++ .../advanced-profile-matching/main.yml | 3 + .../advanced-profile-matching/post.yml | 13 + .../run-matching-ahc.yml | 2 +- .../run-matching-basic.yml | 0 .../advanced-profile-matching/run.yml | 2 + .../overcloud/ansible-inventory.yml | 50 +++ .../overcloud/deploy-overcloud/main.yml | 4 + .../overcloud/deploy-overcloud/plan/main.yml | 2 + .../overcloud/deploy-overcloud/plan/run.yml | 27 ++ .../overcloud/deploy-overcloud/pre.yml | 46 +++ .../overcloud/deploy-overcloud/run.yml | 34 ++ .../deploy-overcloud/templates/main.yml | 2 + .../deploy-overcloud/templates/run.yml | 29 ++ .../rdo-manager/overcloud/flavors/README | 1 + .../overcloud/heat-templates/README | 0 .../overcloud/heat-templates/main.yml | 3 + .../heat-templates/pre-baremetal.yml | 21 + .../overcloud/heat-templates/pre-virthost.yml | 26 ++ .../overcloud/heat-templates/run.yml | 57 +++ .../overcloud/introspect-nodes/main.yml | 2 + .../overcloud/introspect-nodes/run.yml | 58 +++ .../installer/rdo-manager/overcloud/main.yml | 9 +- 
.../rdo-manager/overcloud/nameserver/main.yml | 2 + .../rdo-manager/overcloud/nameserver/run.yml | 22 ++ .../overcloud/register-nodes/main.yml | 2 + .../overcloud/register-nodes/run.yml | 34 ++ .../installer/rdo-manager/overcloud/run.yml | 363 ------------------ .../rdo-manager/overcloud/status.yml | 62 +-- .../installer/rdo-manager/register-nodes.yml | 2 + .../templates/edeploy-state.j2 | 0 43 files changed, 627 insertions(+), 420 deletions(-) create mode 100644 blueprints/templates/break-out-overcloud-playbooks.rst create mode 100644 playbooks/installer/rdo-manager/advanced-profile-matching.yml create mode 100644 playbooks/installer/rdo-manager/deploy-overcloud-execute.yml create mode 100644 playbooks/installer/rdo-manager/deploy-overcloud-prep-template-deployment.yml create mode 100644 playbooks/installer/rdo-manager/deploy-overcloud-prep-tuskar-deployments.yml create mode 100644 playbooks/installer/rdo-manager/heat-templates.yml create mode 100644 playbooks/installer/rdo-manager/images/upload.yml create mode 100644 playbooks/installer/rdo-manager/introspect-nodes.yml create mode 100644 playbooks/installer/rdo-manager/nameserver.yml create mode 100644 playbooks/installer/rdo-manager/openstack-virtual-baremetal/main.yml create mode 100644 playbooks/installer/rdo-manager/openstack-virtual-baremetal/run.yml create mode 100644 playbooks/installer/rdo-manager/overcloud/advanced-profile-matching/main.yml create mode 100644 playbooks/installer/rdo-manager/overcloud/advanced-profile-matching/post.yml rename playbooks/installer/rdo-manager/overcloud/{ => advanced-profile-matching}/run-matching-ahc.yml (97%) rename playbooks/installer/rdo-manager/overcloud/{ => advanced-profile-matching}/run-matching-basic.yml (100%) create mode 100644 playbooks/installer/rdo-manager/overcloud/advanced-profile-matching/run.yml create mode 100644 playbooks/installer/rdo-manager/overcloud/ansible-inventory.yml create mode 100644 
playbooks/installer/rdo-manager/overcloud/deploy-overcloud/main.yml create mode 100644 playbooks/installer/rdo-manager/overcloud/deploy-overcloud/plan/main.yml create mode 100644 playbooks/installer/rdo-manager/overcloud/deploy-overcloud/plan/run.yml create mode 100644 playbooks/installer/rdo-manager/overcloud/deploy-overcloud/pre.yml create mode 100644 playbooks/installer/rdo-manager/overcloud/deploy-overcloud/run.yml create mode 100644 playbooks/installer/rdo-manager/overcloud/deploy-overcloud/templates/main.yml create mode 100644 playbooks/installer/rdo-manager/overcloud/deploy-overcloud/templates/run.yml create mode 100644 playbooks/installer/rdo-manager/overcloud/flavors/README create mode 100644 playbooks/installer/rdo-manager/overcloud/heat-templates/README create mode 100644 playbooks/installer/rdo-manager/overcloud/heat-templates/main.yml create mode 100644 playbooks/installer/rdo-manager/overcloud/heat-templates/pre-baremetal.yml create mode 100644 playbooks/installer/rdo-manager/overcloud/heat-templates/pre-virthost.yml create mode 100644 playbooks/installer/rdo-manager/overcloud/heat-templates/run.yml create mode 100644 playbooks/installer/rdo-manager/overcloud/introspect-nodes/main.yml create mode 100644 playbooks/installer/rdo-manager/overcloud/introspect-nodes/run.yml create mode 100644 playbooks/installer/rdo-manager/overcloud/nameserver/main.yml create mode 100644 playbooks/installer/rdo-manager/overcloud/nameserver/run.yml create mode 100644 playbooks/installer/rdo-manager/overcloud/register-nodes/main.yml create mode 100644 playbooks/installer/rdo-manager/overcloud/register-nodes/run.yml delete mode 100644 playbooks/installer/rdo-manager/overcloud/run.yml create mode 100644 playbooks/installer/rdo-manager/register-nodes.yml rename playbooks/installer/rdo-manager/{overcloud => }/templates/edeploy-state.j2 (100%) diff --git a/blueprints/templates/break-out-overcloud-playbooks.rst b/blueprints/templates/break-out-overcloud-playbooks.rst new file 
mode 100644 index 000000000..2201c4664 --- /dev/null +++ b/blueprints/templates/break-out-overcloud-playbooks.rst @@ -0,0 +1,88 @@ +.. + This work is licensed under a Creative Commons Attribution 3.0 Unported + License. + + http://creativecommons.org/licenses/by/3.0/legalcode + +.. + This template should be in ReSTructured text. The filename in the git + repository should match the launchpad URL, for example a URL of + https://bugzilla.redhat.com/show_bug.cgi?id= should be named + .rst . Please do not delete any of the sections in this + template. If you have nothing to say for a whole section, just write: None + For help with syntax, see http://sphinx-doc.org/rest.html + To test out your formatting, see http://www.tele3.cz/jbar/rest/rest.html + +=========================== +Break out overcloud playbooks to match tripleo documentation +=========================== + +Introduction paragraph -- why are we doing anything? + +Problem description +=================== + +Originally spec'd out as a requirement for dell/dci integration in December 2015. +Search Google docs for dci_dell_khaleesi integration. + +Integration: +------------ +How can 3rd parties inject a custom workflow? At the moment 3rd parties to CI +are not able to inject requirements into the ci w/o making a change directly to the code path. + +Integration: +------------ +Any 3rd party changes are difficult to integrate and test. The complete matrix of +gates must be executed for any change. + +Time to results: +---------------- +Breaking out a deployment into two parts undercloud and overcloud is not sufficient +when users want to deploy a cloud by hand. If there is an issue one must start from +the beginning. + +Proposed change +=============== + +The change will breakout the overcloud playbooks to match the sections as described in [1]. +A user can follow the code in the playbooks and match it directly to documentation. 
+ +[1] http://docs.openstack.org/developer/tripleo-docs/ + +Alternatives +------------ + +none propoposed. + +Implementation +============== + +Assignee(s) +----------- +- wes hayutin +- harry rybacki + + +Milestones +---------- + +Target Milestone for completion: + + - create directory structure that matches the tripleo documentation + - move the content of the playbooks into the new playbooks + - test virt, and baremetal deployments + - test puddle and poodle jobs + +Work Items +---------- + + - create directory structure that matches the tripleo documentation + - move the content of the playbooks into the new playbooks + - test virt, and baremetal deployments + - test puddle and poodle jobs + + +Dependencies +============ + +none diff --git a/playbooks/installer/rdo-manager/advanced-profile-matching.yml b/playbooks/installer/rdo-manager/advanced-profile-matching.yml new file mode 100644 index 000000000..d6420466b --- /dev/null +++ b/playbooks/installer/rdo-manager/advanced-profile-matching.yml @@ -0,0 +1,2 @@ +--- +- include: overcloud/advanced-profile-matching/main.yml diff --git a/playbooks/installer/rdo-manager/deploy-overcloud-execute.yml b/playbooks/installer/rdo-manager/deploy-overcloud-execute.yml new file mode 100644 index 000000000..6f1cf347d --- /dev/null +++ b/playbooks/installer/rdo-manager/deploy-overcloud-execute.yml @@ -0,0 +1,3 @@ +--- +- include: overcloud/deploy-overcloud/run.yml +- include: overcloud/deploy-overcloud/status.yml diff --git a/playbooks/installer/rdo-manager/deploy-overcloud-prep-template-deployment.yml b/playbooks/installer/rdo-manager/deploy-overcloud-prep-template-deployment.yml new file mode 100644 index 000000000..0c40b7220 --- /dev/null +++ b/playbooks/installer/rdo-manager/deploy-overcloud-prep-template-deployment.yml @@ -0,0 +1,3 @@ +--- +- include: overcloud/deploy-overcloud/pre.yml +- include: "overcloud/deploy-overcloud/{{ installer.deploy.type | default('templates') }}/main.yml" diff --git 
a/playbooks/installer/rdo-manager/deploy-overcloud-prep-tuskar-deployments.yml b/playbooks/installer/rdo-manager/deploy-overcloud-prep-tuskar-deployments.yml new file mode 100644 index 000000000..0c40b7220 --- /dev/null +++ b/playbooks/installer/rdo-manager/deploy-overcloud-prep-tuskar-deployments.yml @@ -0,0 +1,3 @@ +--- +- include: overcloud/deploy-overcloud/pre.yml +- include: "overcloud/deploy-overcloud/{{ installer.deploy.type | default('templates') }}/main.yml" diff --git a/playbooks/installer/rdo-manager/heat-templates.yml b/playbooks/installer/rdo-manager/heat-templates.yml new file mode 100644 index 000000000..f2f836207 --- /dev/null +++ b/playbooks/installer/rdo-manager/heat-templates.yml @@ -0,0 +1,2 @@ +--- +- include: overcloud/heat-templates/main.yml diff --git a/playbooks/installer/rdo-manager/images/main.yml b/playbooks/installer/rdo-manager/images/main.yml index f01787f39..120b3f6aa 100644 --- a/playbooks/installer/rdo-manager/images/main.yml +++ b/playbooks/installer/rdo-manager/images/main.yml @@ -1,2 +1,3 @@ --- - include: run.yml +- include: upload.yml diff --git a/playbooks/installer/rdo-manager/images/run.yml b/playbooks/installer/rdo-manager/images/run.yml index 87a5e0462..f527c8e5b 100644 --- a/playbooks/installer/rdo-manager/images/run.yml +++ b/playbooks/installer/rdo-manager/images/run.yml @@ -100,7 +100,7 @@ when: installer.overcloud_images is defined and installer.overcloud_images == "import" -- name: prep and upload images into glance +- name: prep images for glance hosts: undercloud tasks: - name: untar the overcloud images @@ -119,8 +119,3 @@ - name: list the files in overcloud_images command: ls -la {{ instack_user_home }}/overcloud_images/ - - name: prepare for overcloud by loading the images into glance - shell: > - source {{ instack_user_home }}/stackrc; - pushd {{ instack_user_home }}/overcloud_images; - openstack overcloud image upload diff --git a/playbooks/installer/rdo-manager/images/upload.yml 
b/playbooks/installer/rdo-manager/images/upload.yml new file mode 100644 index 000000000..65ac2d66e --- /dev/null +++ b/playbooks/installer/rdo-manager/images/upload.yml @@ -0,0 +1,12 @@ +--- +- name: upload images into glance + hosts: undercloud + tasks: + - name: list the files in overcloud_images + command: ls -la {{ instack_user_home }}/overcloud_images/ + + - name: prepare for overcloud by loading the images into glance + shell: > + source {{ instack_user_home }}/stackrc; + pushd {{ instack_user_home }}/overcloud_images; + openstack overcloud image upload diff --git a/playbooks/installer/rdo-manager/introspect-nodes.yml b/playbooks/installer/rdo-manager/introspect-nodes.yml new file mode 100644 index 000000000..9c73d34a8 --- /dev/null +++ b/playbooks/installer/rdo-manager/introspect-nodes.yml @@ -0,0 +1,2 @@ +--- +- include: overcloud/introspect-nodes/main.yml diff --git a/playbooks/installer/rdo-manager/nameserver.yml b/playbooks/installer/rdo-manager/nameserver.yml new file mode 100644 index 000000000..31d2a201e --- /dev/null +++ b/playbooks/installer/rdo-manager/nameserver.yml @@ -0,0 +1,2 @@ +--- +- include: overcloud/nameserver/main.yml diff --git a/playbooks/installer/rdo-manager/openstack-virtual-baremetal/main.yml b/playbooks/installer/rdo-manager/openstack-virtual-baremetal/main.yml new file mode 100644 index 000000000..e69de29bb diff --git a/playbooks/installer/rdo-manager/openstack-virtual-baremetal/run.yml b/playbooks/installer/rdo-manager/openstack-virtual-baremetal/run.yml new file mode 100644 index 000000000..fd9c6206e --- /dev/null +++ b/playbooks/installer/rdo-manager/openstack-virtual-baremetal/run.yml @@ -0,0 +1,44 @@ +--- +- name: Set up for custom template deploy with nova change + hosts: undercloud:&openstack_virtual_baremetal + tasks: + - name: clone openstack-virtual-baremetal repo + git: + repo=https://github.com/cybertron/openstack-virtual-baremetal/ + dest={{instack_user_home}}/openstack-virtual-baremetal + + - name: pin openstack 
virtual baremetal to a specific hash + shell: > + chdir={{instack_user_home}}/openstack-virtual-baremetal + git reset --hard {{ installer.custom_deploy.ovb_pin_version }} + when: installer.custom_deploy.ovb_pin_version is defined + + - name: copy tripleo-heat-templates to custom + shell: > + cp -r /usr/share/openstack-tripleo-heat-templates/ {{ instack_user_home }}/custom + + - name: add the necessary hieradata configuration + shell: > + echo "neutron::agents::ml2::ovs::firewall_driver: neutron.agent.firewall.NoopFirewallDriver" >> {{ instack_user_home }}/custom/puppet/hieradata/common.yaml + + - name: create param.ini file + local_action: shell echo "DNS_SERVER={{ hw_env.dns_server }}" > {{ base_dir }}/param.ini + + - name: check that param.ini file exists + local_action: wait_for path="{{ base_dir }}/param.ini" + + - name: add other variables to param.ini file + local_action: shell echo -e "PARENT_WORKSPACE_DIR={{ base_dir }}\nREMOTE_FILE_SERVER={{ installer.custom_deploy.image.remote_file_server }}\nIMAGE_NAME={{ installer.custom_deploy.image.name }}\nPROVISION_CIDR={{ installer.custom_deploy.host_cloud_networks.provision.cidr }}\nPRIVATE_CIDR={{ installer.custom_deploy.host_cloud_networks.private.cidr }}\nPUBLIC_CIDR={{ installer.custom_deploy.host_cloud_networks.public.cidr }}" >> {{ base_dir }}/param.ini + + - name: get ctlplane subnet uuid + register: ctlplane_subnet_uuid + shell: > + source {{ instack_user_home }}/stackrc; + neutron net-show ctlplane -f value -F subnets; + when: installer.env.type == "virthost" + + - name: update dns server on ctlplane + shell: > + source {{ instack_user_home }}/stackrc; + neutron subnet-update {{ ctlplane_subnet_uuid.stdout }} --dns_nameservers list=true {{ hw_env.dns_server }} + when: installer.env.type == "virthost" diff --git a/playbooks/installer/rdo-manager/overcloud/advanced-profile-matching/main.yml b/playbooks/installer/rdo-manager/overcloud/advanced-profile-matching/main.yml new file mode 100644 index 
000000000..5d4805e6a --- /dev/null +++ b/playbooks/installer/rdo-manager/overcloud/advanced-profile-matching/main.yml @@ -0,0 +1,3 @@ +--- +- include: run.yml +- include: post.yml diff --git a/playbooks/installer/rdo-manager/overcloud/advanced-profile-matching/post.yml b/playbooks/installer/rdo-manager/overcloud/advanced-profile-matching/post.yml new file mode 100644 index 000000000..299338217 --- /dev/null +++ b/playbooks/installer/rdo-manager/overcloud/advanced-profile-matching/post.yml @@ -0,0 +1,13 @@ +--- +- name: wait for nova + hosts: undercloud + tasks: + - name: wait until nova becomes aware of first bare metal instance + shell: > + source {{ instack_user_home }}/stackrc; + nova hypervisor-stats | grep ' vcpus ' | head -n1 | awk '{ print $4; }' + register: vcpu_count_single + retries: 20 + delay: 15 + until: vcpu_count_single.stdout|int > 0 + ignore_errors: true diff --git a/playbooks/installer/rdo-manager/overcloud/run-matching-ahc.yml b/playbooks/installer/rdo-manager/overcloud/advanced-profile-matching/run-matching-ahc.yml similarity index 97% rename from playbooks/installer/rdo-manager/overcloud/run-matching-ahc.yml rename to playbooks/installer/rdo-manager/overcloud/advanced-profile-matching/run-matching-ahc.yml index d25015e4f..03603b043 100644 --- a/playbooks/installer/rdo-manager/overcloud/run-matching-ahc.yml +++ b/playbooks/installer/rdo-manager/overcloud/advanced-profile-matching/run-matching-ahc.yml @@ -15,7 +15,7 @@ - name: create edeploy state file sudo: yes template: - src=templates/edeploy-state.j2 + src={{ base_dir }}/khaleesi/playbooks/installer/rdo-manager/templates/edeploy-state.j2 dest=/etc/ahc-tools/edeploy/state force=yes mode=0644 diff --git a/playbooks/installer/rdo-manager/overcloud/run-matching-basic.yml b/playbooks/installer/rdo-manager/overcloud/advanced-profile-matching/run-matching-basic.yml similarity index 100% rename from playbooks/installer/rdo-manager/overcloud/run-matching-basic.yml rename to 
playbooks/installer/rdo-manager/overcloud/advanced-profile-matching/run-matching-basic.yml diff --git a/playbooks/installer/rdo-manager/overcloud/advanced-profile-matching/run.yml b/playbooks/installer/rdo-manager/overcloud/advanced-profile-matching/run.yml new file mode 100644 index 000000000..63e63b0ae --- /dev/null +++ b/playbooks/installer/rdo-manager/overcloud/advanced-profile-matching/run.yml @@ -0,0 +1,2 @@ +--- +- include: run-matching-{{ installer.match_style | default('basic') }}.yml diff --git a/playbooks/installer/rdo-manager/overcloud/ansible-inventory.yml b/playbooks/installer/rdo-manager/overcloud/ansible-inventory.yml new file mode 100644 index 000000000..ff6db4090 --- /dev/null +++ b/playbooks/installer/rdo-manager/overcloud/ansible-inventory.yml @@ -0,0 +1,50 @@ +--- +- name: Post deploy + hosts: undercloud + tasks: + - name: copy the undercloud id_rsa key back to the slave + fetch: src=~/.ssh/id_rsa dest="{{ base_dir }}/khaleesi/id_rsa_undercloud" flat=yes + + - name: copy get-overcloud-nodes.py to undercloud + template: > + src={{ base_dir }}/khaleesi/playbooks/installer/rdo-manager/templates/get-overcloud-nodes.py.j2 + dest={{ instack_user_home }}/get-overcloud-nodes.py + mode=0755 + + - name: fetch overcloud node names and IPs + shell: > + source {{ instack_user_home }}/stackrc; + python {{ instack_user_home }}/get-overcloud-nodes.py + register: overcloud_nodes + ignore_errors: yes + + - name: add each overcloud node to ansible + add_host: + name={{ item.key }} + groups=overcloud + ansible_ssh_host={{ item.key }} + ansible_fqdn={{ item.value }} + ansible_ssh_user="heat-admin" + ansible_ssh_private_key_file="{{ lookup('env', 'PWD') }}/id_rsa_undercloud" + ignore_errors: yes + with_dict: overcloud_nodes.stdout + +- name: regenerate the inventory file after adding hosts + hosts: localhost + tasks: + - name: set_fact for undercloud ip #required for regeneration of ssh.config.ansible + set_fact: undercloud_ip={{ 
hostvars['undercloud']['ansible_default_ipv4']['address'] }} + + - name: create inventory from template + template: + dest: "{{ lookup('env', 'PWD') }}/{{ tmp.node_prefix }}hosts" + src: "{{ base_dir }}/khaleesi/playbooks/provisioner/templates/inventory.j2" + + - name: symlink inventory to a static name + file: + dest: "{{ lookup('env', 'PWD') }}/hosts" + state: link + src: "{{ lookup('env', 'PWD') }}/{{ tmp.node_prefix }}hosts" + + - name: regenerate ssh config + template: src={{ base_dir }}/khaleesi/playbooks/installer/rdo-manager/templates/ssh_config.j2 dest={{ base_dir }}/khaleesi/ssh.config.ansible mode=0755 diff --git a/playbooks/installer/rdo-manager/overcloud/deploy-overcloud/main.yml b/playbooks/installer/rdo-manager/overcloud/deploy-overcloud/main.yml new file mode 100644 index 000000000..c2f1bf0cf --- /dev/null +++ b/playbooks/installer/rdo-manager/overcloud/deploy-overcloud/main.yml @@ -0,0 +1,4 @@ +--- +- include: pre.yml +- include: "{{ installer.deploy.type | default('templates') }}/main.yml" +- include: run.yml diff --git a/playbooks/installer/rdo-manager/overcloud/deploy-overcloud/plan/main.yml b/playbooks/installer/rdo-manager/overcloud/deploy-overcloud/plan/main.yml new file mode 100644 index 000000000..f01787f39 --- /dev/null +++ b/playbooks/installer/rdo-manager/overcloud/deploy-overcloud/plan/main.yml @@ -0,0 +1,2 @@ +--- +- include: run.yml diff --git a/playbooks/installer/rdo-manager/overcloud/deploy-overcloud/plan/run.yml b/playbooks/installer/rdo-manager/overcloud/deploy-overcloud/plan/run.yml new file mode 100644 index 000000000..04d54503c --- /dev/null +++ b/playbooks/installer/rdo-manager/overcloud/deploy-overcloud/plan/run.yml @@ -0,0 +1,27 @@ +- name: setup deployment for a tuskar (plan) style deployment + hosts: undercloud + tasks: + - name: get plan list + shell: > + source {{ instack_user_home }}/stackrc; + openstack management plan list | grep overcloud | cut -d " " -f2 + register: overcloud_uuid_result + when: 
installer.deploy.type == 'plan' + + - name: set fact for openstack management plan + set_fact: + overcloud_uuid: "{{ overcloud_uuid_result.stdout }}" + when: installer.deploy.type == 'plan' + + - name: set plan values for plan based ceph deployments + shell: > + source {{ instack_user_home }}/stackrc; + source {{ instack_user_home }}/deploy-nodesrc; + if [ "$CEPHSTORAGESCALE" -gt "0" ]; then + openstack management plan set {{ overcloud_uuid }} \ + -P Controller-1::CinderEnableIscsiBackend=false \ + -P Controller-1::CinderEnableRbdBackend=true \ + -P Controller-1::GlanceBackend=rbd \ + -P Compute-1::NovaEnableRbdBackend=true; + fi + when: installer.deploy.type == 'plan' diff --git a/playbooks/installer/rdo-manager/overcloud/deploy-overcloud/pre.yml b/playbooks/installer/rdo-manager/overcloud/deploy-overcloud/pre.yml new file mode 100644 index 000000000..3ca620990 --- /dev/null +++ b/playbooks/installer/rdo-manager/overcloud/deploy-overcloud/pre.yml @@ -0,0 +1,46 @@ +- name: prepare for the overcloud deployment + hosts: undercloud + tasks: + - name: get ironic node ids (workaround for bz 1246641) + shell: > + source {{ instack_user_home }}/stackrc; + ironic node-list | grep 'None' | awk '{ print $2; }' + register: ironic_node_ids + when: workarounds.enabled is defined and workarounds.enabled|bool + + - name: power off ironic nodes (workaround for bz 1246641) + shell: > + source {{ instack_user_home }}/stackrc; + ironic node-set-power-state {{item}} 'off' + with_items: ironic_node_ids.stdout_lines + when: workarounds.enabled is defined and workarounds.enabled|bool + + - name: get number of nodes that could be used for the overcloud + shell: > + if [ -f {{ instack_user_home }}/instackenv.json ]; then + cat {{ instack_user_home }}/instackenv.json | grep -o pm_addr | wc -l + else + cat {{ installer.nodes.node_count | default('3') }} + fi + register: number_of_possible_nodes + + - name: poll for nodes to be in powered off state + shell: > + source {{ instack_user_home 
}}/stackrc; + ironic node-list | grep 'power off' | wc -l + register: ironic_node_power_off + retries: 10 + until: ironic_node_power_off.stdout == number_of_possible_nodes.stdout + + - name: copy template file with environment variables for overcloud nodes + template: + src={{ base_dir }}/khaleesi/playbooks/installer/rdo-manager/templates/deploy-nodes.j2 + dest={{ instack_user_home }}/deploy-nodesrc + mode=0755 + + - name: copy template file with environment variables for overcloud deploy + template: + src={{ base_dir }}/khaleesi/playbooks/installer/rdo-manager/templates/deploy-overcloudrc.j2 + dest={{ instack_user_home }}/deploy-overcloudrc + mode=0755 + diff --git a/playbooks/installer/rdo-manager/overcloud/deploy-overcloud/run.yml b/playbooks/installer/rdo-manager/overcloud/deploy-overcloud/run.yml new file mode 100644 index 000000000..10ee86c5a --- /dev/null +++ b/playbooks/installer/rdo-manager/overcloud/deploy-overcloud/run.yml @@ -0,0 +1,34 @@ +--- +- name: deploy the overcloud + hosts: undercloud + tasks: + - name: echo deploy command + shell: > + source {{ instack_user_home }}/stackrc; + source {{ instack_user_home }}/deploy-nodesrc; + source {{ instack_user_home }}/deploy-overcloudrc; + echo $DEPLOY_COMMAND + register: overcloud_deploy_command + + - name: deploy-overcloud + shell: > + source {{ instack_user_home }}/stackrc; + {{ overcloud_deploy_command.stdout }} &> overcloud_deployment_console.log + register: overcloud_deployment_result + ignore_errors: yes + + - name: echo deploy-overcloud return code + debug: var=overcloud_deployment_result.rc + + - name: heat stack-list + shell: > + source {{ instack_user_home }}/stackrc; + heat stack-list + ignore_errors: yes + + - name: overcloud deployment logs + debug: msg=" Please refer to the undercloud log file for detailed status. 
The deployment debug logs are stored under /home/stack" + + - name: set fact overcloud_deployment_result + set_fact: + overcloud_deployment_result: "{{ overcloud_deployment_result.rc }}" diff --git a/playbooks/installer/rdo-manager/overcloud/deploy-overcloud/templates/main.yml b/playbooks/installer/rdo-manager/overcloud/deploy-overcloud/templates/main.yml new file mode 100644 index 000000000..f01787f39 --- /dev/null +++ b/playbooks/installer/rdo-manager/overcloud/deploy-overcloud/templates/main.yml @@ -0,0 +1,2 @@ +--- +- include: run.yml diff --git a/playbooks/installer/rdo-manager/overcloud/deploy-overcloud/templates/run.yml b/playbooks/installer/rdo-manager/overcloud/deploy-overcloud/templates/run.yml new file mode 100644 index 000000000..74c0f11ca --- /dev/null +++ b/playbooks/installer/rdo-manager/overcloud/deploy-overcloud/templates/run.yml @@ -0,0 +1,29 @@ +- name: setup deployment for a heat templated (templates) style deployment + hosts: undercloud + tasks: + - name: echo deploy command + shell: > + source {{ instack_user_home }}/stackrc; + source {{ instack_user_home }}/deploy-nodesrc; + source {{ instack_user_home }}/deploy-overcloudrc; + echo $DEPLOY_COMMAND + register: overcloud_deploy_command + + - name: find the env files to be used in deploy + shell: > + echo {{ overcloud_deploy_command.stdout }} | grep -o -e '\-e .*yaml' | sed s'/\-e //g' | sed s'#[A-Z a-z 0-9 _ -]*\.yaml##g' + register: env_files + + - name: clone template validation tools + git: + repo=https://github.com/openstack/tripleo-heat-templates.git + dest={{instack_user_home}}/tripleo-heat-templates + + - name: validate the yaml files + shell: > + chdir={{instack_user_home}} + python tripleo-heat-templates/tools/yaml-validate.py {{ item }} + register: validate_yaml_output + with_items: env_files.stdout.split('\n') + failed_when: validate_yaml_output.stdout.find('Validation failed on') != -1 + diff --git a/playbooks/installer/rdo-manager/overcloud/flavors/README 
b/playbooks/installer/rdo-manager/overcloud/flavors/README new file mode 100644 index 000000000..aaa0999a6 --- /dev/null +++ b/playbooks/installer/rdo-manager/overcloud/flavors/README @@ -0,0 +1 @@ +#See advanced-profile-matching diff --git a/playbooks/installer/rdo-manager/overcloud/heat-templates/README b/playbooks/installer/rdo-manager/overcloud/heat-templates/README new file mode 100644 index 000000000..e69de29bb diff --git a/playbooks/installer/rdo-manager/overcloud/heat-templates/main.yml b/playbooks/installer/rdo-manager/overcloud/heat-templates/main.yml new file mode 100644 index 000000000..16ce46cee --- /dev/null +++ b/playbooks/installer/rdo-manager/overcloud/heat-templates/main.yml @@ -0,0 +1,3 @@ +--- +- include: "pre-{{ installer.env.type }}.yml" +- include: run.yml diff --git a/playbooks/installer/rdo-manager/overcloud/heat-templates/pre-baremetal.yml b/playbooks/installer/rdo-manager/overcloud/heat-templates/pre-baremetal.yml new file mode 100644 index 000000000..0e9ec9118 --- /dev/null +++ b/playbooks/installer/rdo-manager/overcloud/heat-templates/pre-baremetal.yml @@ -0,0 +1,21 @@ +--- +- name: Copy over and modify network config template + hosts: undercloud + tasks: + - name: check that network config file exists + stat: > + path="{{base_dir}}/khaleesi-settings/hardware_environments/{{hw_env.env_type}}/network_configs/{{ installer.network.isolation }}/{{ installer.network.isolation }}.yml" + when: installer.network.isolation != 'none' + + #the long line in this task fails when broken up + - name: copy over template file (baremetal) + synchronize: > + src={{base_dir}}/khaleesi-settings/hardware_environments/{{hw_env.env_type}}/network_configs/{{ installer.network.isolation }}/{{ installer.network.isolation }}.yml + dest={{ instack_user_home }}/network-environment.yaml + when: installer.network.isolation != 'none' + + - name: copy over common environment file (baremetal) + synchronize: > + 
src={{base_dir}}/khaleesi-settings/hardware_environments/common/plan-parameter-neutron-bridge.yaml + dest={{ instack_user_home }}/plan-parameter-neutron-bridge.yaml + when: installer.network.isolation != 'none' and installer.deploy.type == 'plan' diff --git a/playbooks/installer/rdo-manager/overcloud/heat-templates/pre-virthost.yml b/playbooks/installer/rdo-manager/overcloud/heat-templates/pre-virthost.yml new file mode 100644 index 000000000..0695c3eea --- /dev/null +++ b/playbooks/installer/rdo-manager/overcloud/heat-templates/pre-virthost.yml @@ -0,0 +1,26 @@ +--- +- name: Copy over and modify network config template + hosts: undercloud + tasks: + - name: check that network config file exists + stat: > + path="{{base_dir}}/khaleesi-settings/hardware_environments/{{hw_env.env_type}}/network_configs/{{ installer.network.isolation }}/{{ installer.network.isolation }}.yml" + when: installer.network.isolation != 'none' + + #the long line in this task fails when broken up + - name: copy over template file (virt) + local_action: > + shell pushd {{ base_dir }}/khaleesi; rsync --delay-updates -F --compress --archive --rsh \ + "ssh -F ssh.config.ansible -S none -o StrictHostKeyChecking=no" \ + {{base_dir}}/khaleesi-settings/hardware_environments/{{hw_env.env_type}}/network_configs/{{ installer.network.isolation }}/{{ installer.network.isolation }}.yml \ + undercloud:{{ instack_user_home }}/network-environment.yaml + when: installer.network.isolation != 'none' + + #the long line in this task fails when broken up + - name: copy over common environment file (virt) + local_action: > + shell pushd {{ base_dir }}/khaleesi; rsync --delay-updates -F --compress --archive --rsh \ + "ssh -F ssh.config.ansible -S none -o StrictHostKeyChecking=no" \ + {{base_dir}}/khaleesi-settings/hardware_environments/common/plan-parameter-neutron-bridge.yaml undercloud:{{ instack_user_home }}/plan-parameter-neutron-bridge.yaml + when: installer.network.isolation != 'none' and installer.deploy.type 
== 'plan' + diff --git a/playbooks/installer/rdo-manager/overcloud/heat-templates/run.yml b/playbooks/installer/rdo-manager/overcloud/heat-templates/run.yml new file mode 100644 index 000000000..207cfd06b --- /dev/null +++ b/playbooks/installer/rdo-manager/overcloud/heat-templates/run.yml @@ -0,0 +1,57 @@ +--- +- name: Copy over and modify network config template + hosts: undercloud + tasks: + - name: make a nic-configs dir + file: path={{ instack_user_home }}/nic-configs state=directory + when: installer.network.isolation != 'none' + + #the long line in this task fails when broken up + - name: copy over standard nic-configs default directory + shell: > + cp /usr/share/openstack-tripleo-heat-templates/network/config/{{ installer.network.isolation | replace('_', '-') }}/*.yaml {{ instack_user_home }}/nic-configs + when: installer.network.isolation != 'none' and installer.network.isolation != 'default' + + #the long line in this task fails when broken up + - name: check if env-specific nic-configs exist + local_action: > + stat path={{base_dir}}/khaleesi-settings/hardware_environments/{{hw_env.env_type}}/network_configs/{{ installer.network.isolation }}/nic-configs/ + register: nic_config_dir + when: installer.network.isolation != 'none' + + #the long line in this task fails when broken up + - name: copy nic-configs saved version if available + synchronize: > + src={{base_dir}}/khaleesi-settings/hardware_environments/{{hw_env.env_type}}/network_configs/{{ installer.network.isolation }}/nic-configs/{{ item }}.yaml + dest={{ instack_user_home }}/nic-configs + with_items: + - controller + - compute + - ceph-storage + - cinder-storage + - swift-storage + when: installer.network.isolation != 'none' and nic_config_dir.stat.exists == True + + - name: poll for files to exist + wait_for: path={{ instack_user_home }}/nic-configs/swift-storage.yaml + when: installer.network.isolation != 'none' and installer.network.isolation != 'default' + + - name: Check for additional network 
config files + local_action: > + shell ls "{{ base_dir }}/khaleesi-settings/hardware_environments/{{ hw_env.env_type }}/network_configs/{{ installer.network.isolation }}/" + register: nic_configs + + - debug: var=nic_configs.stdout_lines + + - name: Custom Network config for node profiles + synchronize: > + src={{ base_dir }}/khaleesi-settings/hardware_environments/{{ hw_env.env_type }}/network_configs/{{ installer.network.isolation }}/{{ item }} + dest=/home/stack/nic-configs/ + ignore_errors: yes + with_items: + - controller.yaml + - compute.yaml + - cinder-storage.yaml + - swift-storage.yaml + - ceph-storage.yaml + when: item in nic_configs.stdout_lines diff --git a/playbooks/installer/rdo-manager/overcloud/introspect-nodes/main.yml b/playbooks/installer/rdo-manager/overcloud/introspect-nodes/main.yml new file mode 100644 index 000000000..f01787f39 --- /dev/null +++ b/playbooks/installer/rdo-manager/overcloud/introspect-nodes/main.yml @@ -0,0 +1,2 @@ +--- +- include: run.yml diff --git a/playbooks/installer/rdo-manager/overcloud/introspect-nodes/run.yml b/playbooks/installer/rdo-manager/overcloud/introspect-nodes/run.yml new file mode 100644 index 000000000..219bbaf8d --- /dev/null +++ b/playbooks/installer/rdo-manager/overcloud/introspect-nodes/run.yml @@ -0,0 +1,58 @@ +--- +- name: introspect nodes + hosts: undercloud + tasks: + - name: get full list of node UUIDs + shell: > + source {{ instack_user_home }}/stackrc; + ironic node-list | grep 'power' | awk '{print $2}' + register: ironic_node_full_list_uuid + + - name: start bulk introspection + shell: > + source {{ instack_user_home }}/stackrc; + openstack baremetal introspection bulk start; + when: installer.introspection_method == 'bulk' + + - name: introspect node by node + shell: > + source {{ instack_user_home }}/stackrc; + ironic node-set-maintenance {{ item }} true; + openstack baremetal introspection start {{ item }}; + export STATUS=$(openstack baremetal introspection status {{ item }} | grep 
'finished'); + while [[ $STATUS != *"True"* ]]; do + echo "Waiting for instrospection to complete."; + sleep 180; + export STATUS=$(openstack baremetal introspection status {{ item }} | grep 'finished'); + done; + openstack baremetal introspection status {{ item }} | grep 'error' + register: introspect_status + retries: 3 + delay: 5 + until: introspect_status.stdout.find("None") != -1 + with_items: ironic_node_full_list_uuid.stdout_lines + when: installer.introspection_method == 'node_by_node' + + - name: set maintenance status to false + shell: > + source {{ instack_user_home }}/stackrc; + ironic node-set-maintenance {{ item }} False + with_items: ironic_node_full_list_uuid.stdout_lines + when: installer.introspection_method == 'node_by_node' + + - name: check instrospections status + register: introspection_result + retries: 45 + delay: 20 + until: introspection_result.rc == 0 + shell: | + source {{ instack_user_home }}/stackrc + OUTPUT=$(openstack baremetal introspection bulk status) + TOTAL_NODES=$(echo "$OUTPUT" | grep -E '\w{8}-\w{4}' | wc -l) + INTROSPECTED_NODES=$(echo "$OUTPUT" | grep -E ' True *\| *None ' | wc -l) + [ "$TOTAL_NODES" == "$INTROSPECTED_NODES" ] + + - name: show profile + shell: > + source {{ instack_user_home }}/stackrc; + instack-ironic-deployment --show-profile; diff --git a/playbooks/installer/rdo-manager/overcloud/main.yml b/playbooks/installer/rdo-manager/overcloud/main.yml index b85681e17..cf2ad8fa8 100644 --- a/playbooks/installer/rdo-manager/overcloud/main.yml +++ b/playbooks/installer/rdo-manager/overcloud/main.yml @@ -1,3 +1,10 @@ --- -- include: run.yml +- include: register-nodes/main.yml +- include: introspect-nodes/main.yml +- include: advanced-profile-matching/main.yml +- include: heat-templates/main.yml +#note: ovb {{ base_dir }}/khaleesi/installer/rdo-manager/installer/openstack-virtual-baremetal/main.yml +- include: nameserver/main.yml +- include: deploy-overcloud/main.yml +- include: ansible-inventory.yml - include: 
status.yml diff --git a/playbooks/installer/rdo-manager/overcloud/nameserver/main.yml b/playbooks/installer/rdo-manager/overcloud/nameserver/main.yml new file mode 100644 index 000000000..f01787f39 --- /dev/null +++ b/playbooks/installer/rdo-manager/overcloud/nameserver/main.yml @@ -0,0 +1,2 @@ +--- +- include: run.yml diff --git a/playbooks/installer/rdo-manager/overcloud/nameserver/run.yml b/playbooks/installer/rdo-manager/overcloud/nameserver/run.yml new file mode 100644 index 000000000..98c4857ec --- /dev/null +++ b/playbooks/installer/rdo-manager/overcloud/nameserver/run.yml @@ -0,0 +1,22 @@ +- name: deploy the overcloud + hosts: undercloud + tasks: + - name: get subnet uuid + shell: > + source {{ instack_user_home }}/stackrc; + neutron subnet-list | grep {{ hw_env.network }} | sed -e 's/|//g' | awk '{print $1}' + register: subnet_uuid + when: hw_env.env_type is defined and hw_env.env_type in ['ovb_host_cloud', 'scale_lab'] + + - name: get nameserver + sudo: yes + shell: > + cat /etc/resolv.conf | grep -m 1 'nameserver' | sed -n -e 's/^.*nameserver //p' + register: nameserver + when: hw_env.env_type is defined and hw_env.env_type in ['ovb_host_cloud', 'scale_lab'] + + - name: configure a nameserver for the overcloud + shell: > + source {{ instack_user_home }}/stackrc; + neutron subnet-update {{ subnet_uuid.stdout }} --dns-nameserver {{ nameserver.stdout }} + when: hw_env.env_type is defined and hw_env.env_type in ['ovb_host_cloud', 'scale_lab'] diff --git a/playbooks/installer/rdo-manager/overcloud/register-nodes/main.yml b/playbooks/installer/rdo-manager/overcloud/register-nodes/main.yml new file mode 100644 index 000000000..f01787f39 --- /dev/null +++ b/playbooks/installer/rdo-manager/overcloud/register-nodes/main.yml @@ -0,0 +1,2 @@ +--- +- include: run.yml diff --git a/playbooks/installer/rdo-manager/overcloud/register-nodes/run.yml b/playbooks/installer/rdo-manager/overcloud/register-nodes/run.yml new file mode 100644 index 000000000..39f986bf4 --- 
/dev/null +++ b/playbooks/installer/rdo-manager/overcloud/register-nodes/run.yml @@ -0,0 +1,34 @@ +--- +- name: register nodes + hosts: undercloud + tasks: + - name: register bm nodes with openstack cli + shell: > + source {{ instack_user_home }}/stackrc; + openstack baremetal import --json instackenv.json; + register: register_nodes_result + retries: 10 + delay: 10 + until: register_nodes_result.rc == 0 + + - name: register bm nodes with ironic + shell: > + source {{ instack_user_home }}/stackrc; + openstack baremetal configure boot + register: register_nodes_result + retries: 10 + delay: 10 + until: register_nodes_result.rc == 0 + + - name: get nodes UUID + shell: > + source {{ instack_user_home }}/stackrc; + ironic node-list | grep 'power' | awk '{print $2}' | tail -3 + register: ironic_node_list_uuid + + - name: update nodes with disk size hint + shell: > + source {{ instack_user_home }}/stackrc; + ironic node-update {{ item }} add properties/root_device='{"size": {{ hw_env.disk_root_device_size | int }}}' + with_items: ironic_node_list_uuid.stdout_lines + when: (hw_env is defined) and (hw_env.disk_root_device_size is defined) and product.full_version == '8-director' diff --git a/playbooks/installer/rdo-manager/overcloud/run.yml b/playbooks/installer/rdo-manager/overcloud/run.yml deleted file mode 100644 index a0d284851..000000000 --- a/playbooks/installer/rdo-manager/overcloud/run.yml +++ /dev/null @@ -1,363 +0,0 @@ ---- -- name: register and discover nodes - hosts: undercloud - tasks: - - name: register bm nodes with openstack cli - register: register_nodes_result - retries: 10 - delay: 10 - until: register_nodes_result.rc == 0 - shell: > - source {{ instack_user_home }}/stackrc; - openstack baremetal import --json instackenv.json; - - - - name: register bm nodes with ironic - register: register_nodes_result - retries: 10 - delay: 10 - until: register_nodes_result.rc == 0 - shell: > - source {{ instack_user_home }}/stackrc; - openstack baremetal configure 
boot - - - name: get nodes UUID - shell: > - source {{ instack_user_home }}/stackrc; - ironic node-list | grep 'power' | awk '{print $2}' | tail -3 - register: ironic_node_list_uuid - - - name: update nodes with disk size hint - shell: > - source {{ instack_user_home }}/stackrc; - ironic node-update {{ item }} add properties/root_device='{"size": {{ hw_env.disk_root_device_size | int }}}' - with_items: ironic_node_list_uuid.stdout_lines - when: (hw_env is defined) and (hw_env.disk_root_device_size is defined) and product.full_version == '8-director' - - - name: get full list of node UUIDs - shell: > - source {{ instack_user_home }}/stackrc; - ironic node-list | grep 'power' | awk '{print $2}' - register: ironic_node_full_list_uuid - - - name: start bulk introspection - shell: > - source {{ instack_user_home }}/stackrc; - openstack baremetal introspection bulk start; - when: installer.introspection_method == 'bulk' - - - name: introspect node by node - shell: > - source {{ instack_user_home }}/stackrc; - ironic node-set-maintenance {{ item }} true; - openstack baremetal introspection start {{ item }}; - export STATUS=$(openstack baremetal introspection status {{ item }} | grep 'finished'); - while [[ $STATUS != *"True"* ]]; do - echo "Waiting for instrospection to complete."; - sleep 180; - export STATUS=$(openstack baremetal introspection status {{ item }} | grep 'finished'); - done; - openstack baremetal introspection status {{ item }} | grep 'error' - register: introspect_status - retries: 3 - delay: 5 - until: introspect_status.stdout.find("None") != -1 - with_items: ironic_node_full_list_uuid.stdout_lines - when: installer.introspection_method == 'node_by_node' - - - name: set maintenance status to false - shell: > - source {{ instack_user_home }}/stackrc; - ironic node-set-maintenance {{ item }} False - with_items: ironic_node_full_list_uuid.stdout_lines - when: installer.introspection_method == 'node_by_node' - - - name: check instrospections status - 
register: introspection_result - retries: 45 - delay: 20 - until: introspection_result.rc == 0 - shell: | - source {{ instack_user_home }}/stackrc - OUTPUT=$(openstack baremetal introspection bulk status) - TOTAL_NODES=$(echo "$OUTPUT" | grep -E '\w{8}-\w{4}' | wc -l) - INTROSPECTED_NODES=$(echo "$OUTPUT" | grep -E ' True *\| *None ' | wc -l) - [ "$TOTAL_NODES" == "$INTROSPECTED_NODES" ] - - - name: show profile - shell: > - source {{ instack_user_home }}/stackrc; - instack-ironic-deployment --show-profile; - -- include: run-matching-{{ installer.match_style | default('basic') }}.yml - -- name: wait for nova - hosts: undercloud - tasks: - - name: wait until nova becomes aware of first bare metal instance - register: vcpu_count_single - retries: 20 - delay: 15 - until: vcpu_count_single.stdout|int > 0 - ignore_errors: true - shell: > - source {{ instack_user_home }}/stackrc; - nova hypervisor-stats | grep ' vcpus ' | head -n1 | awk '{ print $4; }' - -- name: Copy over and modify network config template - hosts: undercloud - tasks: - - name: check that network config file exists - stat: path="{{base_dir}}/khaleesi-settings/hardware_environments/{{hw_env.env_type}}/network_configs/{{ installer.network.isolation }}/{{ installer.network.isolation }}.yml" - when: installer.network.isolation != 'none' - - - name: copy over template file (baremetal) - synchronize: > - src={{base_dir}}/khaleesi-settings/hardware_environments/{{hw_env.env_type}}/network_configs/{{ installer.network.isolation }}/{{ installer.network.isolation }}.yml dest={{ instack_user_home }}/network-environment.yaml - when: installer.network.isolation != 'none' and installer.env.type != "virthost" - - - name: copy over template file (virt) - local_action: shell pushd {{ base_dir }}/khaleesi; rsync --delay-updates -F --compress --archive --rsh "ssh -F ssh.config.ansible -S none -o StrictHostKeyChecking=no" {{base_dir}}/khaleesi-settings/hardware_environments/{{hw_env.env_type}}/network_configs/{{ 
installer.network.isolation }}/{{ installer.network.isolation }}.yml undercloud:{{ instack_user_home }}/network-environment.yaml - when: installer.network.isolation != 'none' and installer.env.type == "virthost" - - - name: copy over common environment file (baremetal) - synchronize: > - src={{base_dir}}/khaleesi-settings/hardware_environments/common/plan-parameter-neutron-bridge.yaml dest={{ instack_user_home }}/plan-parameter-neutron-bridge.yaml - when: installer.network.isolation != 'none' and installer.env.type != "virthost" and installer.deploy.type == 'plan' - - - name: copy over common environment file (virt) - local_action: shell pushd {{ base_dir }}/khaleesi; rsync --delay-updates -F --compress --archive --rsh "ssh -F ssh.config.ansible -S none -o StrictHostKeyChecking=no" {{base_dir}}/khaleesi-settings/hardware_environments/common/plan-parameter-neutron-bridge.yaml undercloud:{{ instack_user_home }}/plan-parameter-neutron-bridge.yaml - when: installer.network.isolation != 'none' and installer.env.type == "virthost" and installer.deploy.type == 'plan' - - - name: make a nic-configs dir - shell: > - mkdir {{ instack_user_home }}/nic-configs - when: installer.network.isolation != 'none' - - - name: copy over standard nic-configs default directory - shell: > - cp /usr/share/openstack-tripleo-heat-templates/network/config/{{ installer.network.isolation | replace('_', '-') }}/*.yaml {{ instack_user_home }}/nic-configs - when: installer.network.isolation != 'none' and installer.network.isolation != 'default' - - - name: check if env-specific nic-configs exist - local_action: stat path={{base_dir}}/khaleesi-settings/hardware_environments/{{hw_env.env_type}}/network_configs/{{ installer.network.isolation }}/nic-configs/ - register: nic_config_dir - when: installer.network.isolation != 'none' and installer.env.type != 'virthost' - - - name: copy nic-configs saved version if available - synchronize: > - 
src={{base_dir}}/khaleesi-settings/hardware_environments/{{hw_env.env_type}}/network_configs/{{ installer.network.isolation }}/nic-configs/{{ item }}.yaml - dest={{ instack_user_home }}/nic-configs - with_items: - - controller - - compute - - ceph-storage - - cinder-storage - - swift-storage - when: installer.network.isolation != 'none' and installer.env.type != "virthost" and nic_config_dir.stat.exists == True - - - name: poll for files to exist - wait_for: path={{ instack_user_home }}/nic-configs/swift-storage.yaml - when: installer.network.isolation != 'none' and installer.network.isolation != 'default' - - - name: Check for additional network config files - local_action: shell ls "{{ base_dir }}/khaleesi-settings/hardware_environments/{{ hw_env.env_type }}/network_configs/{{ installer.network.isolation }}/" - register: nic_configs - - - debug: var=nic_configs.stdout_lines - - - name: Custom Network config for node profiles - synchronize: > - src={{ base_dir }}/khaleesi-settings/hardware_environments/{{ hw_env.env_type }}/network_configs/{{ installer.network.isolation }}/{{ item }} dest=/home/stack/nic-configs/ - ignore_errors: yes - with_items: - - controller.yaml - - compute.yaml - - cinder-storage.yaml - - swift-storage.yaml - - ceph-storage.yaml - when: item in nic_configs.stdout_lines - -- name: Set up for custom template deploy with nova change - hosts: undercloud:&openstack_virtual_baremetal - tasks: - - name: clone openstack-virtual-baremetal repo - git: - repo=https://github.com/cybertron/openstack-virtual-baremetal/ - dest={{instack_user_home}}/openstack-virtual-baremetal - - - name: pin openstack virtual baremetal to a specific hash - shell: > - chdir={{instack_user_home}}/openstack-virtual-baremetal - git reset --hard {{ installer.custom_deploy.ovb_pin_version }} - when: installer.custom_deploy.ovb_pin_version is defined - - - name: copy tripleo-heat-templates to custom - shell: > - cp -r /usr/share/openstack-tripleo-heat-templates/ {{ 
instack_user_home }}/custom - - - name: add the necessary hieradata configuration - shell: > - echo "neutron::agents::ml2::ovs::firewall_driver: neutron.agent.firewall.NoopFirewallDriver" >> {{ instack_user_home }}/custom/puppet/hieradata/common.yaml - - - name: create param.ini file - local_action: shell echo "DNS_SERVER={{ hw_env.dns_server }}" > {{ base_dir }}/param.ini - - - name: check that param.ini file exists - local_action: wait_for path="{{ base_dir }}/param.ini" - - - name: add other variables to param.ini file - local_action: shell echo -e "PARENT_WORKSPACE_DIR={{ base_dir }}\nREMOTE_FILE_SERVER={{ installer.custom_deploy.image.remote_file_server }}\nIMAGE_NAME={{ installer.custom_deploy.image.name }}\nPROVISION_CIDR={{ installer.custom_deploy.host_cloud_networks.provision.cidr }}\nPRIVATE_CIDR={{ installer.custom_deploy.host_cloud_networks.private.cidr }}\nPUBLIC_CIDR={{ installer.custom_deploy.host_cloud_networks.public.cidr }}" >> {{ base_dir }}/param.ini - - - name: get ctlplane subnet uuid - register: ctlplane_subnet_uuid - shell: > - source {{ instack_user_home }}/stackrc; - neutron net-show ctlplane -f value -F subnets; - when: installer.env.type == "virthost" - - - name: update dns server on ctlplane - shell: > - source {{ instack_user_home }}/stackrc; - neutron subnet-update {{ ctlplane_subnet_uuid.stdout }} --dns_nameservers list=true {{ hw_env.dns_server }} - when: installer.env.type == "virthost" - -- name: deploy the overcloud - hosts: undercloud - tasks: - - name: get ironic node ids (workaround for bz 1246641) - shell: > - source {{ instack_user_home }}/stackrc; - ironic node-list | grep 'None' | awk '{ print $2; }' - register: ironic_node_ids - when: workarounds.enabled is defined and workarounds.enabled|bool - - - name: power off ironic nodes (workaround for bz 1246641) - shell: > - source {{ instack_user_home }}/stackrc; - ironic node-set-power-state {{item}} 'off' - with_items: ironic_node_ids.stdout_lines - when: workarounds.enabled 
is defined and workarounds.enabled|bool - - - name: get number of nodes that could be used for the overcloud - shell: > - if [ -f {{ instack_user_home }}/instackenv.json ]; then - cat {{ instack_user_home }}/instackenv.json | grep -o pm_addr | wc -l - else - cat {{ installer.nodes.node_count | default('3') }} - fi - register: number_of_possible_nodes - - - name: poll for nodes to be in powered off state - register: ironic_node_power_off - retries: 10 - shell: > - source {{ instack_user_home }}/stackrc; - ironic node-list | grep 'power off' | wc -l - until: ironic_node_power_off.stdout == number_of_possible_nodes.stdout - - - name: get subnet uuid - shell: > - source {{ instack_user_home }}/stackrc; - neutron subnet-list | grep {{ hw_env.network }} | sed -e 's/|//g' | awk '{print $1}' - register: subnet_uuid - when: hw_env.env_type is defined and hw_env.env_type in ['ovb_host_cloud', 'scale_lab'] - - - name: get nameserver - shell: > - cat /etc/resolv.conf | grep -m 1 'nameserver' | sed -n -e 's/^.*nameserver //p' - register: nameserver - sudo: yes - when: hw_env.env_type is defined and hw_env.env_type in ['ovb_host_cloud', 'scale_lab'] - - - name: configure a nameserver for the overcloud - shell: > - source {{ instack_user_home }}/stackrc; - neutron subnet-update {{ subnet_uuid.stdout }} --dns-nameserver {{ nameserver.stdout }} - when: hw_env.env_type is defined and hw_env.env_type in ['ovb_host_cloud', 'scale_lab'] - - - - name: get plan list - register: overcloud_uuid_result - shell: > - source {{ instack_user_home }}/stackrc; - openstack management plan list | grep overcloud | cut -d " " -f2 - - - name: set fact for openstack management plan - set_fact: - overcloud_uuid: "{{ overcloud_uuid_result.stdout }}" - - - name: copy template file with environment variables for overcloud nodes - template: - src={{ base_dir }}/khaleesi/playbooks/installer/rdo-manager/templates/deploy-nodes.j2 - dest={{ instack_user_home }}/deploy-nodesrc - mode=0755 - - - name: copy 
template file with environment variables for overcloud deploy - template: - src={{ base_dir }}/khaleesi/playbooks/installer/rdo-manager/templates/deploy-overcloudrc.j2 - dest={{ instack_user_home }}/deploy-overcloudrc - mode=0755 - - - name: set plan values for plan based ceph deployments - shell: > - source {{ instack_user_home }}/stackrc; - source {{ instack_user_home }}/deploy-nodesrc; - if [ "$CEPHSTORAGESCALE" -gt "0" ]; then - openstack management plan set {{ overcloud_uuid }} \ - -P Controller-1::CinderEnableIscsiBackend=false \ - -P Controller-1::CinderEnableRbdBackend=true \ - -P Controller-1::GlanceBackend=rbd \ - -P Compute-1::NovaEnableRbdBackend=true; - fi - when: installer.deploy.type == 'plan' - - - name: echo deploy command - register: overcloud_deploy_command - shell: > - source {{ instack_user_home }}/stackrc; - source {{ instack_user_home }}/deploy-nodesrc; - source {{ instack_user_home }}/deploy-overcloudrc; - echo $DEPLOY_COMMAND - - - name: find the env files to be used in deploy - register: env_files - shell: > - echo {{ overcloud_deploy_command.stdout }} | grep -o -e '\-e .*yaml' | sed s'/\-e //g' | sed s'#[A-Z a-z 0-9 _ -]*\.yaml##g' - - - name: clone template validation tools - git: - repo=https://github.com/openstack/tripleo-heat-templates.git - dest={{instack_user_home}}/tripleo-heat-templates - - - name: validate the yaml files - shell: > - chdir={{instack_user_home}} - python tripleo-heat-templates/tools/yaml-validate.py {{ item }} - register: validate_yaml_output - failed_when: validate_yaml_output.stdout.find('Validation failed on') != -1 - with_items: env_files.stdout.split('\n') - - - name: deploy-overcloud - register: overcloud_deployment_result - ignore_errors: yes - shell: > - source {{ instack_user_home }}/stackrc; - {{ overcloud_deploy_command.stdout }} &> overcloud_deployment_console.log - - - name: echo deploy-overcloud return code - debug: var=overcloud_deployment_result.rc - - - name: heat stack-list - ignore_errors: yes - 
shell: > - source {{ instack_user_home }}/stackrc; - heat stack-list - - - name: overcloud deployment logs - debug: msg=" Please refer to the undercloud log file for detailed status. The deployment debug logs are stored under /home/stack" - - - name: set fact overcloud_deployment_result - set_fact: - overcloud_deployment_result: "{{ overcloud_deployment_result.rc }}" - diff --git a/playbooks/installer/rdo-manager/overcloud/status.yml b/playbooks/installer/rdo-manager/overcloud/status.yml index a8a468905..f60addee6 100644 --- a/playbooks/installer/rdo-manager/overcloud/status.yml +++ b/playbooks/installer/rdo-manager/overcloud/status.yml @@ -2,6 +2,10 @@ - name: Post deploy hosts: undercloud tasks: + - name: set fact overcloud_deployment_result + set_fact: + overcloud_deployment_result: "{{ overcloud_deployment_result | default('1') }}" + - name: echo deploy-overcloud return code in status playbook debug: var=overcloud_deployment_result @@ -18,26 +22,27 @@ openstack server list; - name: heat debug deploy-overcloud failure - when: overcloud_deployment_result is defined and overcloud_deployment_result != "0" ignore_errors: yes shell: > source {{ instack_user_home }}/stackrc; heat resource-list overcloud; heat event-list overcloud; + when: overcloud_deployment_result is defined and overcloud_deployment_result != "0" - name: debug deploy-overcloud failure - when: overcloud_deployment_result is defined and overcloud_deployment_result != "0" ignore_errors: yes shell: > source {{ instack_user_home }}/stackrc; heat resource-show overcloud ControllerNodesPostDeployment; + when: overcloud_deployment_result is defined and overcloud_deployment_result != "0" - name: debug all deployment failures - when: overcloud_deployment_result is defined and overcloud_deployment_result != "0" ignore_errors: yes shell: > source {{ instack_user_home }}/stackrc; - for failed_deployment in $(heat resource-list --nested-depth 5 overcloud | grep FAILED | grep 'StructuredDeployment ' | cut -d '|' 
-f3); do heat deployment-show $failed_deployment; done; + for failed_deployment in $(heat resource-list --nested-depth 5 overcloud | grep FAILED | grep 'StructuredDeployment ' | cut -d '|' -f3); \ + do heat deployment-show $failed_deployment; done; + when: overcloud_deployment_result is defined and overcloud_deployment_result != "0" - name: grep for errors in heat-engine.log when: overcloud_deployment_result is defined and overcloud_deployment_result != "0" @@ -52,57 +57,16 @@ ignore_errors: yes - name: show ironic nodes create template - template: src={{ base_dir }}/khaleesi/playbooks/installer/rdo-manager/templates/show_nodes.sh dest={{ instack_user_home }}/show_nodes.sh mode=0755 + template: > + src={{ base_dir }}/khaleesi/playbooks/installer/rdo-manager/templates/show_nodes.sh + dest={{ instack_user_home }}/show_nodes.sh + mode=0755 when: overcloud_deployment_result is defined and overcloud_deployment_result == "0" - name: show ironic nodes shell: "{{ instack_user_home }}/show_nodes.sh" when: overcloud_deployment_result is defined and overcloud_deployment_result == "0" - - name: copy the undercloud id_rsa key back to the slave - fetch: src=~/.ssh/id_rsa dest="{{ base_dir }}/khaleesi/id_rsa_undercloud" flat=yes - - - name: copy get-overcloud-nodes.py to undercloud - template: src={{ base_dir }}/khaleesi/playbooks/installer/rdo-manager/templates/get-overcloud-nodes.py.j2 dest={{ instack_user_home }}/get-overcloud-nodes.py mode=0755 - - - name: fetch overcloud node names and IPs - register: overcloud_nodes - ignore_errors: yes - shell: > - source {{ instack_user_home }}/stackrc; - python {{ instack_user_home }}/get-overcloud-nodes.py - - - name: add each overcloud node to ansible - with_dict: overcloud_nodes.stdout - ignore_errors: yes - add_host: - name={{ item.key }} - groups=overcloud - ansible_ssh_host={{ item.key }} - ansible_fqdn={{ item.value }} - ansible_ssh_user="heat-admin" - ansible_ssh_private_key_file="{{ lookup('env', 'PWD') }}/id_rsa_undercloud" - 
-- name: regenerate the inventory file after adding hosts - hosts: localhost - tasks: - - name: set_fact for undercloud ip #required for regeneration of ssh.config.ansible - set_fact: undercloud_ip={{ hostvars['undercloud']['ansible_default_ipv4']['address'] }} - - - name: create inventory from template - template: - dest: "{{ lookup('env', 'PWD') }}/{{ tmp.node_prefix }}hosts" - src: "{{ base_dir }}/khaleesi/playbooks/provisioner/templates/inventory.j2" - - - name: symlink inventory to a static name - file: - dest: "{{ lookup('env', 'PWD') }}/hosts" - state: link - src: "{{ lookup('env', 'PWD') }}/{{ tmp.node_prefix }}hosts" - - - name: regenerate ssh config - template: src={{ base_dir }}/khaleesi/playbooks/installer/rdo-manager/templates/ssh_config.j2 dest={{ base_dir }}/khaleesi/ssh.config.ansible mode=0755 - - name: debug output from the overcloud controller hosts: overcloud-controller-0 gather_facts: no diff --git a/playbooks/installer/rdo-manager/register-nodes.yml b/playbooks/installer/rdo-manager/register-nodes.yml new file mode 100644 index 000000000..1358ee44d --- /dev/null +++ b/playbooks/installer/rdo-manager/register-nodes.yml @@ -0,0 +1,2 @@ +--- +- include: overcloud/register-nodes/main.yml diff --git a/playbooks/installer/rdo-manager/overcloud/templates/edeploy-state.j2 b/playbooks/installer/rdo-manager/templates/edeploy-state.j2 similarity index 100% rename from playbooks/installer/rdo-manager/overcloud/templates/edeploy-state.j2 rename to playbooks/installer/rdo-manager/templates/edeploy-state.j2 From 3c3f152343d709fa48bac299e82782a119f685cc Mon Sep 17 00:00:00 2001 From: Arie Bregman Date: Sat, 30 Jan 2016 22:22:32 +0200 Subject: [PATCH 038/137] Improve 'component testing' documentation This introduce the following changes: * Document the only requirement needed for running component tests. * Fix functional examples, since functional no longer require OpenStack installation. 
Change-Id: Ie51a7f0b9a7e45c4f0c9b5d827a66f52e838e7e3 --- doc/khaleesi.rst | 60 +++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 57 insertions(+), 3 deletions(-) diff --git a/doc/khaleesi.rst b/doc/khaleesi.rst index b478e9e9b..8ef71a613 100644 --- a/doc/khaleesi.rst +++ b/doc/khaleesi.rst @@ -363,6 +363,61 @@ up the environment needed for running the tests: Testers are passed to the ksgen CLI as '--tester=' argument value: pep8, unittest, functional, integration, api, tempest +Requirements: + +There is only one requirement and it's to have an jenkins-config yml file in +the root of the component directory. For example, if the component is neutron, +then there should be an neutron/jenkins-config.yml file. The name may differ +and can be set by using --extra-vars tester.component.config_file in ksgen +invocation. + +The structure of an jenkins-config should be similar to: + +----------------------- jenkins-config sample beginning------------------------ +# Khaleesi will read and execute this section only if --tester=pep8 included in ksgen invocation +pep8: + rpm_deps: [ python-neutron, python-hacking, pylint ] + remove_rpm: [] + run: tox --sitepackages -v -e pep8 2>&1 | tee ../logs/testrun.log; + +# Khaleesi will read and execute this section only if --tester=unittest included in ksgen invocation +unittest: + rpm_deps: [ python-neutron, python-cliff ] + remove_rpm: [] + run: tox --sitepackages -v -e py27 2>&1 | tee ../logs/testrun.log; + +# Common RPMs that are used by all the testers +rpm_deps: [ gcc, git, "{{ hostvars[inventory_hostname][tester.component.tox_target]['rpm_deps'] }}" ] + +# The RPMs that shouldn't be installed when running tests, no matter which tester chosen +remove_rpm: [ "{{ hostvars[inventory_hostname][tester.component.tox_target]['remove_rpm'] }}" ] + +# Common pre-run steps for all testers +neutron_virt_run_config: + run: > + set -o pipefail; + rpm -qa > installed-rpms.txt; + truncate --size 0 requirements.txt && truncate --size 
0 test-requirements.txt; + {{ hostvars[inventory_hostname][tester.component.tox_target]['run'] }} + +# Files to archive + archive: + - ../logs/testrun.log + - installed-rpms.txt + +# Main section that will be read by khaleesi +test_config: + virt: + RedHat-7: + setup: + install: "{{ rpm_deps }}" # Optional. When you would like to install requirements + remove: "{{ remove_rpm }}" # Optional. When you would like to remove packages + run: "{{ neutron_virt_run_config.run }}" # A must. The actual command used to run the tests + archive: "{{ neutron_virt_run_config.archive }}" # A must. Files to archive +----------------------- jenkins-config sample end ------------------------ + +Usage: + Below are examples on how to use the different testers: To run pep8 you would use the following ksgen invocation: @@ -399,10 +454,9 @@ To run functional tests, you would use: --provisioner-site=qeos \ --distro=rhel-7.2 \ --product=rhos \ - --installer=packstack \ - --installer-config=full \ # To install single component use basic_neutron + --installer=project \ + --installer-component=heat \ --tester=functional \ - --installer-component=neutron ksgen_settings.yml To run API in-tree tests, you would use: From 0143bdbe768dc6d314190875a4af2afe5abf4ed0 Mon Sep 17 00:00:00 2001 From: Luigi Toscano Date: Fri, 29 Jan 2016 18:19:34 +0100 Subject: [PATCH 039/137] Explicit requirement of git for integration tests Remove the requirement of git package from the set of required packages for the Horizon integration tests and add it explicitly in the code, as it is needed by the Ansible git module. 
Change-Id: I9ec24fce0fd2b4e119bdeaa875219a0e8cf91322 --- playbooks/tester/integration/pre.yml | 3 +++ settings/tester/integration/component/horizon.yml | 1 - 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/playbooks/tester/integration/pre.yml b/playbooks/tester/integration/pre.yml index 9ca821d4c..520f81023 100644 --- a/playbooks/tester/integration/pre.yml +++ b/playbooks/tester/integration/pre.yml @@ -20,6 +20,9 @@ - yum: name={{ item }} state=present with_items: tester.packages + # requirements of Ansible git modules + - yum: name=git state=present + - yum: name=python-virtualenv state=present when: "tester.pip_packages is defined and tester.pip_packages|length > 0" diff --git a/settings/tester/integration/component/horizon.yml b/settings/tester/integration/component/horizon.yml index 30da5e1b5..2bccae96a 100644 --- a/settings/tester/integration/component/horizon.yml +++ b/settings/tester/integration/component/horizon.yml @@ -41,7 +41,6 @@ tester: - python-virtualenv - firefox - unzip - - git - python-keystoneclient - xorg-x11-server-Xvfb - xorg-x11-font* From 8baec26e79f7cfc83d5926f3468b3f5bb88ac2ad Mon Sep 17 00:00:00 2001 From: Harry Rybacki Date: Thu, 28 Jan 2016 13:35:16 -0500 Subject: [PATCH 040/137] Allow for project tests with manual provisioner At some point this functionality was lost. Modifying settings to allow users to run tests on a single node with a manual provisioner. 
Note: Sample ksgen call ksgen --config-dir settings generate --provisioner=manual \ --product=rhos --product-repo=poodle --product-version=7.0 \ --distro=rhel-7.2 --installer=project --installer-component=heat \ --tester=unittest --provisioner-topology=single_node \ --extra-vars @../khaleesi-settings/settings/product/rhos/private_settings/redhat_internal.yml \ ksgen_settings.yml Change-Id: Ic8eab9ddf7da32bd1c79c67bf237b491786e5eda --- playbooks/provisioner/manual/main.yml | 2 +- settings/provisioner/manual/topology/single_node.yml | 10 ++++++++++ 2 files changed, 11 insertions(+), 1 deletion(-) create mode 100644 settings/provisioner/manual/topology/single_node.yml diff --git a/playbooks/provisioner/manual/main.yml b/playbooks/provisioner/manual/main.yml index 9b974fd8e..71c789716 100644 --- a/playbooks/provisioner/manual/main.yml +++ b/playbooks/provisioner/manual/main.yml @@ -10,7 +10,7 @@ with_dict: provisioner.nodes - name: Add the host to the inventory - when: installer.type in ['rdo-manager'] + when: installer.type in ['rdo-manager', 'project'] add_host: name="{{ item.value.name }}" groups="{{ item.value.groups diff --git a/settings/provisioner/manual/topology/single_node.yml b/settings/provisioner/manual/topology/single_node.yml new file mode 100644 index 000000000..02b3797ea --- /dev/null +++ b/settings/provisioner/manual/topology/single_node.yml @@ -0,0 +1,10 @@ +provisioner: + nodes: + host0: + name: host0 + remote_user: root + hostname: "{{ lookup('env', 'TEST_MACHINE') }}" + groups: + - provisioned + - controller + - tester From bf6a587e83916cb2410d3e6734056e3f99d44370 Mon Sep 17 00:00:00 2001 From: Arie Bregman Date: Wed, 21 Oct 2015 09:59:22 +0300 Subject: [PATCH 041/137] Add opendaylight playbooks OpenDaylight is one of neutron's backends. This adds the necessary playbooks to configure opendaylight ( with L3 ) as neutron back-end and install opendaylight on dedicated node with one of the following installation methods: 1. RPM (opendaylight) 2. 
URL of a zip file 3. Source (mvn build) It also adds tempest tests configuration. Opendaylight isn't supporting ipv6 and security group at the moment, so those are skipped. This supports OpenStack all-in-one topology and not distributed. Change-Id: I149537e3e82f0b17a60eee6b921ec4f96efa8376 --- playbooks/full-job-opendaylight.yml | 5 ++ playbooks/full-job-patch-opendaylight.yml | 44 ++++++++++ .../opendaylight/configure_neutron.yml | 38 +++++++++ .../opendaylight/install_odl_driver.yml | 17 ++++ .../opendaylight/install_odl_rpm.yml | 7 ++ .../opendaylight/install_odl_source.yml | 83 +++++++++++++++++++ .../opendaylight/install_odl_zip.yml | 28 +++++++ .../packstack/opendaylight/main.yml | 7 ++ .../packstack/opendaylight/start_odl.yml | 19 +++++ .../packstack/opendaylight/start_services.yml | 40 +++++++++ .../packstack/opendaylight/stop_services.yml | 26 ++++++ .../templates/component-test-copr-repo.j2 | 7 ++ .../templates/epel-apache-maven.j2 | 13 +++ .../opendaylight/templates/m2_settings.j2 | 43 ++++++++++ .../opendaylight/templates/mock_config.j2 | 20 +++++ .../openstack/topology/all-in-one-odl.yml | 46 ++++++++++ .../tempest/tests/neutron_opendaylight.yml | 75 +++++++++++++++++ 17 files changed, 518 insertions(+) create mode 100644 playbooks/full-job-opendaylight.yml create mode 100644 playbooks/full-job-patch-opendaylight.yml create mode 100644 playbooks/post-deploy/packstack/opendaylight/configure_neutron.yml create mode 100644 playbooks/post-deploy/packstack/opendaylight/install_odl_driver.yml create mode 100644 playbooks/post-deploy/packstack/opendaylight/install_odl_rpm.yml create mode 100644 playbooks/post-deploy/packstack/opendaylight/install_odl_source.yml create mode 100644 playbooks/post-deploy/packstack/opendaylight/install_odl_zip.yml create mode 100644 playbooks/post-deploy/packstack/opendaylight/main.yml create mode 100644 playbooks/post-deploy/packstack/opendaylight/start_odl.yml create mode 100644 
playbooks/post-deploy/packstack/opendaylight/start_services.yml create mode 100644 playbooks/post-deploy/packstack/opendaylight/stop_services.yml create mode 100644 playbooks/post-deploy/packstack/opendaylight/templates/component-test-copr-repo.j2 create mode 100644 playbooks/post-deploy/packstack/opendaylight/templates/epel-apache-maven.j2 create mode 100644 playbooks/post-deploy/packstack/opendaylight/templates/m2_settings.j2 create mode 100644 playbooks/post-deploy/packstack/opendaylight/templates/mock_config.j2 create mode 100644 settings/provisioner/openstack/topology/all-in-one-odl.yml create mode 100644 settings/tester/tempest/tests/neutron_opendaylight.yml diff --git a/playbooks/full-job-opendaylight.yml b/playbooks/full-job-opendaylight.yml new file mode 100644 index 000000000..51bc03801 --- /dev/null +++ b/playbooks/full-job-opendaylight.yml @@ -0,0 +1,5 @@ +--- +- include: provision.yml +- include: install.yml +- include: post-deploy/{{ installer.type }}/opendaylight/main.yml +- include: test.yml diff --git a/playbooks/full-job-patch-opendaylight.yml b/playbooks/full-job-patch-opendaylight.yml new file mode 100644 index 000000000..58a31e22e --- /dev/null +++ b/playbooks/full-job-patch-opendaylight.yml @@ -0,0 +1,44 @@ +--- +- name: Patch rpm + hosts: local + roles: + - patch_rpm + +- include: provision.yml + +- name: Create local repo for patched rpm + hosts: controller + tasks: + - name: Install release tool + sudo: yes + command: "yum localinstall -y {{ product.rpm }}" + + - name: Execute rhos-release for packstack poodle/puddle + sudo: yes + command: "rhos-release {{ product.full_version|int }} {{ product.repo.rhos_release.extra_args|join(' ') }}" + + - name: Install createrepo + sudo: yes + yum: name=createrepo state=present + + - name: create repo folder + file: path=/home/{{ ansible_ssh_user }}/dist-git/ state=directory + + - name: copy the generated rpms + copy: src={{ item }} dest=/home/{{ ansible_ssh_user }}/dist-git/{{ patch.dist_git.name }}/ + 
with_fileglob: + - "{{ lookup('env', 'PWD') }}/generated_rpms/*.rpm" + + - name: Setup repository for patched rpm + sudo: yes + template: "src={{ lookup('env', 'PWD') }}/roles/patch_rpm/templates/patched_rpms.j2 dest=/etc/yum.repos.d/patched_rpms.repo" + when: hostvars["localhost"].rpm_build_rc == 0 + + - name: Create local repo for patched rpm + sudo: yes + shell: "createrepo /home/{{ ansible_ssh_user }}/dist-git/{{ patch.dist_git.name }}" + when: hostvars["localhost"].rpm_build_rc == 0 + +- include: install.yml +- include: post-deploy/{{ installer.type }}/opendaylight/main.yml +- include: test.yml diff --git a/playbooks/post-deploy/packstack/opendaylight/configure_neutron.yml b/playbooks/post-deploy/packstack/opendaylight/configure_neutron.yml new file mode 100644 index 000000000..d7fd4b0a2 --- /dev/null +++ b/playbooks/post-deploy/packstack/opendaylight/configure_neutron.yml @@ -0,0 +1,38 @@ +- name: Configure neutron to use opendaylight + hosts: controller + sudo: yes + tasks: + - name: set mechanism drivers + ini_file: + dest="/etc/neutron/plugins/ml2/ml2_conf.ini" + section="ml2" + option={{ item.option }} + value={{ item.value }} + with_items: + - { option: 'mechanism_drivers', value: 'opendaylight' } + - { option: 'tenant_network_types', value: 'vxlan' } + + - name: Add opendaylight to ML2 configuration + ini_file: + dest="/etc/neutron/plugins/ml2/ml2_conf.ini" + section="ml2_odl" + option={{ item.option }} + value={{ item.value }} + with_items: + - { option: 'password', value: 'admin' } + - { option: 'username', value: 'admin' } + - { option: 'url', value: 'http://{{ hostvars[provisioner.nodes.odl_controller.name].ansible_default_ipv4.address }}:8080/controller/nb/v2/neutron' } + + - name: Configure neutron to use OpenDaylight L3 + shell: > + sed -i "s/router,//g" /etc/neutron/neutron.conf; + sed -i "/^service_plugins/s/$/,networking_odl.l3.l3_odl.OpenDaylightL3RouterPlugin/" /etc/neutron/neutron.conf + + - name: Clean neutron ML2 database + shell: > + 
export db_connection=`sudo grep ^connection /etc/neutron/neutron.conf`; + export db_name=`echo $db_connection | rev | cut -d/ -f1 | rev | cut -d? -f1`; + sudo mysql -e "drop database if exists $db_name;"; + sudo mysql -e "create database $db_name character set utf8;"; + sudo mysql -e "grant all on $db_name.* to 'neutron'@'%';"; + sudo neutron-db-manage --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini upgrade head diff --git a/playbooks/post-deploy/packstack/opendaylight/install_odl_driver.yml b/playbooks/post-deploy/packstack/opendaylight/install_odl_driver.yml new file mode 100644 index 000000000..3f6d06f55 --- /dev/null +++ b/playbooks/post-deploy/packstack/opendaylight/install_odl_driver.yml @@ -0,0 +1,17 @@ +--- +- name: Attach ovs to an active opendaylight controller + hosts: controller + sudo: yes + tasks: + - name: Start openvswitch + service: name=openvswitch state=running + + - name: Attach ovs to opendaylight controller + command: ovs-vsctl set-manager tcp:{{ hostvars[provisioner.nodes.odl_controller.name].ansible_default_ipv4.address }}:6640 + +- name: Install opendaylight driver using rpm + hosts: controller + sudo: yes + tasks: + - name: Install opendaylight driver + yum: name=python-networking-odl state=latest diff --git a/playbooks/post-deploy/packstack/opendaylight/install_odl_rpm.yml b/playbooks/post-deploy/packstack/opendaylight/install_odl_rpm.yml new file mode 100644 index 000000000..34f52d62e --- /dev/null +++ b/playbooks/post-deploy/packstack/opendaylight/install_odl_rpm.yml @@ -0,0 +1,7 @@ +--- +- name: Install OpenDaylight distribution + hosts: odl_controller + sudo: yes + tasks: + - name: Install OpenDaylight distribution using an RPM + yum: name=opendaylight state=present diff --git a/playbooks/post-deploy/packstack/opendaylight/install_odl_source.yml b/playbooks/post-deploy/packstack/opendaylight/install_odl_source.yml new file mode 100644 index 
000000000..56420f2ad --- /dev/null +++ b/playbooks/post-deploy/packstack/opendaylight/install_odl_source.yml @@ -0,0 +1,83 @@ +--- +- name: Prepare environment for building odl from source + hosts: odl_controller + sudo: yes + tasks: + - name: Create the COPR repos required for component tests + template: src=templates/component-test-copr-repo.j2 dest=/etc/yum.repos.d/component-test-copr.repo + + - name: Install rhpkg repo + command: "yum localinstall -y {{ distro.repo.rhpkg }}" + + - name: Install apache-maven repo + template: src=templates/epel-apache-maven.j2 dest=/etc/yum.repos.d/epel-apache-maven.repo + + - name: Install required RPMs for the build + yum: name="{{ item }}" state=present + with_items: + - mock + - git + - GitPython + - apache-maven + + - name: Install settings + template: src=templates/m2_settings.j2 dest=/usr/share/apache-maven/conf/settings.xml + + - name: Create mock configuration for the build + template: src=templates/mock_config.j2 dest=/etc/mock/rhos-{{ product.full_version }}-odl-rhel-{{ ansible_distribution_version|int }}-build.cfg + + - name: Add entries to hosts file + lineinfile: + dest="/etc/hosts" + insertafter=EOF + line="{{ item }}" + with_items: + - '127.1.0.1 nexus.opendaylight.org' + - '127.1.0.2 repo.maven.apache.org' + - '127.1.0.3 oss.sonatype.org' + - '127.1.0.4 registry.npmjs.org' + + - name: Clone opendayligt dist-git + git: repo='{{ odl.dist_git.url }}' + version='{{ odl.dist_git.branch }}' + dest='/home/{{ ansible_ssh_user}}/opendaylight' + accept_hostkey=true + + - name: Clone maven-chain-builder + git: repo=https://github.com/bregman-arie/maven-chain-builder.git + dest='/home/{{ ansible_ssh_user }}/maven-chain-builder' + accept_hostkey=true + + - name: Install PME + get_url: url={{ odl.pme.url }} dest=/usr/share/apache-maven/lib/ext + +- name: Build opendaylight + hosts: odl_controller + sudo: yes + tasks: + - name: Prepare chain file + args: + chdir: /home/{{ ansible_ssh_user}}/maven-chain-builder + shell: > + sudo 
sed -i "s/\$TAG_TO_BUILD/rhos-{{ product.full_version }}-patches/g" /home/{{ ansible_ssh_user }}/opendaylight/make-vars; + /home/{{ ansible_ssh_user }}/opendaylight/make-vars; + cp /home/{{ ansible_ssh_user}}/opendaylight/opendaylight-chain/opendaylight-chain.ini .; + cd /home/{{ ansible_ssh_user }}/opendaylight && git checkout -- make-vars && git checkout -- opendaylight-chain/opendaylight-chain.ini && cd -; + redhat_version=`cat /home/{{ ansible_ssh_user }}/opendaylight/*/*.ini | grep "redhat_version = " | cut -d= -f2 | xargs`; + sed -i "s/\%(redhat_version)s/$redhat_version/g" *.ini; + bomver=`cat /home/{{ ansible_ssh_user }}/opendaylight/*/*.ini | grep "bomversion = " | cut -d= -f2 | xargs`; + sed -i "s/\%\(bomversion\)s/$bomver/g" *.ini; + sed -i "s/skipTests/skipTests=true/g" *.ini; + sed -i "s/properties = /\n/g" *.ini + + - name: Run apache-chain-builder and build the opendaylight disturbution + args: + chdir: /home/{{ ansible_ssh_user}}/maven-chain-builder + shell: "python maven-chain-builder.py opendaylight-chain.ini {{ ansible_ssh_user }}" + +- name: Prepare opendaylight distribution for run + hosts: odl_controller + sudo: yes + tasks: + - name: Extract odl distribution to /opt/karaf + shell: "tar -zxf /tmp/org/opendaylight/ovsdb/karaf/*/*.tar.gz -C /opt && mv /opt/karaf* /opt/opendaylight" diff --git a/playbooks/post-deploy/packstack/opendaylight/install_odl_zip.yml b/playbooks/post-deploy/packstack/opendaylight/install_odl_zip.yml new file mode 100644 index 000000000..2b80237df --- /dev/null +++ b/playbooks/post-deploy/packstack/opendaylight/install_odl_zip.yml @@ -0,0 +1,28 @@ +--- +- name: Deploy OpenDaylight using zip file + hosts: odl_controller + sudo: yes + tasks: + - name: Download zip file + get_url: + url="{{ opendaylight.distribution.zip }}" + dest=/tmp/karaf.zip + + - name: Ensure unzip installed to extract OpenDaylight distribution + yum: + name=unzip + state=present + + - name: Ensure java installed to run OpenDaylight + yum: + name=java + 
state=present + + - name: Unzip OpenDaylight distribution + unarchive: + src=/tmp/karaf.zip + dest=/opt + copy=no + + - name: Rename directory to opendaylight + shell: mv /opt/karaf* /opt/opendaylight diff --git a/playbooks/post-deploy/packstack/opendaylight/main.yml b/playbooks/post-deploy/packstack/opendaylight/main.yml new file mode 100644 index 000000000..a955159f7 --- /dev/null +++ b/playbooks/post-deploy/packstack/opendaylight/main.yml @@ -0,0 +1,7 @@ +--- +- include: "install_odl_{{ odl.install.type| default('rpm') }}.yml" +- include: start_odl.yml +- include: stop_services.yml +- include: install_odl_driver.yml +- include: configure_neutron.yml +- include: start_services.yml diff --git a/playbooks/post-deploy/packstack/opendaylight/start_odl.yml b/playbooks/post-deploy/packstack/opendaylight/start_odl.yml new file mode 100644 index 000000000..97d051a52 --- /dev/null +++ b/playbooks/post-deploy/packstack/opendaylight/start_odl.yml @@ -0,0 +1,19 @@ +--- +- name: Start OpenDaylight distribution + hosts: odl_controller + sudo: yes + vars: + odl_controller_name: "{{ provisioner.nodes.odl_controller.name }}" + tasks: + - name: Enable traffic from OpenStack Controller to OpenDaylight node + shell: iptables -I INPUT -j ACCEPT -p tcp -s {{ hostvars[provisioner.nodes.controller.name].ansible_default_ipv4.address }} + + - name: Add L3 configuration + shell: > + echo "ovsdb.l3.fwd.enabled=yes" >> /opt/opendaylight/etc/custom.properties; + eth0_mac_address={{ hostvars[odl_controller_name]['ansible_eth0']['macaddress'] }}; + echo "ovsdb.l3gateway.mac=$eth0_mac_address" >> /opt/opendaylight/etc/custom.properties + + - name: Run controller + command: "sh /opt/opendaylight/bin/start" + async: 20 diff --git a/playbooks/post-deploy/packstack/opendaylight/start_services.yml b/playbooks/post-deploy/packstack/opendaylight/start_services.yml new file mode 100644 index 000000000..f56829718 --- /dev/null +++ b/playbooks/post-deploy/packstack/opendaylight/start_services.yml @@ -0,0 
+1,40 @@ +--- +- name: Start neutron-server + hosts: controller + sudo: yes + tasks: + - name: Start neutron-server service + service: name=neutron-server + state=running + + # Required for running tests + - name: Create an external network + quantum_network: + state: present + auth_url: "http://{{ hostvars[inventory_hostname].ansible_default_ipv4.address }}:35357/v2.0/" + login_username: admin + login_password: "{{ hostvars[inventory_hostname].admin_password | default('redhat') }}" + login_tenant_name: admin + name: "{{ installer.network.name }}" + provider_network_type: "{{ installer.network.external.provider_network_type }}" + provider_physical_network: "{{ installer.network.label }}" + provider_segmentation_id: "{{ installer.network.external.vlan.tag|default(omit) }}" + router_external: yes + shared: no + admin_state_up: yes + + - name: Create subnet for external network + quantum_subnet: + state: present + auth_url: "http://{{ hostvars[inventory_hostname].ansible_default_ipv4.address }}:35357/v2.0/" + login_username: admin + login_password: "{{ hostvars[inventory_hostname].admin_password | default('redhat') }}" + login_tenant_name: admin + tenant_name: admin + network_name: "{{ installer.network.name }}" + name: external-subnet + enable_dhcp: False + gateway_ip: "{{ provisioner.network.network_list.external.nested.subnet_gateway }}" + cidr: "{{ provisioner.network.network_list.external.nested.subnet_cidr}}" + allocation_pool_start: "{{ provisioner.network.network_list.external.nested.allocation_pool_start }}" + allocation_pool_end: "{{ provisioner.network.network_list.external.nested.allocation_pool_end }}" diff --git a/playbooks/post-deploy/packstack/opendaylight/stop_services.yml b/playbooks/post-deploy/packstack/opendaylight/stop_services.yml new file mode 100644 index 000000000..16726ab1c --- /dev/null +++ b/playbooks/post-deploy/packstack/opendaylight/stop_services.yml @@ -0,0 +1,26 @@ +--- +- name: Stop networking services + hosts: controller + sudo: yes 
+ tasks: + - name: Stop neutron-server + service: name=neutron-server + state=stopped + + - name: Stop neutron-openvswitch-agent + service: name=neutron-openvswitch-agent + state=stopped + + - name: Stop openvswitch + service: name=openvswitch + state=stopped + +- name: Remove openvswitch logs and configuration + hosts: controller + sudo: yes + tasks: + - name: Remove openvswitch logs + command: "rm -rf /var/log/openvswitch/*" + + - name: Remove openvswitch configuration + command: "rm -rf /etc/openvswitch/conf.db" \ No newline at end of file diff --git a/playbooks/post-deploy/packstack/opendaylight/templates/component-test-copr-repo.j2 b/playbooks/post-deploy/packstack/opendaylight/templates/component-test-copr-repo.j2 new file mode 100644 index 000000000..89c52ecee --- /dev/null +++ b/playbooks/post-deploy/packstack/opendaylight/templates/component-test-copr-repo.j2 @@ -0,0 +1,7 @@ +[abregman-el{{ ansible_distribution_version|int }}-rhos{{ product.version.major }}-test-deps] +name=Copr repo for el{{ ansible_distribution_version|int }}-rhos{{ product.version.major }}-test-deps owned by abregman +baseurl=https://copr-be.cloud.fedoraproject.org/results/abregman/el{{ ansible_distribution_version|int }}-rhos{{ product.version.major}}-test-deps/epel-{{ ansible_distribution_version|int }}-$basearch/ +skip_if_unavailable=True +gpgcheck=0 +gpgkey=https://copr-be.cloud.fedoraproject.org/results/abregman/el{{ ansible_distribution_version|int }}-rhos{{ product.version.major}}-test-deps/pubkey.gpg +enabled=1 diff --git a/playbooks/post-deploy/packstack/opendaylight/templates/epel-apache-maven.j2 b/playbooks/post-deploy/packstack/opendaylight/templates/epel-apache-maven.j2 new file mode 100644 index 000000000..5bfa544fb --- /dev/null +++ b/playbooks/post-deploy/packstack/opendaylight/templates/epel-apache-maven.j2 @@ -0,0 +1,13 @@ +[epel-apache-maven] +name=maven from apache foundation. 
+baseurl=http://repos.fedorapeople.org/repos/dchen/apache-maven/epel-$releasever/$basearch/ +enabled=1 +skip_if_unavailable=1 +gpgcheck=0 + +[epel-apache-maven-source] +name=maven from apache foundation. - Source +baseurl=http://repos.fedorapeople.org/repos/dchen/apache-maven/epel-$releasever/SRPMS +enabled=0 +skip_if_unavailable=1 +gpgcheck=0 diff --git a/playbooks/post-deploy/packstack/opendaylight/templates/m2_settings.j2 b/playbooks/post-deploy/packstack/opendaylight/templates/m2_settings.j2 new file mode 100644 index 000000000..a9b28a6b0 --- /dev/null +++ b/playbooks/post-deploy/packstack/opendaylight/templates/m2_settings.j2 @@ -0,0 +1,43 @@ + + + + + opendaylight-release + + + opendaylight-mirror + opendaylight-mirror + {{ private.distro.rhel.download_server }}/brewroot/repos/rhos-{{ product.full_version }}-odl-rhel-{{ ansible_distribution_version|int }}-build/latest/maven/ + + true + never + + + false + + + + + + opendaylight-mirror + opendaylight-mirror + {{ private.distro.rhel.download_server }}/brewroot/repos/rhos-{{ product.full_version }}-odl-rhel-{{ ansible_distribution_version|int }}-build/latest/maven/ + + true + never + + + false + + + + + + + + + opendaylight-release + + diff --git a/playbooks/post-deploy/packstack/opendaylight/templates/mock_config.j2 b/playbooks/post-deploy/packstack/opendaylight/templates/mock_config.j2 new file mode 100644 index 000000000..a2991d5b0 --- /dev/null +++ b/playbooks/post-deploy/packstack/opendaylight/templates/mock_config.j2 @@ -0,0 +1,20 @@ +config_opts['chroothome'] = '/builddir' +config_opts['use_host_resolv'] = True +config_opts['basedir'] = '/var/lib/mock' +config_opts['rpmbuild_timeout'] = 86400 +config_opts['yum.conf'] = '[main]\ncachedir=/var/cache/yum\ndebuglevel=9\nlogfile=/var/log/yum.log\nreposdir=/dev/null\nretries=20\nobsoletes=1\ngpgcheck=0\nassumeyes=1\n\n# repos\n\n[build]\nname=build\nbaseurl={{ private.distro.rhel.download_server }}/brewroot/repos/rhos-{{ product.full_version }}-odl-rhel-{{ 
ansible_distribution_version|int }}-build/latest/x86_64\n' +config_opts['chroot_setup_cmd'] = 'groupinstall maven-build' +config_opts['target_arch'] = 'x86_64' +config_opts['root'] = 'rhos-{{ product.full_version }}-odl-rhel-{{ ansible_distribution_version|int }}-build' + +config_opts['plugin_conf']['root_cache_enable'] = False +config_opts['plugin_conf']['yum_cache_enable'] = False +config_opts['plugin_conf']['ccache_enable'] = False + +config_opts['macros']['%_host'] = 'x86_64-koji-linux-gnu' +config_opts['macros']['%_host_cpu'] = 'x86_64' +config_opts['macros']['%vendor'] = 'Koji' +config_opts['macros']['%distribution'] = 'Koji Testing' +config_opts['macros']['%_topdir'] = '/builddir/build' +config_opts['macros']['%_rpmfilename'] = '%%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm' +config_opts['macros']['%packager'] = 'Koji' diff --git a/settings/provisioner/openstack/topology/all-in-one-odl.yml b/settings/provisioner/openstack/topology/all-in-one-odl.yml new file mode 100644 index 000000000..3f56753ac --- /dev/null +++ b/settings/provisioner/openstack/topology/all-in-one-odl.yml @@ -0,0 +1,46 @@ +--- +provisioner: + nodes: + controller: &controller + name: '{{ tmp.node_prefix }}controller' + hostname: + rebuild: no + flavor_id: !lookup provisioner.flavor.large + image_id: !lookup provisioner.images[ !lookup distro.name ][ !lookup distro.full_version ].id + remote_user: !lookup provisioner.images[ !lookup distro.name ][ !lookup distro.full_version ].remote_user + network: &network_params + interfaces: &interfaces + data: &data_interface + label: eth1 + config_params: &data_interface_params + bootproto: static + ipaddr: 10.0.0.1 + netmask: 255.255.255.0 + nm_controlled: "no" + type: ethernet + onboot: yes + device: !lookup provisioner.nodes.controller.network.interfaces.data.label + external: &external_interface + label: eth2 + groups: + - controller + - network + - compute + - openstack_nodes + + odl_controller: + <<: *controller + name: '{{ tmp.node_prefix 
}}odl_controller' + network: + <<: *network_params + interfaces: + <<: *interfaces + data: + <<: *data_interface + config_params: + <<: *data_interface_params + ipaddr: 10.0.0.2 + + groups: + - odl_controller + - openstack_nodes diff --git a/settings/tester/tempest/tests/neutron_opendaylight.yml b/settings/tester/tempest/tests/neutron_opendaylight.yml new file mode 100644 index 000000000..20ae5e640 --- /dev/null +++ b/settings/tester/tempest/tests/neutron_opendaylight.yml @@ -0,0 +1,75 @@ +tester: + tempest: + test_regex: tempest\.api\.network\|tempest\.scenario\.*network + whitelist: [] + blacklist: [tempest.scenario.test_security_groups_basic_ops.TestSecurityGroupsBasicOps.test_in_tenant_traffic, + tempest.scenario.test_security_groups_basic_ops.TestSecurityGroupsBasicOps.test_multiple_security_groups, + tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_connectivity_between_vms_on_different_networks, + tempest.scenario.test_network_v6.TestGettingAddress.test_dhcp6_stateless_from_os, + tempest.scenario.test_network_v6.TestGettingAddress.test_dualnet_dhcp6_stateless_from_os, + tempest.scenario.test_snapshot_pattern.TestSnapshotPattern.test_snapshot_pattern, + tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_delete_subnet_with_allocation_pools, + tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_delete_subnet_with_gw_and_allocation_pools, + tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_delete_slaac_subnet_with_ports, + tempest.api.network.test_allowed_address_pair.AllowedAddressPairIpV6TestJSON, + tempest.api.network.test_dhcp_ipv6.NetworksTestDHCPv6.test_dhcp_stateful_fixedips, + tempest.api.network.test_ports.PortsAdminExtendedAttrsIpV6TestJSON.test_create_port_binding_ext_attr, + tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_delete_subnet_with_default_gw, + tempest.api.network.test_dhcp_ipv6.NetworksTestDHCPv6.test_dhcp_stateful_router, + 
tempest.api.network.test_dhcp_ipv6.NetworksTestDHCPv6.test_dhcpv6_invalid_options, + tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_bulk_port, + tempest.api.network.test_dhcp_ipv6.NetworksTestDHCPv6.test_dhcpv6_stateless_no_ra, + tempest.api.network.test_dhcp_ipv6.NetworksTestDHCPv6.test_dhcpv6_two_subnets, + tempest.api.network.test_dhcp_ipv6.NetworksTestDHCPv6, + tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_update_delete_port, + tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_update_port_with_second_ip, + tempest.api.network.test_ports.PortsTestJSON.test_update_port_with_security_group_and_extra_attributes, + tempest.api.network.test_networks.NetworksIpV6TestJSON.test_create_delete_subnet_with_allocation_pools, + tempest.api.network.test_extra_dhcp_options.ExtraDHCPOptionsIpV6TestJSON.test_create_list_port_with_extra_dhcp_options, + tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_delete_network_with_subnet, + tempest.api.network.test_networks.NetworksIpV6TestJSON.test_create_delete_subnet_with_default_gw, + tempest.api.network.test_ports.PortsAdminExtendedAttrsIpV6TestJSON.test_update_port_binding_ext_attr, + tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_delete_subnet_with_host_routes_and_dns_nameservers, + tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_update_subnet_gw_dns_host_routes_dhcp, + tempest.api.network.test_networks.NetworksIpV6TestJSON.test_create_delete_subnet_with_host_routes_and_dns_nameservers, + tempest.api.network.test_networks.NetworksIpV6TestJSON.test_create_list_subnet_with_no_gw64_one_network, + tempest.api.network.test_routers_negative.RoutersNegativeIpV6Test, + tempest.api.network.test_ports.PortsTestJSON.test_create_port_with_no_securitygroups, + tempest.api.network.test_ports.PortsTestJSON.test_update_port_with_two_security_groups_and_extra_attributes, + 
tempest.scenario.test_security_groups_basic_ops.TestSecurityGroupsBasicOps.test_port_security_disable_security_group, + tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_delete_stateless_subnet_with_ports, + tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_delete_subnet_all_attributes, + tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_list_subnet_with_no_gw64_one_network, + tempest.api.network.test_dhcp_ipv6.NetworksTestDHCPv6.test_dhcpv6_64_subnet, + tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_delete_subnet_with_v6_attributes_stateless, + tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_update_delete_network_subnet, + tempest.api.network.test_networks.NetworksIpV6TestJSON.test_create_delete_subnet_with_gw, + tempest.api.network.test_dhcp_ipv6.NetworksTestDHCPv6.test_dhcpv6_stateless_eui64, + tempest.api.network.test_routers.RoutersIpV6Test, + tempest.api.network.test_networks.NetworksIpV6TestAttrs, + tempest.api.network.test_networks.NetworksIpV6TestJSON.test_create_delete_subnet_all_attributes, + tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_port_in_allowed_allocation_pools, + tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_port_with_no_securitygroups, + tempest.api.network.test_networks.NetworksIpV6TestJSON.test_delete_network_with_subnet, + tempest.api.network.test_networks.NetworksIpV6TestJSON.test_update_subnet_gw_dns_host_routes_dhcp, + tempest.api.network.test_networks.NetworksIpV6TestJSON, + tempest.api.network.test_ports.PortsIpV6TestJSON.test_update_port_with_security_group_and_extra_attributes, + tempest.api.network.test_dhcp_ipv6.NetworksTestDHCPv6.test_dhcpv6_stateless_no_ra_no_dhcp, + tempest.api.network.test_networks.BulkNetworkOpsIpV6TestJSON.test_bulk_create_delete_network, + tempest.scenario.test_security_groups_basic_ops.TestSecurityGroupsBasicOps.test_cross_tenant_traffic, + 
tempest.api.network.test_routers.RoutersIpV6Test.test_update_router_set_gateway_with_snat_explicit, + tempest.api.network.test_networks.BulkNetworkOpsIpV6TestJSON.test_bulk_create_delete_subnet, + tempest.api.network.test_networks.BulkNetworkOpsIpV6TestJSON.test_bulk_create_delete_port, + tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_delete_subnet_with_v6_attributes_slaac, + tempest.scenario.test_security_groups_basic_ops.TestSecurityGroupsBasicOps.test_port_update_new_security_group, + tempest.scenario.test_network_v6.TestGettingAddress.test_dualnet_multi_prefix_dhcpv6_stateless, + tempest.scenario.test_network_v6.TestGettingAddress.test_dualnet_multi_prefix_slaac, + tempest.scenario.test_network_v6.TestGettingAddress.test_dualnet_slaac_from_os, + tempest.scenario.test_network_v6.TestGettingAddress.test_multi_prefix_dhcpv6_stateless, + tempest.scenario.test_network_v6.TestGettingAddress.test_multi_prefix_slaac, + tempest.scenario.test_security_groups_cross_hosts.TestCrossHost, + tempest.api.network.test_ports.PortsIpV6TestJSON.test_port_list_filter_by_router_id, + tempest.api.network.test_ports.PortsIpV6TestJSON, + tempest.api.network.test_extra_dhcp_options.ExtraDHCPOptionsIpV6TestJSON, + tempest.api.network.test_ports.PortsAdminExtendedAttrsIpV6TestJSON.test_list_ports_binding_ext_attr, + tempest.scenario.test_network_v6.TestGettingAddress.test_slaac_from_os] From cbddb195b75e9a875f0f2b75ed23154e40aaa286 Mon Sep 17 00:00:00 2001 From: Mathieu Bultel Date: Wed, 3 Feb 2016 10:44:04 +0100 Subject: [PATCH 042/137] Minor fix for heat ansible module V1 Change-Id: Ifd5496bab149cd54f2aa3ded52f5dd7b7e9117c5 --- library/heat_stack.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/library/heat_stack.py b/library/heat_stack.py index 7c5135dc4..951dcf0a2 100644 --- a/library/heat_stack.py +++ b/library/heat_stack.py @@ -55,7 +55,7 @@ - Path of the template file to use for the stack creation required: false default: None - 
environment: + environment_files: description: - List of environment files that should be used for the stack creation required: false @@ -144,7 +144,7 @@ def _create_stack(module, heat): stack = heat.stacks.get(stack_id=uid).to_dict() sleep(5) if stack['stack_status'] == 'CREATE_COMPLETE': - return stack['stack']['id'] + return stack['id'] else: module.fail_json(msg = "Failure in creating stack: ".format(stack)) @@ -164,7 +164,7 @@ def _get_stack_id(module, heat): while True: try: stack = stacks.next() - if module.param['stack_name'] == stack.stack_name: + if module.params['stack_name'] == stack.stack_name: return stack.id except StopIteration: break @@ -176,7 +176,7 @@ def main(): argument_spec.update(dict( stack_name = dict(required=True), template = dict(default=None), - environment_files = dict(default=None, type='dict'), + environment_files = dict(default=None, type='list'), state = dict(default='present', choices=['absent', 'present']), tenant_name = dict(default=None), )) From 0da7eb278baf1ebf24140c832ca660e5f6ae76ca Mon Sep 17 00:00:00 2001 From: Arie Bregman Date: Wed, 3 Feb 2016 12:30:43 +0200 Subject: [PATCH 043/137] Fix heat_stack module heat_stack module fixed to allow non-admin users use the module for creating/remvoing stacks. 
Change-Id: I34871249c18d1c0c8df30b0757a7971e9a067073 --- library/heat_stack.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/library/heat_stack.py b/library/heat_stack.py index 951dcf0a2..0307bbda1 100644 --- a/library/heat_stack.py +++ b/library/heat_stack.py @@ -104,16 +104,16 @@ def _get_endpoint(module, ksclient): def _set_tenant_id(module): global _os_tenant_id if not module.params['tenant_name']: - tenant_name = module.params['login_tenant_name'] + _os_tenant_id = _os_keystone.tenant_id else: tenant_name = module.params['tenant_name'] - for tenant in _os_keystone.tenants.list(): - if tenant.name == tenant_name: - _os_tenant_id = tenant.id - break + for tenant in _os_keystone.tenants.list(): + if tenant.name == tenant_name: + _os_tenant_id = tenant.id + break if not _os_tenant_id: - module.fail_json(msg = "The tenant id cannot be found, please check the parameters") + module.fail_json(msg = "The tenant id cannot be found, please check the parameters") def _get_heat_client(module, kwargs): _ksclient = _get_ksclient(module, kwargs) From 450a31d5440bc74a398386e389c3f450fbbb800a Mon Sep 17 00:00:00 2001 From: Wesley Hayutin Date: Mon, 1 Feb 2016 14:52:32 -0500 Subject: [PATCH 044/137] clean out rhel/rhos/linux roles Change-Id: I6f438b87feb689b37171b25f361ec4d7f1103009 --- roles/linux/rhel/rhos/tasks/main.yml | 96 ---------------------------- 1 file changed, 96 deletions(-) diff --git a/roles/linux/rhel/rhos/tasks/main.yml b/roles/linux/rhel/rhos/tasks/main.yml index 5edd69fee..cc3605bf8 100644 --- a/roles/linux/rhel/rhos/tasks/main.yml +++ b/roles/linux/rhel/rhos/tasks/main.yml @@ -7,31 +7,6 @@ yum: name=rhos-release state=latest when: product.repo_type in ['poodle', 'puddle'] -- name: Execute rhos-release {{ product.version.major }} - command: "rhos-release {{ product.version.major }}" - when: (product.repo_type in ['puddle'] and installer.name not in ['instack', 'rdo-manager']) - -- name: Execute rhos-release for OSP-Director {{ 
product.full_version }} - command: "rhos-release {{ product.full_version }}" - when: (product.repo_type in ['puddle'] and installer.name in ['instack', 'rdo-manager']) - -#hack for the multiple products involved in setting up rdo-manager -- name: Create the RHOS Release Repository for rdo-manager - template: src=rhos-release.repo.j2 dest=/etc/yum.repos.d/rhos-release.repo - when: product_override_version is defined and product.repo_type_override == 'rhos-release' - -- name: install rhos-release for rdo-manager - yum: name=rhos-release state=latest - when: product_override_version is defined and product.repo_type_override == 'rhos-release' - -- name: Execute rhos-release for rdo-manager {{ product_override_version|int }} - command: "rhos-release {{ product_override_version|int }}" - when: product_override_version is defined and product.repo_type_override == 'rhos-release' - -- name: Execute rhos-release {{ product.version.major }}{{ installer_host_repo | default('')}} - command: "rhos-release {{ product.version.major }}{{ installer_host_repo | default('')}}" - when: installer is defined and installer.name == "foreman" and installer_host_repo | default('') != '' - - name: Change server location for repos in rhos-release replace: dest=/etc/yum.repos.d/rhos-release-{{ product.version.major }}{{ installer_host_repo | default('')}}-rhel-{{ ansible_distribution_version|string}}.repo @@ -46,69 +21,6 @@ replace=/{{ product.repo.puddle_pin_version }}/RH{{ ansible_distribution_major_version|string }} when: (product.repo.puddle_pin_version is defined and product.repo.puddle_pin_version != 'latest' and product.repo_type == 'puddle') -- name: Change Foreman version for repos in rhos-release - replace: - dest=/etc/yum.repos.d/rhos-release-{{ product.version.major }}{{ installer_host_repo | default('')}}-rhel-{{ ansible_distribution_version|string }}.repo - regexp=/Foreman/latest/ - replace=/Foreman/{{ product.repo.foreman_pin_version }}/ - when: (product.repo.foreman_pin_version 
is defined and product.repo.foreman_pin_version != 'latest') - -- name: Enable RHSM - shell: > - rhos-release -x; - rm -Rf /etc/yum.repos.d/rhos-release.repo; - subscription-manager register --username {{ distro.rhel.subscription.username }} --password {{ distro.rhel.subscription.password }}; - subscription-manager subscribe --pool {{ distro.rhel.subscription.pool }}; - subscription-manager repos --disable=*; - when: (product.repo_type == 'rhsm' and ansible_distribution_version|int == 7) - -- name: Enable RHSM yum repos - shell: > - subscription-manager repos --disable=*; - subscription-manager repos --enable=rhel-7-server-rpms; - subscription-manager repos --enable=rhel-7-server-optional-rpms; - subscription-manager repos --enable=rhel-7-server-extras-rpms; - subscription-manager repos --enable=rhel-7-server-openstack-{{ product.full_version }}-rpms; - yum-config-manager --setopt="rhel-7-server-openstack-{{ product.full_version }}-rpms.priority=1" --enable rhel-7-server-openstack-{{ product.full_version }}-rpms; - when: (product.repo_type == 'rhsm' and ansible_distribution_version|int == 7) - -- name: Enable RHSM for rdo-manager - shell: > - rm -Rf /etc/yum.repos.d/rhos-release.repo; - subscription-manager register --username {{ distro.rhel.subscription.username }} --password {{ distro.rhel.subscription.password }}; - subscription-manager subscribe --pool {{ distro.rhel.subscription.physical_pool }}; - subscription-manager repos --disable=*; - when: (product_repo_type_override is defined and product_repo_type_override == 'rhsm' and ansible_distribution_version|int == 7) - -- name: Enable RHSM yum repos for rdo-manager - shell: > - subscription-manager repos --disable=*; - subscription-manager repos --enable=rhel-7-server-rpms; - subscription-manager repos --enable=rhel-7-server-optional-rpms; - subscription-manager repos --enable=rhel-7-server-extras-rpms; - subscription-manager repos --enable=rhel-7-server-openstack-{{ product_override_version }}-rpms; - 
yum-config-manager --setopt="rhel-7-server-openstack-{{ product_override_version }}-rpms.priority=1" --enable rhel-7-server-openstack-{{ product_override_version }}-rpms; - when: (product_repo_type_override is defined and product_repo_type_override == 'rhsm' and ansible_distribution_version|int == 7) - - -# new advanced repos -- name: Create the RHOS Advanced repository - shell: "rhos-release -x" - when: product.repo_type == 'advanced' - -# poodle repos -- name: Create the RHOS poodle repository - shell: "rhos-release -x; rhos-release -d {{ product.version.major }}{{ installer_host_repo | default('')}}" - when: (product.repo_type in ['poodle'] and installer is defined and installer.name not in ['instack', 'rdo-manager']) - -- name: Create the OSP-Director poodle repository - shell: "rhos-release -x; rhos-release -d {{ product.full_version }}{{ installer_host_repo | default('')}}" - when: (product.repo_type in ['poodle'] and installer is defined and installer.name in ['instack', 'rdo-manager']) - -- name: Create the RHOS Advanced poodle repository - shell: "rhos-release -x; rhos-release -d {{ product.full_version }}a" - when: product.repo_type == 'poodle_advanced' - - name: Create the COPR repos required for component tests template: src=component-test-copr-repo.j2 dest=/etc/yum.repos.d/component-test-copr.repo when: (test.type.name is defined and (test.type.name == 'unit-test' or test.type.name == 'pep8-test') and ansible_distribution_version|int >= 6) @@ -117,18 +29,10 @@ shell: "rhos-release -x; rhos-release {{ product.version.major }} -d -p {{ product.repo.poodle_pin_version }}" when: (product.repo.poodle_pin_version is defined and product.repo.poodle_pin_version != 'latest|GA' and product.repo_type == 'poodle' and installer_host_repo | default('') == '') -- name: Change poodle version for repos in rhos-release for OFI installer host - shell: "rhos-release -x; rhos-release {{ product.version.major }}{{ installer_host_repo | default('')}} -d -p {{ 
product.repo.installer_poodle_pin_version }}" - when: (product.repo.installer_poodle_pin_version is defined and product.repo.installer_poodle_pin_version != 'latest|GA' and product.repo_type == 'poodle' and installer is defined and installer.name == "foreman" and installer_host_repo | default('') != '') - - name: Change poodle version for repos in rhos-release for GA -> Latest Poodle shell: "rhos-release -x; rhos-release {{ product.version.major }} -p {{ product.repo.poodle_pin_version }}" when: (product.repo.poodle_pin_version is defined and product.repo.poodle_pin_version == 'GA' and product.repo_type == 'poodle' and installer_host_repo | default('') == '') -- name: Change poodle version for repos in rhos-release for OFI installer host and GA-> latest Poodle - shell: "rhos-release -x; rhos-release {{ product.version.major }}{{ installer_host_repo | default('')}} -d -p {{ product.repo.installer_poodle_pin_version }}" - when: (product.repo.installer_poodle_pin_version is defined and product.repo.installer_poodle_pin_version == 'latest|GA' and product.repo_type == 'poodle' and installer is defined and installer.name == "foreman" and installer_host_repo | default('') != '') - # copr repos - name: enable tripleo copr repository shell: "sudo curl -o /etc/yum.repos.d/slagle-openstack-m.repo {{ product.repo.copr[ ansible_distribution ][distro.full_version] }}" From 42923a9b2388ddb79d1eee389e854abdc582ca26 Mon Sep 17 00:00:00 2001 From: Adriano Petrich Date: Wed, 3 Feb 2016 14:47:46 +0000 Subject: [PATCH 045/137] Change the name of the gerrit branch on dist-git The search order on rdopkg update-patches finds a remote branch before hitting the branch name that used to be created. 
this assures that we create the first choice possible that it hits Change-Id: I885ed6a111da27d4a9ee33c67e6ba3578ad3015a --- roles/patch_rpm/tasks/pre.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/patch_rpm/tasks/pre.yml b/roles/patch_rpm/tasks/pre.yml index c887bd879..500dd0ee3 100644 --- a/roles/patch_rpm/tasks/pre.yml +++ b/roles/patch_rpm/tasks/pre.yml @@ -33,7 +33,7 @@ git remote add -f patches {{ tmp_dir }}/dist-git/{{ patch.gerrit.name }}; git fetch patches; git fetch patches --tags; - git branch {{ branch_name }}-patches patches/gerrit-patch; + git branch {{ product.name }}-{{ product.version.major }}.{{ product.version.minor }}-patches patches/gerrit-patch; if [ "{{ patch.upstream is defined }}" == "True" ]; then git remote add -f upstream {{ tmp_dir }}/dist-git/{{ patch.upstream is defined and patch.upstream.name }}; git fetch upstream; From d73ef453fe276df1b01bbf883093509177087fe7 Mon Sep 17 00:00:00 2001 From: Attila Darazs Date: Tue, 2 Feb 2016 15:34:41 +0100 Subject: [PATCH 046/137] add workaround for rdo-manager liberty production Change-Id: I239f296edbcb81fe8dee77396c94c4cfb03e286f --- playbooks/installer/rdo-manager/images/run.yml | 7 +++++++ settings/product/rdo/version/liberty/repo/production.yml | 4 ++++ 2 files changed, 11 insertions(+) diff --git a/playbooks/installer/rdo-manager/images/run.yml b/playbooks/installer/rdo-manager/images/run.yml index f527c8e5b..e893ac93f 100644 --- a/playbooks/installer/rdo-manager/images/run.yml +++ b/playbooks/installer/rdo-manager/images/run.yml @@ -8,6 +8,13 @@ - name: build images on the virthost hosts: virthost tasks: + - name: install python-passlib to workaround rhbz1278972 + yum: + name: python-passlib + state: present + sudo: yes + when: workarounds.rhbz1278972 is defined + - name: setup environment vars template: src={{ base_dir }}/khaleesi/playbooks/installer/rdo-manager/templates/build-img-env.j2 dest=~/build-img-env mode=0755 diff --git 
a/settings/product/rdo/version/liberty/repo/production.yml b/settings/product/rdo/version/liberty/repo/production.yml index e58cb5944..744d6755f 100644 --- a/settings/product/rdo/version/liberty/repo/production.yml +++ b/settings/product/rdo/version/liberty/repo/production.yml @@ -3,3 +3,7 @@ product: repo_type: production # production repo details are in liberty.yml itself # since it is needed by default + +workarounds: + rhbz1278972: + enabled: True From a4ec65235b87e36390f39a1df52fb46ff6798da9 Mon Sep 17 00:00:00 2001 From: Jon Schlueter Date: Wed, 3 Feb 2016 16:39:58 -0500 Subject: [PATCH 047/137] Prune dead foreman-poodle.repo.j2 code from khaleesi after a quick search through kahleesi no current code uses that template and that template was the only place that the vars were referenced: foreman_pin_version foreman_poodle_pin_version stripping the varialbes from the settings files as well now Change-Id: I5ae6dac4d7114f71e07e4a3be6202b8a201b58e2 --- roles/linux/rhel/rhos/templates/foreman-poodle.repo.j2 | 5 ----- settings/product/rdo/version/juno/build/latest.yml | 2 -- settings/product/rdo/version/kilo/build/latest.yml | 2 -- settings/product/rdo/version/liberty/build/latest.yml | 2 -- settings/product/rdo/version/mitaka/build/latest.yml | 2 -- settings/product/rhos/version/5.0/build/latest.yml | 2 -- settings/product/rhos/version/5.0/build/staypuft_50_lkg.yml | 2 -- settings/product/rhos/version/6.0/build/latest.yml | 2 -- settings/product/rhos/version/7.0/build/latest.yml | 2 -- .../rhos/version/7_director/build/last_known_good.yml | 2 -- settings/product/rhos/version/7_director/build/latest.yml | 2 -- settings/product/rhos/version/8.0/build/latest.yml | 2 -- .../rhos/version/8_director/build/last_known_good.yml | 2 -- settings/product/rhos/version/8_director/build/latest.yml | 2 -- 14 files changed, 31 deletions(-) delete mode 100644 roles/linux/rhel/rhos/templates/foreman-poodle.repo.j2 diff --git a/roles/linux/rhel/rhos/templates/foreman-poodle.repo.j2 
b/roles/linux/rhel/rhos/templates/foreman-poodle.repo.j2 deleted file mode 100644 index 35ef63351..000000000 --- a/roles/linux/rhel/rhos/templates/foreman-poodle.repo.j2 +++ /dev/null @@ -1,5 +0,0 @@ -[foreman-poodle] -name=foreman-poodle -baseurl={{ product.repo.foreman_poodle[ansible_distribution][ansible_distribution_version] }} -enabled=0 -gpgcheck=0 diff --git a/settings/product/rdo/version/juno/build/latest.yml b/settings/product/rdo/version/juno/build/latest.yml index d3f106168..b85b62d68 100644 --- a/settings/product/rdo/version/juno/build/latest.yml +++ b/settings/product/rdo/version/juno/build/latest.yml @@ -3,5 +3,3 @@ product: repo: puddle_pin_version: 'latest' poodle_pin_version: 'latest' - foreman_pin_version: 'latest' - foreman_poodle_pin_version: 'latest' diff --git a/settings/product/rdo/version/kilo/build/latest.yml b/settings/product/rdo/version/kilo/build/latest.yml index f7a3cd256..9e59095c2 100644 --- a/settings/product/rdo/version/kilo/build/latest.yml +++ b/settings/product/rdo/version/kilo/build/latest.yml @@ -4,5 +4,3 @@ product: repo: puddle_pin_version: 'latest' poodle_pin_version: 'latest' - foreman_pin_version: 'latest' - foreman_poodle_pin_version: 'latest' diff --git a/settings/product/rdo/version/liberty/build/latest.yml b/settings/product/rdo/version/liberty/build/latest.yml index fcac9d66a..61c4b599f 100644 --- a/settings/product/rdo/version/liberty/build/latest.yml +++ b/settings/product/rdo/version/liberty/build/latest.yml @@ -4,8 +4,6 @@ product: repo: puddle_pin_version: 'latest' poodle_pin_version: 'latest' - foreman_pin_version: 'latest' - foreman_poodle_pin_version: 'latest' installer: images: diff --git a/settings/product/rdo/version/mitaka/build/latest.yml b/settings/product/rdo/version/mitaka/build/latest.yml index fcac9d66a..61c4b599f 100644 --- a/settings/product/rdo/version/mitaka/build/latest.yml +++ b/settings/product/rdo/version/mitaka/build/latest.yml @@ -4,8 +4,6 @@ product: repo: puddle_pin_version: 'latest' 
poodle_pin_version: 'latest' - foreman_pin_version: 'latest' - foreman_poodle_pin_version: 'latest' installer: images: diff --git a/settings/product/rhos/version/5.0/build/latest.yml b/settings/product/rhos/version/5.0/build/latest.yml index d3f106168..b85b62d68 100644 --- a/settings/product/rhos/version/5.0/build/latest.yml +++ b/settings/product/rhos/version/5.0/build/latest.yml @@ -3,5 +3,3 @@ product: repo: puddle_pin_version: 'latest' poodle_pin_version: 'latest' - foreman_pin_version: 'latest' - foreman_poodle_pin_version: 'latest' diff --git a/settings/product/rhos/version/5.0/build/staypuft_50_lkg.yml b/settings/product/rhos/version/5.0/build/staypuft_50_lkg.yml index d3f106168..b85b62d68 100644 --- a/settings/product/rhos/version/5.0/build/staypuft_50_lkg.yml +++ b/settings/product/rhos/version/5.0/build/staypuft_50_lkg.yml @@ -3,5 +3,3 @@ product: repo: puddle_pin_version: 'latest' poodle_pin_version: 'latest' - foreman_pin_version: 'latest' - foreman_poodle_pin_version: 'latest' diff --git a/settings/product/rhos/version/6.0/build/latest.yml b/settings/product/rhos/version/6.0/build/latest.yml index d3f106168..b85b62d68 100644 --- a/settings/product/rhos/version/6.0/build/latest.yml +++ b/settings/product/rhos/version/6.0/build/latest.yml @@ -3,5 +3,3 @@ product: repo: puddle_pin_version: 'latest' poodle_pin_version: 'latest' - foreman_pin_version: 'latest' - foreman_poodle_pin_version: 'latest' diff --git a/settings/product/rhos/version/7.0/build/latest.yml b/settings/product/rhos/version/7.0/build/latest.yml index d3f106168..b85b62d68 100644 --- a/settings/product/rhos/version/7.0/build/latest.yml +++ b/settings/product/rhos/version/7.0/build/latest.yml @@ -3,5 +3,3 @@ product: repo: puddle_pin_version: 'latest' poodle_pin_version: 'latest' - foreman_pin_version: 'latest' - foreman_poodle_pin_version: 'latest' diff --git a/settings/product/rhos/version/7_director/build/last_known_good.yml 
b/settings/product/rhos/version/7_director/build/last_known_good.yml index 7492edbe9..c5c522dd4 100644 --- a/settings/product/rhos/version/7_director/build/last_known_good.yml +++ b/settings/product/rhos/version/7_director/build/last_known_good.yml @@ -6,8 +6,6 @@ product: puddle_pin_version: 'latest' puddle_director_pin_version: '2015-10-16.1' poodle_pin_version: 'latest' - foreman_pin_version: 'latest' - foreman_poodle_pin_version: 'latest' installer: images: diff --git a/settings/product/rhos/version/7_director/build/latest.yml b/settings/product/rhos/version/7_director/build/latest.yml index 82e9bde1e..5bad17fdb 100644 --- a/settings/product/rhos/version/7_director/build/latest.yml +++ b/settings/product/rhos/version/7_director/build/latest.yml @@ -6,8 +6,6 @@ product: puddle_pin_version: 'latest' puddle_director_pin_version: 'latest' poodle_pin_version: 'latest' - foreman_pin_version: 'latest' - foreman_poodle_pin_version: 'latest' installer: images: diff --git a/settings/product/rhos/version/8.0/build/latest.yml b/settings/product/rhos/version/8.0/build/latest.yml index d3f106168..b85b62d68 100644 --- a/settings/product/rhos/version/8.0/build/latest.yml +++ b/settings/product/rhos/version/8.0/build/latest.yml @@ -3,5 +3,3 @@ product: repo: puddle_pin_version: 'latest' poodle_pin_version: 'latest' - foreman_pin_version: 'latest' - foreman_poodle_pin_version: 'latest' diff --git a/settings/product/rhos/version/8_director/build/last_known_good.yml b/settings/product/rhos/version/8_director/build/last_known_good.yml index 0be886089..759cab59d 100644 --- a/settings/product/rhos/version/8_director/build/last_known_good.yml +++ b/settings/product/rhos/version/8_director/build/last_known_good.yml @@ -6,5 +6,3 @@ product: puddle_pin_version: 'latest' puddle_director_pin_version: 'latest' poodle_pin_version: 'latest' - foreman_pin_version: 'latest' - foreman_poodle_pin_version: 'latest' diff --git a/settings/product/rhos/version/8_director/build/latest.yml 
b/settings/product/rhos/version/8_director/build/latest.yml index fd08f7249..ac120b648 100644 --- a/settings/product/rhos/version/8_director/build/latest.yml +++ b/settings/product/rhos/version/8_director/build/latest.yml @@ -6,8 +6,6 @@ product: puddle_pin_version: 'latest' puddle_director_pin_version: 'latest' poodle_pin_version: 'latest' - foreman_pin_version: 'latest' - foreman_poodle_pin_version: 'latest' installer: images: From 45b98899a6dea61215a88ee3e9baa2fdd69867b3 Mon Sep 17 00:00:00 2001 From: Jon Schlueter Date: Thu, 4 Feb 2016 08:34:12 -0500 Subject: [PATCH 048/137] pruning dead code from roles/linux/rhel/rhos this role has been replaced by playbooks and other logic This code has long since bit-rotted past functional. Reasoning: * the paths referenced in the tasks have changed a while ago * no currently checked in code is using this role * the 4 templates from this review are not referenced in any code that I have found so far pruning the copy of component-test-copr-repo.j2 which has 2 other exact copies in the repo currently. 
Change-Id: Ia3a5373086cf3d1c8e89cfa56e51f01f9e37a7ed --- .../rhel/rhos/component-test-copr-repo.j2 | 7 -- roles/linux/rhel/rhos/meta/main.yml | 4 -- roles/linux/rhel/rhos/tasks/main.yml | 71 ------------------- .../rhel/rhos/templates/rhos-advanced.repo.j2 | 5 -- .../rhel/rhos/templates/rhos-poodle.repo.j2 | 5 -- .../rhel/rhos/templates/rhos-release.repo.j2 | 6 -- 6 files changed, 98 deletions(-) delete mode 100644 roles/linux/rhel/rhos/component-test-copr-repo.j2 delete mode 100644 roles/linux/rhel/rhos/meta/main.yml delete mode 100644 roles/linux/rhel/rhos/tasks/main.yml delete mode 100644 roles/linux/rhel/rhos/templates/rhos-advanced.repo.j2 delete mode 100644 roles/linux/rhel/rhos/templates/rhos-poodle.repo.j2 delete mode 100644 roles/linux/rhel/rhos/templates/rhos-release.repo.j2 diff --git a/roles/linux/rhel/rhos/component-test-copr-repo.j2 b/roles/linux/rhel/rhos/component-test-copr-repo.j2 deleted file mode 100644 index 89c52ecee..000000000 --- a/roles/linux/rhel/rhos/component-test-copr-repo.j2 +++ /dev/null @@ -1,7 +0,0 @@ -[abregman-el{{ ansible_distribution_version|int }}-rhos{{ product.version.major }}-test-deps] -name=Copr repo for el{{ ansible_distribution_version|int }}-rhos{{ product.version.major }}-test-deps owned by abregman -baseurl=https://copr-be.cloud.fedoraproject.org/results/abregman/el{{ ansible_distribution_version|int }}-rhos{{ product.version.major}}-test-deps/epel-{{ ansible_distribution_version|int }}-$basearch/ -skip_if_unavailable=True -gpgcheck=0 -gpgkey=https://copr-be.cloud.fedoraproject.org/results/abregman/el{{ ansible_distribution_version|int }}-rhos{{ product.version.major}}-test-deps/pubkey.gpg -enabled=1 diff --git a/roles/linux/rhel/rhos/meta/main.yml b/roles/linux/rhel/rhos/meta/main.yml deleted file mode 100644 index f18391d5c..000000000 --- a/roles/linux/rhel/rhos/meta/main.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -dependencies: -- { role: linux } -- { role: common } \ No newline at end of file diff --git 
a/roles/linux/rhel/rhos/tasks/main.yml b/roles/linux/rhel/rhos/tasks/main.yml deleted file mode 100644 index cc3605bf8..000000000 --- a/roles/linux/rhel/rhos/tasks/main.yml +++ /dev/null @@ -1,71 +0,0 @@ ---- -- name: Create the RHOS Release Repository - template: src=rhos-release.repo.j2 dest=/etc/yum.repos.d/rhos-release.repo - when: product.repo_type in ['poodle', 'puddle'] - -- name: install rhos-release - yum: name=rhos-release state=latest - when: product.repo_type in ['poodle', 'puddle'] - -- name: Change server location for repos in rhos-release - replace: - dest=/etc/yum.repos.d/rhos-release-{{ product.version.major }}{{ installer_host_repo | default('')}}-rhel-{{ ansible_distribution_version|string}}.repo - regexp={{ location.defaultrepo_string }} - replace={{ location.map[user_location] }} - when: user_location is defined - -- name: Change puddle version for repos in rhos-release - replace: - dest=/etc/yum.repos.d/rhos-release-{{ product.version.major }}{{ installer_host_repo | default('')}}-rhel-{{ ansible_distribution_version|string}}.repo - regexp=/latest/RH{{ ansible_distribution_major_version|string }} - replace=/{{ product.repo.puddle_pin_version }}/RH{{ ansible_distribution_major_version|string }} - when: (product.repo.puddle_pin_version is defined and product.repo.puddle_pin_version != 'latest' and product.repo_type == 'puddle') - -- name: Create the COPR repos required for component tests - template: src=component-test-copr-repo.j2 dest=/etc/yum.repos.d/component-test-copr.repo - when: (test.type.name is defined and (test.type.name == 'unit-test' or test.type.name == 'pep8-test') and ansible_distribution_version|int >= 6) - -- name: Change poodle version for repos in rhos-release - shell: "rhos-release -x; rhos-release {{ product.version.major }} -d -p {{ product.repo.poodle_pin_version }}" - when: (product.repo.poodle_pin_version is defined and product.repo.poodle_pin_version != 'latest|GA' and product.repo_type == 'poodle' and 
installer_host_repo | default('') == '') - -- name: Change poodle version for repos in rhos-release for GA -> Latest Poodle - shell: "rhos-release -x; rhos-release {{ product.version.major }} -p {{ product.repo.poodle_pin_version }}" - when: (product.repo.poodle_pin_version is defined and product.repo.poodle_pin_version == 'GA' and product.repo_type == 'poodle' and installer_host_repo | default('') == '') - -# copr repos -- name: enable tripleo copr repository - shell: "sudo curl -o /etc/yum.repos.d/slagle-openstack-m.repo {{ product.repo.copr[ ansible_distribution ][distro.full_version] }}" - when: product.repo.copr is defined - register: rdo_repo_output - -- name: print rdo_repo_output - debug: var=rdo_repo_output.stdout - when: product.repo.copr is defined - -- name: ensure yum-utils - yum: name={{ item }} state=present - with_items: - - yum-utils - -- name: Disable default foreman puddle rhelosp repo when using poodle - shell: /usr/bin/yum-config-manager --disable 'rhelosp-*-OS-Foreman' - when: product.repo_type == 'poodle' - -# custom repos -- name: enable a custom repository - yum: name="{{ installer.custom_repo }}" - when: installer.custom_repo is defined - register: rdo_repo_output - -- name: print rdo_repo_output - debug: var=rdo_repo_output.stdout - when: installer.custom_repo is defined - -- name: Remove any rhel repo created by rdo-ci #used when both rdo and rhos are in play - file: path=/etc/yum.repos.d/rhel_ci.repo state=absent - notify: - - Yum clean all - -- name: List available yum repositories - command: yum -d 9 repolist diff --git a/roles/linux/rhel/rhos/templates/rhos-advanced.repo.j2 b/roles/linux/rhel/rhos/templates/rhos-advanced.repo.j2 deleted file mode 100644 index abe9f5b78..000000000 --- a/roles/linux/rhel/rhos/templates/rhos-advanced.repo.j2 +++ /dev/null @@ -1,5 +0,0 @@ -[rhos-advanced] -name=rhos-advanced -baseurl={{ product.repo.advanced[ansible_distribution][ansible_distribution_version] }} -enabled=0 -gpgcheck=0 diff --git 
a/roles/linux/rhel/rhos/templates/rhos-poodle.repo.j2 b/roles/linux/rhel/rhos/templates/rhos-poodle.repo.j2 deleted file mode 100644 index 5e155485a..000000000 --- a/roles/linux/rhel/rhos/templates/rhos-poodle.repo.j2 +++ /dev/null @@ -1,5 +0,0 @@ -[rhos-poodle] -name=rhos-poodle -baseurl={{ product.repo.poodle[ansible_distribution][ansible_distribution_version] }} -enabled=0 -gpgcheck=0 diff --git a/roles/linux/rhel/rhos/templates/rhos-release.repo.j2 b/roles/linux/rhel/rhos/templates/rhos-release.repo.j2 deleted file mode 100644 index 2d98530fc..000000000 --- a/roles/linux/rhel/rhos/templates/rhos-release.repo.j2 +++ /dev/null @@ -1,6 +0,0 @@ -[rhos-release] -name=rhos-release -baseurl={{ product.rpmrepo[ansible_distribution] }} -enabled=1 -gpgcheck=0 - From a2fe00a9e3eb1345e8bba563fdb0446235f58a73 Mon Sep 17 00:00:00 2001 From: Wesley Hayutin Date: Thu, 4 Feb 2016 10:17:52 -0500 Subject: [PATCH 049/137] remove the selinux workaround for ospd-8 * the bug has been verified Change-Id: Ic5555c73b5fe928b34d2d42285d67842852590ce --- playbooks/installer/rdo-manager/undercloud/run.yml | 5 ----- 1 file changed, 5 deletions(-) diff --git a/playbooks/installer/rdo-manager/undercloud/run.yml b/playbooks/installer/rdo-manager/undercloud/run.yml index 4a675c409..137946397 100644 --- a/playbooks/installer/rdo-manager/undercloud/run.yml +++ b/playbooks/installer/rdo-manager/undercloud/run.yml @@ -2,11 +2,6 @@ - name: install the undercloud hosts: undercloud tasks: - - name: set selinux to permissive for ospd-8 (workaround bug bz 1284133) - selinux: policy=targeted state=permissive - sudo: yes - when: (workarounds['rhbz1280101']['enabled'] is defined and workarounds['rhbz1280101']['enabled'] | bool) - - name: update hosts file for localhost.localhost (workaround for puppet, discovered on centos7) lineinfile: dest=/etc/hosts line="127.0.0.1 localhost localhost.localhost" sudo: yes From 25cf05a1c0f155e7f467c7ccb57e122c118fa180 Mon Sep 17 00:00:00 2001 From: Wesley Hayutin Date: 
Thu, 4 Feb 2016 20:27:15 -0500 Subject: [PATCH 050/137] update tempest skip for ospd-8 poodle Change-Id: I93b241ebd10c6b33fda0e9f4cb9231d4a92d2b98 --- .../files/tempest_skip/rdoci-rhos-8-director-rdo-manager | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/playbooks/post-deploy/rdo-manager/files/tempest_skip/rdoci-rhos-8-director-rdo-manager b/playbooks/post-deploy/rdo-manager/files/tempest_skip/rdoci-rhos-8-director-rdo-manager index dc63e9f64..6f6f53313 100644 --- a/playbooks/post-deploy/rdo-manager/files/tempest_skip/rdoci-rhos-8-director-rdo-manager +++ b/playbooks/post-deploy/rdo-manager/files/tempest_skip/rdoci-rhos-8-director-rdo-manager @@ -2,3 +2,12 @@ -tempest.api.identity.admin.v3 # rhbz1295561 -tempest.api.image.v2.test_images.BasicOperationsImagesTest.test_update_image +# rhbz1304930 +-tempest.api.compute.servers.test_create_server +-tempest.api.compute.servers.test_server_addresses +-tempest.api.compute.servers.test_server_actions +-tempest.scenario.test_network_basic_ops.TestNetworkBasicOps +-tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern +-tempest.scenario.test_volume_boot_pattern.TestVolumeBootPatternV2 +# rhbz1304933 +-tempest.api.telemetry.test_telemetry_notification_api From eddb76e4f7f18e468c12c5281ab7c5fcd1593fb4 Mon Sep 17 00:00:00 2001 From: Attila Darazs Date: Thu, 21 Jan 2016 18:03:15 +0100 Subject: [PATCH 051/137] IPv6 support for rdo-manager jobs - add vlan network variant - add missing extra poodle core rhos-release execution - add missing minimal_ha_ceph topology - replace rsync based shell copy with a proper copy module call that has better compatibility across systems - remove nic-config file search for virt jobs Co-Authored-By: Gabriele Cerami Change-Id: I6e6c00b94286e45b5936391eacb1fde923c69210 --- .../overcloud/heat-templates/pre-virthost.yml | 15 ++++----- .../overcloud/heat-templates/run.yml | 2 +- .../templates/deploy-overcloudrc.j2 | 33 ++++++++++++++----- .../rdo-manager/undercloud/post-virthost.yml 
| 9 ++++- .../rdo-manager/yum_repos/repo-rhos.yml | 8 +++++ .../neutron/isolation/bond_with_vlans.yml | 1 + .../network/neutron/isolation/default.yml | 1 + .../network/neutron/isolation/none.yml | 1 + .../neutron/isolation/single_nic_vlans.yml | 1 + .../isolation/single_nic_vlans_ipv6.yml | 5 +++ .../network/neutron/variant/vlan.yml | 3 ++ 11 files changed, 59 insertions(+), 20 deletions(-) create mode 100644 settings/installer/rdo_manager/network/neutron/isolation/single_nic_vlans_ipv6.yml create mode 100644 settings/installer/rdo_manager/network/neutron/variant/vlan.yml diff --git a/playbooks/installer/rdo-manager/overcloud/heat-templates/pre-virthost.yml b/playbooks/installer/rdo-manager/overcloud/heat-templates/pre-virthost.yml index 0695c3eea..a5a4c1661 100644 --- a/playbooks/installer/rdo-manager/overcloud/heat-templates/pre-virthost.yml +++ b/playbooks/installer/rdo-manager/overcloud/heat-templates/pre-virthost.yml @@ -9,18 +9,15 @@ #the long line in this task fails when broken up - name: copy over template file (virt) - local_action: > - shell pushd {{ base_dir }}/khaleesi; rsync --delay-updates -F --compress --archive --rsh \ - "ssh -F ssh.config.ansible -S none -o StrictHostKeyChecking=no" \ - {{base_dir}}/khaleesi-settings/hardware_environments/{{hw_env.env_type}}/network_configs/{{ installer.network.isolation }}/{{ installer.network.isolation }}.yml \ - undercloud:{{ instack_user_home }}/network-environment.yaml + copy: + src="{{ base_dir }}/khaleesi-settings/hardware_environments/{{ hw_env.env_type }}/network_configs/{{ installer.network.isolation }}/{{ installer.network.isolation }}.yml" + dest="{{ instack_user_home }}/network-environment.yaml" when: installer.network.isolation != 'none' #the long line in this task fails when broken up - name: copy over common environment file (virt) - local_action: > - shell pushd {{ base_dir }}/khaleesi; rsync --delay-updates -F --compress --archive --rsh \ - "ssh -F ssh.config.ansible -S none -o 
StrictHostKeyChecking=no" \ - {{base_dir}}/khaleesi-settings/hardware_environments/common/plan-parameter-neutron-bridge.yaml undercloud:{{ instack_user_home }}/plan-parameter-neutron-bridge.yaml + copy: + src="{{ base_dir }}/khaleesi-settings/hardware_environments/common/plan-parameter-neutron-bridge.yaml" + dest="{{ instack_user_home }}/plan-parameter-neutron-bridge.yaml" when: installer.network.isolation != 'none' and installer.deploy.type == 'plan' diff --git a/playbooks/installer/rdo-manager/overcloud/heat-templates/run.yml b/playbooks/installer/rdo-manager/overcloud/heat-templates/run.yml index 207cfd06b..3e7329c9c 100644 --- a/playbooks/installer/rdo-manager/overcloud/heat-templates/run.yml +++ b/playbooks/installer/rdo-manager/overcloud/heat-templates/run.yml @@ -9,7 +9,7 @@ #the long line in this task fails when broken up - name: copy over standard nic-configs default directory shell: > - cp /usr/share/openstack-tripleo-heat-templates/network/config/{{ installer.network.isolation | replace('_', '-') }}/*.yaml {{ instack_user_home }}/nic-configs + cp /usr/share/openstack-tripleo-heat-templates/network/config/{{ installer.network.isolation | replace('_', '-') | replace("-ipv6", "") }}/*.yaml {{ instack_user_home }}/nic-configs when: installer.network.isolation != 'none' and installer.network.isolation != 'default' #the long line in this task fails when broken up diff --git a/playbooks/installer/rdo-manager/templates/deploy-overcloudrc.j2 b/playbooks/installer/rdo-manager/templates/deploy-overcloudrc.j2 index 993492caa..24a0b549e 100644 --- a/playbooks/installer/rdo-manager/templates/deploy-overcloudrc.j2 +++ b/playbooks/installer/rdo-manager/templates/deploy-overcloudrc.j2 @@ -5,8 +5,6 @@ export DEPLOY_COMMAND="openstack overcloud deploy --debug \ {{ installer.deploy.command }} \ {{ installer.custom_deploy.command }} \ --libvirt-type=$OVERCLOUD_LIBVIRT_TYPE \ - --neutron-network-type {{ installer.network.variant }} \ - --neutron-tunnel-types {{ 
installer.network.variant }} \ --ntp-server {{ distro.config.ntp_server_ip }} \ --control-scale $CONTROLSCALE \ --compute-scale $COMPUTESCALE \ @@ -19,6 +17,14 @@ export DEPLOY_COMMAND="openstack overcloud deploy --debug \ --block-storage-flavor $BLOCKSTORAGEFLAVOR \ --swift-storage-flavor $SWIFTSTORAGEFLAVOR" +{% if installer.network.variant == 'vlan' %} +export DEPLOY_COMMAND="$DEPLOY_COMMAND --neutron-network-type vlan \ + --neutron-disable-tunneling" +{% else %} +export DEPLOY_COMMAND="$DEPLOY_COMMAND --neutron-network-type {{ installer.network.variant }} \ + --neutron-tunnel-types {{ installer.network.variant }}" +{% endif %} + {% if installer.env.type != "virthost" %} export NEUTRON_PUBLIC_INTERFACE={{ hw_env.neutron_public_interface }} export DEPLOY_COMMAND="$DEPLOY_COMMAND --neutron-public-interface=$NEUTRON_PUBLIC_INTERFACE " @@ -27,15 +33,24 @@ export DEPLOY_COMMAND="$DEPLOY_COMMAND --neutron-public-interface=$NEUTRON_PUBLI export DEPLOY_TIMEOUT={{ hw_env.deploy_timeout | default('90') }} export DEPLOY_COMMAND="$DEPLOY_COMMAND --timeout=$DEPLOY_TIMEOUT " -{% if installer.network.isolation != 'none' and installer.env.type != "virthost" %} -export DEPLOY_COMMAND="$DEPLOY_COMMAND -e /usr/share/openstack-tripleo-heat-templates/environments/network-isolation.yaml \ - -e ~/network-environment.yaml " +{% if installer.network.isolation != 'none' and installer.network.protocol == "ipv4" %} +export DEPLOY_COMMAND="$DEPLOY_COMMAND -e /usr/share/openstack-tripleo-heat-templates/environments/network-isolation.yaml" +{% endif %} + +{% if installer.network.isolation != 'none' and installer.network.protocol == "ipv6" %} +export DEPLOY_COMMAND="$DEPLOY_COMMAND -e /usr/share/openstack-tripleo-heat-templates/environments/network-isolation-v6.yaml" +{% endif %} + +{% if installer.network.isolation != 'none' and installer.env.type == "virthost" and installer.network.protocol == "ipv4" %} +export DEPLOY_COMMAND="$DEPLOY_COMMAND -e 
/usr/share/openstack-tripleo-heat-templates/environments/net-single-nic-with-vlans.yaml" +{% endif %} + +{% if installer.network.isolation != 'none' and installer.env.type == "virthost" and installer.network.protocol == "ipv6" %} +export DEPLOY_COMMAND="$DEPLOY_COMMAND -e /usr/share/openstack-tripleo-heat-templates/environments/net-single-nic-with-vlans-v6.yaml" {% endif %} -{% if installer.network.isolation != 'none' and installer.env.type == "virthost" %} -export DEPLOY_COMMAND="$DEPLOY_COMMAND -e /usr/share/openstack-tripleo-heat-templates/environments/network-isolation.yaml \ - -e /usr/share/openstack-tripleo-heat-templates/environments/net-single-nic-with-vlans.yaml \ - -e ~/network-environment.yaml " +{% if installer.network.isolation != 'none' %} +export DEPLOY_COMMAND="$DEPLOY_COMMAND -e ~/network-environment.yaml" {% endif %} {% if installer.network.isolation != 'none' and installer.deploy.type == 'plan' %} diff --git a/playbooks/installer/rdo-manager/undercloud/post-virthost.yml b/playbooks/installer/rdo-manager/undercloud/post-virthost.yml index a264bf1fb..f7149cfc7 100644 --- a/playbooks/installer/rdo-manager/undercloud/post-virthost.yml +++ b/playbooks/installer/rdo-manager/undercloud/post-virthost.yml @@ -2,9 +2,16 @@ - name: setup networking on virt for network isolation hosts: undercloud:&virthost tasks: - - name: net-iso virt setup vlans + - name: net-iso virt setup vlans (ipv4) shell: > source {{ instack_user_home }}/stackrc; sudo ovs-vsctl add-port br-ctlplane vlan10 tag=10 -- set interface vlan10 type=internal; sudo ip l set dev vlan10 up; sudo ip addr add 172.16.23.251/24 dev vlan10; when: installer.network.isolation == 'single_nic_vlans' + + - name: net-iso virt setup vlans (ipv6) + shell: > + source {{ instack_user_home }}/stackrc; + sudo ovs-vsctl add-port br-ctlplane vlan10 tag=10 -- set interface vlan10 type=internal; + sudo ip l set dev vlan10 up; sudo ip addr add 2001:db8:fd00:1000:dead:beef:cafe:f00/64 dev vlan10; + when: 
installer.network.isolation == 'single_nic_vlans_ipv6' diff --git a/playbooks/installer/rdo-manager/yum_repos/repo-rhos.yml b/playbooks/installer/rdo-manager/yum_repos/repo-rhos.yml index 01e1fd49c..3ad022ab3 100644 --- a/playbooks/installer/rdo-manager/yum_repos/repo-rhos.yml +++ b/playbooks/installer/rdo-manager/yum_repos/repo-rhos.yml @@ -62,6 +62,14 @@ register: pinned_poodle when: product.repo_type in ['poodle'] and product.repo.poodle_pin_version == 'latest' + - name: Execute rhos-release for core rhos poodle (osp) + sudo: yes + shell: > + rhos-release -d -P {{ product.repo.core_product_version }}; + rhos-release -d -r {{ distro.full_version }} -t {{installer.dib_dir}} -P {{ product.repo.core_product_version }}; + register: pinned_poodle + when: product.repo_type in ['poodle'] and product.repo.poodle_pin_version == 'latest' + - name: yum clean all command: yum clean all diff --git a/settings/installer/rdo_manager/network/neutron/isolation/bond_with_vlans.yml b/settings/installer/rdo_manager/network/neutron/isolation/bond_with_vlans.yml index 6c2cabd9a..9a8d36203 100644 --- a/settings/installer/rdo_manager/network/neutron/isolation/bond_with_vlans.yml +++ b/settings/installer/rdo_manager/network/neutron/isolation/bond_with_vlans.yml @@ -2,3 +2,4 @@ installer: network: isolation: bond_with_vlans + protocol: ipv4 diff --git a/settings/installer/rdo_manager/network/neutron/isolation/default.yml b/settings/installer/rdo_manager/network/neutron/isolation/default.yml index 780e512b0..3c7ec08b0 100644 --- a/settings/installer/rdo_manager/network/neutron/isolation/default.yml +++ b/settings/installer/rdo_manager/network/neutron/isolation/default.yml @@ -2,3 +2,4 @@ installer: network: isolation: default + protocol: ipv4 diff --git a/settings/installer/rdo_manager/network/neutron/isolation/none.yml b/settings/installer/rdo_manager/network/neutron/isolation/none.yml index 9d3b5a541..d44348689 100644 --- a/settings/installer/rdo_manager/network/neutron/isolation/none.yml 
+++ b/settings/installer/rdo_manager/network/neutron/isolation/none.yml @@ -2,3 +2,4 @@ installer: network: isolation: none + protocol: ipv4 diff --git a/settings/installer/rdo_manager/network/neutron/isolation/single_nic_vlans.yml b/settings/installer/rdo_manager/network/neutron/isolation/single_nic_vlans.yml index 240edc22e..6e3f4fa1b 100644 --- a/settings/installer/rdo_manager/network/neutron/isolation/single_nic_vlans.yml +++ b/settings/installer/rdo_manager/network/neutron/isolation/single_nic_vlans.yml @@ -2,3 +2,4 @@ installer: network: isolation: single_nic_vlans + protocol: ipv4 diff --git a/settings/installer/rdo_manager/network/neutron/isolation/single_nic_vlans_ipv6.yml b/settings/installer/rdo_manager/network/neutron/isolation/single_nic_vlans_ipv6.yml new file mode 100644 index 000000000..88f7efda2 --- /dev/null +++ b/settings/installer/rdo_manager/network/neutron/isolation/single_nic_vlans_ipv6.yml @@ -0,0 +1,5 @@ +--- +installer: + network: + isolation: single_nic_vlans_ipv6 + protocol: ipv6 diff --git a/settings/installer/rdo_manager/network/neutron/variant/vlan.yml b/settings/installer/rdo_manager/network/neutron/variant/vlan.yml new file mode 100644 index 000000000..e54983d08 --- /dev/null +++ b/settings/installer/rdo_manager/network/neutron/variant/vlan.yml @@ -0,0 +1,3 @@ +installer: + network: + variant: vlan From a3519eaa8e8cb13af8e32159eaa02ef2f94113fa Mon Sep 17 00:00:00 2001 From: Wesley Hayutin Date: Fri, 5 Feb 2016 10:20:15 -0500 Subject: [PATCH 052/137] blueprint for deploying osp-7 with ospd-8 undercloud Change-Id: I8d4463b0f9bbbe167dd2e5d87ee6003998a8e7e5 --- ...pd8-undercloud-deploys-ospd7-overcloud.rst | 61 +++++++++++++++++++ 1 file changed, 61 insertions(+) create mode 100644 blueprints/ospd8-undercloud-deploys-ospd7-overcloud.rst diff --git a/blueprints/ospd8-undercloud-deploys-ospd7-overcloud.rst b/blueprints/ospd8-undercloud-deploys-ospd7-overcloud.rst new file mode 100644 index 000000000..fc297e643 --- /dev/null +++ 
b/blueprints/ospd8-undercloud-deploys-ospd7-overcloud.rst @@ -0,0 +1,61 @@ +.. + This work is licensed under a Creative Commons Attribution 3.0 Unported + License. + + http://creativecommons.org/licenses/by/3.0/legalcode + +=========================== +Deploy a ospd-7 overcloud using an ospd-8 undercloud +=========================== + +We have some requirements from PM to deploy a ospd-7 overcloud using an +ospd-8 undercloud. PM would like this in CI's status jobs. + +Problem description +=================== + +Consult PM + +Proposed change +=============== + +- Deploy the undercloud +- Remove the tripleo-heat-templates for opsd-8 +- Install the tripleo-heat-tempeates for ospd-7 +- Rerun ksgen for ospd-8 +- Deploy + +Alternatives +------------ + +None + +Implementation +============== + +Assignee(s) +----------- +whayutin@redhat.com + +Milestones +---------- + +- Deploy the undercloud +- Remove the tripleo-heat-templates for opsd-8 +- Install the tripleo-heat-tempeates for ospd-7 +- Deploy + +Work Items +---------- + +- test deployment in a dev enviornment +- build POC job +- build new jjb builder, template +- test POC job +- test w/ baremetal +- push to production + +Dependencies +============ + +- The playbooks must be able to be called independently From 9ab0d63f2b550b63fe5a563572d11c118961bfeb Mon Sep 17 00:00:00 2001 From: Wesley Hayutin Date: Sun, 7 Feb 2016 11:44:09 -0500 Subject: [PATCH 053/137] new failures discovered with the same root cause as doc'd bugs Change-Id: Id24431a9591f2a947b3faf02e67828959ed75ca6 --- .../files/tempest_skip/rdoci-rhos-8-director-rdo-manager | 3 +++ 1 file changed, 3 insertions(+) diff --git a/playbooks/post-deploy/rdo-manager/files/tempest_skip/rdoci-rhos-8-director-rdo-manager b/playbooks/post-deploy/rdo-manager/files/tempest_skip/rdoci-rhos-8-director-rdo-manager index 6f6f53313..f67da5b36 100644 --- a/playbooks/post-deploy/rdo-manager/files/tempest_skip/rdoci-rhos-8-director-rdo-manager +++ 
b/playbooks/post-deploy/rdo-manager/files/tempest_skip/rdoci-rhos-8-director-rdo-manager @@ -1,12 +1,15 @@ # rhbz1266947 -tempest.api.identity.admin.v3 +-tempest.api.identity.v3.test_api_discovery # rhbz1295561 -tempest.api.image.v2.test_images.BasicOperationsImagesTest.test_update_image # rhbz1304930 -tempest.api.compute.servers.test_create_server -tempest.api.compute.servers.test_server_addresses -tempest.api.compute.servers.test_server_actions +-tempest.api.compute.servers.test_attach_interfaces.AttachInterfacesTestJSON -tempest.scenario.test_network_basic_ops.TestNetworkBasicOps +-tempest.scenario.test_server_basic_ops.TestServerBasicOps -tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern -tempest.scenario.test_volume_boot_pattern.TestVolumeBootPatternV2 # rhbz1304933 From 4dcceb20c6c51e0e7786ee2087b43820a70852ef Mon Sep 17 00:00:00 2001 From: David Sariel Date: Mon, 8 Feb 2016 09:33:18 +0200 Subject: [PATCH 054/137] Adding to blacklist failing tests to pass CI Adding temporary the following tests until will be fixed. They should be removed as soon as the tests will be fixed. 
- tempest.api.volume.admin.test_volumes_backup.VolumesBackupsV1Test.test_volume_backup_create_get_detailed_list_restore_delete - tempest.api.volume.admin.test_volumes_backup.VolumesBackupsV2Test.test_volume_backup_create_get_detailed_list_restore_delete - tempest.scenario.test_minimum_basic.TestMinimumBasicScenario.test_minimum_basic_scenario - tempest.scenario.test_encrypted_cinder_volumes.TestEncryptedCinderVolumes.test_encrypted_cinder_volumes_cryptsetup - tempest.scenario.test_encrypted_cinder_volumes.TestEncryptedCinderVolumes.test_encrypted_cinder_volumes_luks Change-Id: I1e7a66cde27bd89973f06d04bd7eed52161ffb33 --- settings/tester/tempest/tests/cinder_full.yml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/settings/tester/tempest/tests/cinder_full.yml b/settings/tester/tempest/tests/cinder_full.yml index 94d1079d2..e6b7e7f37 100644 --- a/settings/tester/tempest/tests/cinder_full.yml +++ b/settings/tester/tempest/tests/cinder_full.yml @@ -6,5 +6,9 @@ tester: tempest.scenario.test_security_groups_basic_ops.TestSecurityGroupsBasicOps.test_multiple_security_groups, tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_connectivity_between_vms_on_different_networks, tempest.scenario.test_network_v6.TestGettingAddress.test_dhcp6_stateless_from_os, - tempest.scenario.test_snapshot_pattern.TestSnapshotPattern.test_snapshot_pattern] - + tempest.scenario.test_snapshot_pattern.TestSnapshotPattern.test_snapshot_pattern, + tempest.api.volume.admin.test_volumes_backup.VolumesBackupsV1Test.test_volume_backup_create_get_detailed_list_restore_delete, # 7,6,5 + tempest.api.volume.admin.test_volumes_backup.VolumesBackupsV2Test.test_volume_backup_create_get_detailed_list_restore_delete, # 7,6,5 + tempest.scenario.test_minimum_basic.TestMinimumBasicScenario.test_minimum_basic_scenario, # only 7 + tempest.scenario.test_encrypted_cinder_volumes.TestEncryptedCinderVolumes.test_encrypted_cinder_volumes_cryptsetup, # only 6 and 5 + 
tempest.scenario.test_encrypted_cinder_volumes.TestEncryptedCinderVolumes.test_encrypted_cinder_volumes_luks] # only 5 From 9f7b7dd56da1d32067b0498d80d47c41bd921909 Mon Sep 17 00:00:00 2001 From: Wesley Hayutin Date: Thu, 4 Feb 2016 15:58:30 -0500 Subject: [PATCH 055/137] the global post install is really a packstack post install Considering devstack is skipped, let's move the global post install into the packstack post. Change-Id: If140728804c913778555e163576692a6e58f5deb --- playbooks/install.yml | 38 ------------------------- playbooks/installer/packstack/post.yml | 39 ++++++++++++++++++++++++++ 2 files changed, 39 insertions(+), 38 deletions(-) diff --git a/playbooks/install.yml b/playbooks/install.yml index 46af13315..b85549715 100644 --- a/playbooks/install.yml +++ b/playbooks/install.yml @@ -27,41 +27,3 @@ shell: /sbin/iptables-save >/etc/sysconfig/iptables - include: installer/{{ installer.type }}/main.yml - -- name: Global post install - hosts: controller - gather_facts: yes - tasks: - # TODO(tkammer): move all params into khaleesi-settings - - name: Create external network - neutron - quantum_network: - state: present - auth_url: "http://{{ hostvars[inventory_hostname].ansible_default_ipv4.address }}:35357/v2.0/" - login_username: admin - login_password: "{{ hostvars[inventory_hostname].admin_password | default('redhat') }}" - login_tenant_name: admin - name: "{{ installer.network.name }}" - provider_network_type: "{{ installer.network.external.provider_network_type }}" - provider_physical_network: "{{ installer.network.label }}" - provider_segmentation_id: "{{ installer.network.external.vlan.tag|default(omit) }}" - router_external: yes - shared: no - admin_state_up: yes - when: installer is defined and installer.network.type == 'neutron' and installer.type != 'devstack' - - - name: Create subnet for external network - neutron - quantum_subnet: - state: present - auth_url: "http://{{ hostvars[inventory_hostname].ansible_default_ipv4.address 
}}:35357/v2.0/" - login_username: admin - login_password: "{{ hostvars[inventory_hostname].admin_password | default('redhat') }}" - login_tenant_name: admin - tenant_name: admin - network_name: "{{ installer.network.name }}" - name: external-subnet - enable_dhcp: False - gateway_ip: "{{ provisioner.network.network_list.external.nested.subnet_gateway }}" - cidr: "{{ provisioner.network.network_list.external.nested.subnet_cidr}}" - allocation_pool_start: "{{ provisioner.network.network_list.external.nested.allocation_pool_start }}" - allocation_pool_end: "{{ provisioner.network.network_list.external.nested.allocation_pool_end }}" - when: installer is defined and installer.network.type == 'neutron' and installer.type != 'devstack' diff --git a/playbooks/installer/packstack/post.yml b/playbooks/installer/packstack/post.yml index 95a4a68ed..6c55bd8f2 100644 --- a/playbooks/installer/packstack/post.yml +++ b/playbooks/installer/packstack/post.yml @@ -191,3 +191,42 @@ name: neutron-server state: restarted when: ha|changed or portsec|changed + +- name: Packstack post install + hosts: controller + gather_facts: yes + tasks: + # TODO(tkammer): move all params into khaleesi-settings + - name: Create external network - neutron + quantum_network: + state: present + auth_url: "http://{{ hostvars[inventory_hostname].ansible_default_ipv4.address }}:35357/v2.0/" + login_username: admin + login_password: "{{ hostvars[inventory_hostname].admin_password | default('redhat') }}" + login_tenant_name: admin + name: "{{ installer.network.name }}" + provider_network_type: "{{ installer.network.external.provider_network_type }}" + provider_physical_network: "{{ installer.network.label }}" + provider_segmentation_id: "{{ installer.network.external.vlan.tag|default(omit) }}" + router_external: yes + shared: no + admin_state_up: yes + when: installer is defined and installer.network.type == 'neutron' + + - name: Create subnet for external network - neutron + quantum_subnet: + state: present + 
auth_url: "http://{{ hostvars[inventory_hostname].ansible_default_ipv4.address }}:35357/v2.0/" + login_username: admin + login_password: "{{ hostvars[inventory_hostname].admin_password | default('redhat') }}" + login_tenant_name: admin + tenant_name: admin + network_name: "{{ installer.network.name }}" + name: external-subnet + enable_dhcp: False + gateway_ip: "{{ provisioner.network.network_list.external.nested.subnet_gateway }}" + cidr: "{{ provisioner.network.network_list.external.nested.subnet_cidr}}" + allocation_pool_start: "{{ provisioner.network.network_list.external.nested.allocation_pool_start }}" + allocation_pool_end: "{{ provisioner.network.network_list.external.nested.allocation_pool_end }}" + when: installer is defined and installer.network.type == 'neutron' + From 6d3a12cb53e42ddda0fb16db867d84bcca581ffd Mon Sep 17 00:00:00 2001 From: Attila Darazs Date: Tue, 15 Dec 2015 14:00:10 +0100 Subject: [PATCH 056/137] Create experimental upstream gate jobs These jobs are testing instack-undercloud on the liberty and master branches. The jobs are set to silent, they won't comment on the upstream changes until we have sufficient stability. This patch also fixes the errors in the delorean rpm generation and copy. 
Change-Id: Ie94c48f06abdffeda0403ca1f980cf9901ea2112 --- jenkins-jobs/builders.yaml | 80 +++++++++++++++++++++++ jenkins-jobs/defaults.yaml | 26 ++++++++ jenkins-jobs/upstream.yaml | 58 ++++++++++++++++ roles/delorean/tasks/copy-rpm.yml | 29 ++++++-- roles/delorean/tasks/install.yml | 9 +-- roles/delorean/templates/delorean_rpms.j2 | 2 +- roles/delorean_rpms/tasks/main.yml | 28 ++++---- 7 files changed, 206 insertions(+), 26 deletions(-) create mode 100644 jenkins-jobs/upstream.yaml diff --git a/jenkins-jobs/builders.yaml b/jenkins-jobs/builders.yaml index f4961f5b5..495bc47f9 100644 --- a/jenkins-jobs/builders.yaml +++ b/jenkins-jobs/builders.yaml @@ -87,6 +87,86 @@ exit $result +- builder: + name: ksgen-builder-upstream + builders: + - shining-panda: + build-environment: virtualenv + python-version: system-CPython-2.7 + nature: shell + clear: false + use-distribute: true + system-site-packages: false + ignore-exit-code: false + command: | + pip install -U ansible==1.9.2 > ansible_build; ansible --version + + # install ksgen + pushd khaleesi/tools/ksgen + python setup.py install + popd + + pushd khaleesi + + cp ansible.cfg.example ansible.cfg + touch ssh.config.ansible + echo "" >> ansible.cfg + echo "[ssh_connection]" >> ansible.cfg + echo "ssh_args = -F ssh.config.ansible" >> ansible.cfg + + # fetch dependent gating changes + if [ $GERRIT_CHANGE_COMMIT_MESSAGE ]; then + ansible-playbook -i local_hosts -vv playbooks/depends-on.yml + fi + + # generate config + ksgen --config-dir=settings generate \ + --provisioner=centosci \ + --provisioner-site=default \ + --provisioner-distro=centos \ + --provisioner-distro-version={provisioner-distro-version} \ + --provisioner-site-user=rdo \ + --product={product} \ + --product-version={product-version} \ + --product-version-build={pin} \ + --product-version-repo={product-version-repo} \ + --distro={distro} \ + --installer={installer} \ + --installer-deploy={installer-deploy} \ + --installer-env={installer-env} \ + 
--installer-images={installer-images} \ + --installer-network={network} \ + --installer-network-isolation={network-isolation} \ + --installer-network-variant={network-variant} \ + --installer-post_action={installer-post_action} \ + --installer-topology={installer-topology} \ + --installer-tempest={installer-tempest} \ + --rpm=use-delorean \ + --workarounds=enabled \ + --extra-vars @../khaleesi-settings/hardware_environments/virt/network_configs/{network-isolation}/hw_settings.yml \ + ksgen_settings.yml + + # get nodes and run test + set +e + anscmd="stdbuf -oL -eL ansible-playbook -vv --extra-vars @ksgen_settings.yml" + + $anscmd -i local_hosts playbooks/gate.yml + result=$? + + infra_result=0 + $anscmd -i hosts playbooks/collect_logs.yml &> collect_logs.txt || infra_result=1 + $anscmd -i local_hosts playbooks/cleanup.yml &> cleanup.txt || infra_result=2 + + if [[ "$infra_result" != "0" && "$result" = "0" ]]; then + # if the job/test was ok, but collect_logs/cleanup failed, + # print out why the job is going to be marked as failed + result=$infra_result + cat collect_logs.txt + cat cleanup.txt + fi + + exit $result + - builder: name: ksgen-builder-rdo-manager-promote builders: diff --git a/jenkins-jobs/defaults.yaml b/jenkins-jobs/defaults.yaml index bd7e6ba67..350bca194 100644 --- a/jenkins-jobs/defaults.yaml +++ b/jenkins-jobs/defaults.yaml @@ -159,6 +159,32 @@ keep-long-stdio: False test-stability: True +- trigger: + name: trigger-upstream-gate-rdo-manager + triggers: + - gerrit: + server-name: 'rdo-ci-openstack.org' + trigger-on: + - patchset-created-event + - comment-added-contains-event: + comment-contains-value: '(?i)^(Patch Set [0-9]+:)?( [\w\\+-]*)*(\n\n)?\s*(rdo)? 
?(recheck)' + projects: + - project-compare-type: 'PLAIN' + project-pattern: 'openstack/{project}' + branches: + - branch-compare-type: 'PLAIN' + branch-pattern: '{branch}' + skip-vote: + successful: true + failed: true + unstable: true + notbuilt: true + failure-message: 'FAILURE' + successful-message: 'SUCCESS' + unstable-message: 'UNSTABLE' + custom-url: "* $JOB_NAME $BUILD_URL" + silent: true + - trigger: name: trigger-rdo-manager-gate-khaleesi triggers: diff --git a/jenkins-jobs/upstream.yaml b/jenkins-jobs/upstream.yaml new file mode 100644 index 000000000..7a81945c4 --- /dev/null +++ b/jenkins-jobs/upstream.yaml @@ -0,0 +1,58 @@ +- job-template: + name: 'upstream-gate-{project}-{installer}-{product}-{product-version}-{installer-tempest}' + defaults: rdo-manager-defaults + triggers: + - trigger-upstream-gate-rdo-manager: + branch: '{branch}' + project: '{project}' + scm: + - repo-khaleesi + - repo-khaleesi-settings + builders: + - ksgen-builder-upstream: + provisioner-distro: '{provisioner-distro}' + provisioner-distro-version: '{provisioner-distro-version}' + provisioner-options: 'skip_provision' + product: '{product}' + product-version: '{product-version}' + product-version-repo: '{product-version-repo}' + distro: '{distro}' + installer: '{installer}' + installer-deploy: '{installer-deploy}' + installer-env: '{installer-env}' + installer-images: '{installer-images}' + installer-post_action: '{installer-post_action}' + installer-topology: '{installer-topology}' + installer-tempest: '{installer-tempest}' + network: '{network}' + network-isolation: '{network-isolation}' + network-variant: '{network-variant}' + pin: '{pin}' + +- project: + name: upstream-gate-jobs-rdo-manager-centosci + project: instack-undercloud + installer: rdo_manager + installer-deploy: templates + installer-env: virthost + installer-images: build + installer-post_action: none + installer-topology: minimal_no_ceph + installer-tempest: smoke + network: neutron + network-isolation: none + 
network-variant: ml2-vxlan + product: rdo + product-version-repo: delorean + distro: centos-7.0 + provisioner-distro: centos + provisioner-distro-version: 7 + pin: last_known_good + + jobs: + - 'upstream-gate-{project}-{installer}-{product}-{product-version}-{installer-tempest}': + product-version: liberty + branch: stable/liberty + - 'upstream-gate-{project}-{installer}-{product}-{product-version}-{installer-tempest}': + product-version: mitaka + branch: master diff --git a/roles/delorean/tasks/copy-rpm.yml b/roles/delorean/tasks/copy-rpm.yml index f5994fb3a..9b3c6edc1 100644 --- a/roles/delorean/tasks/copy-rpm.yml +++ b/roles/delorean/tasks/copy-rpm.yml @@ -1,7 +1,26 @@ - name: Create a directory to hold the delorean rpms - file: path={{ ansible_env.HOME }}/rpms state=directory + file: + path: "{{ ansible_env.HOME }}/delorean_rpms" + state: directory -- name: Copy and rename the generated rpms - shell: > - cp {{ ansible_env.HOME }}/delorean/repos/*/*/*/*.rpm {{ ansible_env.HOME }}/rpms/; - rm -rf {{ ansible_env.HOME }}/delorean; +- name: Copy the generated rpms + shell: | + find {{ ansible_env.HOME }}/delorean/data/repos -type f -name '*.rpm' -print0| xargs -0 cp -t {{ ansible_env.HOME }}/delorean_rpms/ + rm -rf {{ ansible_env.HOME }}/delorean + +- name: Run createrepo on generated rpms + sudo: yes + shell: "createrepo delorean_rpms" + args: + chdir: "{{ ansible_env.HOME }}" + +- name: Compress the repo before fetching + shell: "tar czf delorean_rpms.tar.gz delorean_rpms" + args: + chdir: "{{ ansible_env.HOME }}" + +- name: Fetch the repo to the slave + fetch: + flat: yes + src: "{{ ansible_env.HOME }}/delorean_rpms.tar.gz" + dest: "{{ base_dir }}/delorean_rpms.tar.gz" diff --git a/roles/delorean/tasks/install.yml b/roles/delorean/tasks/install.yml index 8c1884e72..f35bae064 100644 --- a/roles/delorean/tasks/install.yml +++ b/roles/delorean/tasks/install.yml @@ -1,5 +1,5 @@ - name: Ensure delorean package dependencies - yum: name=mock,python-virtualenv 
state=installed + yum: name=createrepo,mock,python-virtualenv,rpm-build state=installed sudo: yes - name: Create mock group @@ -8,7 +8,7 @@ - name: Add user to mock group sudo: yes - user: name=rhos-ci groups=mock + user: name={{ ansible_ssh_user }} groups=mock - name: Create virtualenv for Delorean command: virtualenv {{ ansible_env.HOME }}/delorean-venv creates='{{ ansible_env.HOME }}/delorean-venv' @@ -28,8 +28,3 @@ pip: name: tox virtualenv: '{{ ansible_env.HOME }}/delorean-venv' - -- name: Apply temporary fix - shell: 'git fetch https://review.gerrithub.io/openstack-packages/delorean refs/changes/75/255375/2 && git checkout FETCH_HEAD' - args: - chdir: "{{ ansible_env.HOME }}/delorean" diff --git a/roles/delorean/templates/delorean_rpms.j2 b/roles/delorean/templates/delorean_rpms.j2 index 1282a30e5..e99f028f0 100644 --- a/roles/delorean/templates/delorean_rpms.j2 +++ b/roles/delorean/templates/delorean_rpms.j2 @@ -1,6 +1,6 @@ [delorean-rpms] name=Delorean rpms -baseurl=file:///home/{{ ansible_ssh_user }}/delorean_rpms +baseurl=file://{{ ansible_env.HOME }}/delorean_rpms enabled=1 gpgcheck=0 priority=1 diff --git a/roles/delorean_rpms/tasks/main.yml b/roles/delorean_rpms/tasks/main.yml index 0e5cd8e71..b758d0d2a 100644 --- a/roles/delorean_rpms/tasks/main.yml +++ b/roles/delorean_rpms/tasks/main.yml @@ -9,22 +9,24 @@ command: "rhos-release {{ product.full_version|int }} {{ product.repo.rhos_release.extra_args|join(' ') }}" when: product.rpm is defined and product.rpm -- name: Install createrepo - sudo: yes - yum: name=createrepo state=present - -- name: Create repo folder - file: path=/home/{{ ansible_ssh_user }}/delorean state=directory +- name: Unpack the repo + unarchive: + src: "{{ base_dir }}/delorean_rpms.tar.gz" + dest: "{{ ansible_env.HOME }}" -- name: copy the generated rpms - copy: src={{ item }} dest=/home/{{ ansible_ssh_user }}/delorean_rpms - with_fileglob: - - "{{ ansible_env.HOME }}/rpms/*.rpm" +- name: Lower repo priorities from one + sudo: yes 
+ shell: > + for file in /etc/yum.repos.d/*.repo; do + sed -i 's/priority=1/priority=2/' $file; + done - name: Setup repository configuration sudo: yes - template: "src={{ lookup('env', 'PWD') }}/roles/delorean/templates/delorean_rpms.j2 dest=/etc/yum.repos.d/delorean_rpms.repo" + template: + src: "{{ base_dir }}/khaleesi/roles/delorean/templates/delorean_rpms.j2" + dest: "/etc/yum.repos.d/delorean_rpms.repo" -- name: Run createrepo to setup repo for patched rpm +- name: print out current repo config sudo: yes - shell: "createrepo /home/{{ ansible_ssh_user }}/delorean_rpms" + command: yum -d 7 repolist From fe5c3a80e1adee756e194a56c4d0b11478ed3cc5 Mon Sep 17 00:00:00 2001 From: Attila Darazs Date: Mon, 8 Feb 2016 16:39:25 +0100 Subject: [PATCH 057/137] remove unused builder parameter skip_provision Clean up an old builder option that remained in JJBs. Change-Id: I0126ba7e3279081013ece437bd5f05338626ef8b --- jenkins-jobs/features.yml | 1 - jenkins-jobs/promote.yml | 1 - jenkins-jobs/rdo-manager.yaml | 5 ----- jenkins-jobs/upstream.yaml | 1 - 4 files changed, 8 deletions(-) diff --git a/jenkins-jobs/features.yml b/jenkins-jobs/features.yml index 708af6efa..d52061f71 100644 --- a/jenkins-jobs/features.yml +++ b/jenkins-jobs/features.yml @@ -20,7 +20,6 @@ provisioner-site-user: 'rdo' provisioner-distro: '{provisioner-distro}' provisioner-distro-version: '{provisioner-distro-version}' - provisioner-options: 'skip_provision' product: '{product}' product-version: '{product-version}' product-version-repo: '{product-version-repo}' diff --git a/jenkins-jobs/promote.yml b/jenkins-jobs/promote.yml index 34e036510..2de77e30a 100644 --- a/jenkins-jobs/promote.yml +++ b/jenkins-jobs/promote.yml @@ -52,7 +52,6 @@ - ksgen-builder-rdo-manager-promote: provisioner-distro: '{provisioner-distro}' provisioner-distro-version: '{provisioner-distro-version}' - provisioner-options: 'skip_provision' product: '{product}' product-version: '{product-version}' product-version-repo: 
'{product-version-repo}' diff --git a/jenkins-jobs/rdo-manager.yaml b/jenkins-jobs/rdo-manager.yaml index d0c306b01..6bad625d0 100644 --- a/jenkins-jobs/rdo-manager.yaml +++ b/jenkins-jobs/rdo-manager.yaml @@ -10,7 +10,6 @@ - ksgen-builder-rdo-manager: provisioner-distro: '{provisioner-distro}' provisioner-distro-version: '{provisioner-distro-version}' - provisioner-options: 'skip_provision' product: '{product}' product-version: '{product-version}' product-version-repo: '{product-version-repo}' @@ -53,7 +52,6 @@ - ksgen-builder-rdo-manager: provisioner-distro: '{provisioner-distro}' provisioner-distro-version: '{provisioner-distro-version}' - provisioner-options: 'skip_provision' product: '{product}' product-version: '{product-version}' product-version-repo: '{product-version-repo}' @@ -83,7 +81,6 @@ - ksgen-builder-rdo-manager: provisioner-distro: '{provisioner-distro}' provisioner-distro-version: '{provisioner-distro-version}' - provisioner-options: 'skip_provision' product: '{product}' product-version: '{product-version}' product-version-repo: '{product-version-repo}' @@ -112,7 +109,6 @@ - ksgen-builder-rdo-manager: provisioner-distro: '{provisioner-distro}' provisioner-distro-version: '{provisioner-distro-version}' - provisioner-options: 'skip_provision' product: '{product}' product-version: '{product-version}' product-version-repo: '{product-version-repo}' @@ -153,7 +149,6 @@ - ksgen-builder-rdo-manager: provisioner-distro: '{provisioner-distro}' provisioner-distro-version: '{provisioner-distro-version}' - provisioner-options: 'skip_provision' product: '{product}' product-version: '{product-version}' product-version-repo: '{product-version-repo}' diff --git a/jenkins-jobs/upstream.yaml b/jenkins-jobs/upstream.yaml index 7a81945c4..6cbba30e1 100644 --- a/jenkins-jobs/upstream.yaml +++ b/jenkins-jobs/upstream.yaml @@ -12,7 +12,6 @@ - ksgen-builder-upstream: provisioner-distro: '{provisioner-distro}' provisioner-distro-version: '{provisioner-distro-version}' - 
provisioner-options: 'skip_provision' product: '{product}' product-version: '{product-version}' product-version-repo: '{product-version-repo}' From 667405e693342b341931554ae529220f2a8ce135 Mon Sep 17 00:00:00 2001 From: Harry Rybacki Date: Tue, 2 Feb 2016 12:04:40 -0500 Subject: [PATCH 058/137] Add line length exception to best_practices After running into problems breaking parts of multi-line tasks down to match our best practices, it has come to our attention that this is not always possible and advise developers to annotate such occurences for clarity. Change-Id: Id10f5a01ace35b4273e4421858dbb918fe915ae0 --- doc/best_practices.rst | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/doc/best_practices.rst b/doc/best_practices.rst index 989e0d1d0..e1e2b99ae 100644 --- a/doc/best_practices.rst +++ b/doc/best_practices.rst @@ -55,7 +55,9 @@ Examples:: **Rule: Line Length** - Keep text under 100 characters per line. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -For ease of readability, keep text to a uniform length of 100 characters or less. +For ease of readability, keep text to a uniform length of 100 characters or less. Some modules +are known to have issues with multi-line formatting and should be commented on if it is an issue +within your change. 
Examples:: @@ -71,11 +73,19 @@ Examples:: -P Compute-1::NovaEnableRbdBackend=true; when: installer.deploy.type == 'plan' + # EXCEPTION: - When a module breaks from multi-line use, add a comment to indicate it + # The long line in this task fails when broken down + - name: copy over common environment file (virt) + local_action: > + shell pushd {{ base_dir }}/khaleesi; rsync --delay-updates -F --compress --archive --rsh \ + "ssh -F ssh.config.ansible -S none -o StrictHostKeyChecking=no" \ + {{base_dir}}/khaleesi-settings/hardware_environments/common/plan-parameter-neutron-bridge.yaml undercloud:{{ instack_user_home }}/plan-parameter-neutron-bridge.yaml -**Rule: Using Quotes** - Use single quotes. + +**Rule: Using Quotes** - Use single quotes. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Use single quotes throughout playbooks except when double quotes are required +Use single quotes throughout playbooks except when double quotes are required for ``shell`` commands or enclosing ``when`` statements. 
Examples:: From 5c39d6b9296d4f68c5a130c6e5fca197dcb9d0ff Mon Sep 17 00:00:00 2001 From: Wesley Hayutin Date: Thu, 10 Dec 2015 14:57:59 -0500 Subject: [PATCH 059/137] update rdo-manager to add hosts to break out the overcloud nodes * add host entries for compute, coontrollers and ceph individually as well as addressing the overcloud as a whole * the overcloud nodes are broken into their components properly * browbeat @ https://github.com/jtaleric/browbeat Change-Id: If4f332c1b38d7b02f4ae125a09e4249ccb24ff3a --- .../overcloud/ansible-inventory.yml | 29 +++++++++++++++++-- 1 file changed, 27 insertions(+), 2 deletions(-) diff --git a/playbooks/installer/rdo-manager/overcloud/ansible-inventory.yml b/playbooks/installer/rdo-manager/overcloud/ansible-inventory.yml index ff6db4090..7570813c8 100644 --- a/playbooks/installer/rdo-manager/overcloud/ansible-inventory.yml +++ b/playbooks/installer/rdo-manager/overcloud/ansible-inventory.yml @@ -18,16 +18,41 @@ register: overcloud_nodes ignore_errors: yes - - name: add each overcloud node to ansible + - name: add each overcloud controller node to ansible + with_dict: overcloud_nodes.stdout + ignore_errors: yes add_host: name={{ item.key }} - groups=overcloud + groups=overcloud,controller ansible_ssh_host={{ item.key }} ansible_fqdn={{ item.value }} ansible_ssh_user="heat-admin" ansible_ssh_private_key_file="{{ lookup('env', 'PWD') }}/id_rsa_undercloud" + when: item.key.startswith('overcloud-controller') + + - name: add each overcloud compute node to ansible + with_dict: overcloud_nodes.stdout ignore_errors: yes + add_host: + name={{ item.key }} + groups=overcloud,compute + ansible_ssh_host={{ item.key }} + ansible_fqdn={{ item.value }} + ansible_ssh_user="heat-admin" + ansible_ssh_private_key_file="{{ lookup('env', 'PWD') }}/id_rsa_undercloud" + when: item.key.startswith('overcloud-compute') + + - name: add each overcloud ceph node to ansible with_dict: overcloud_nodes.stdout + ignore_errors: yes + add_host: + name={{ item.key 
}} + groups=overcloud,ceph + ansible_ssh_host={{ item.key }} + ansible_fqdn={{ item.value }} + ansible_ssh_user="heat-admin" + ansible_ssh_private_key_file="{{ lookup('env', 'PWD') }}/id_rsa_undercloud" + when: item.key.startswith('overcloud-ceph') - name: regenerate the inventory file after adding hosts hosts: localhost From 6b6169d4cfaf302482305f1c8991203cf69c5aff Mon Sep 17 00:00:00 2001 From: Yair Fried Date: Sun, 10 Jan 2016 11:09:46 +0200 Subject: [PATCH 060/137] [Provisioner:Openstack] Replace "wait_for_ssh" with builtin wait_for Set timeout to 10min to allow for QEOS7 delays hostvars[inventory_hostname] is still necessary. Still required for host reboot Change-Id: I4ca4147fd2cf09bef54662355e842bd3922bea57 --- playbooks/provisioner/openstack/main.yml | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/playbooks/provisioner/openstack/main.yml b/playbooks/provisioner/openstack/main.yml index 808af70f4..819f6a5cb 100644 --- a/playbooks/provisioner/openstack/main.yml +++ b/playbooks/provisioner/openstack/main.yml @@ -153,14 +153,14 @@ hosts: openstack_nodes gather_facts: no max_fail_percentage: 0 + sudo: no tasks: - name: Wait for Reachable Nodes - wait_for_ssh: - host: "{{ hostvars[inventory_hostname].ansible_ssh_host }}" - user: "{{ hostvars[inventory_hostname].ansible_ssh_user }}" - key: "{{ hostvars[inventory_hostname].ansible_ssh_private_key_file }}" - timeout: "{{ provisioner.ssh_timeout | default(omit) }}" - sudo: no + wait_for: + host: "{{ ansible_ssh_host }}" + port: 22 + search_regex: OpenSSH + timeout: 600 delegate_to: localhost - name: Ensure hostname is configured properly @@ -198,7 +198,13 @@ line: "IPADDR={{ hostvars[inventory_hostname].eth1_interface_ip }}" register: update_ifcfg1 - - local_action: - module: wait_for_ssh reboot_first=true host={{ hostvars[inventory_hostname].ansible_ssh_host }} user={{ hostvars[inventory_hostname].ansible_ssh_user }} key={{ 
hostvars[inventory_hostname].ansible_ssh_private_key_file }} + - name: reboot and wait for ssh when: update_ifcfgs|changed or update_ifcfg1|changed + delegate_to: localhost sudo: no + wait_for_ssh: + reboot_first: "true" + # delegate_to changes the context for ansible_vars + host: "{{ hostvars[inventory_hostname].ansible_ssh_host }}" + user: "{{ hostvars[inventory_hostname].ansible_ssh_user }}" + key: "{{ hostvars[inventory_hostname].ansible_ssh_private_key_file }}" From 7f280ea536c8da07f8a6a6743564639afbf98fbd Mon Sep 17 00:00:00 2001 From: Adriano Petrich Date: Tue, 9 Feb 2016 09:56:09 +0000 Subject: [PATCH 061/137] Blueprint for tls on tht Change-Id: I3fd2f2c47f46971bb62452f1019ccdab7dca263c --- blueprints/tls_support_for_tht.rst | 93 ++++++++++++++++++++++++++++++ 1 file changed, 93 insertions(+) create mode 100644 blueprints/tls_support_for_tht.rst diff --git a/blueprints/tls_support_for_tht.rst b/blueprints/tls_support_for_tht.rst new file mode 100644 index 000000000..fc8fcdc82 --- /dev/null +++ b/blueprints/tls_support_for_tht.rst @@ -0,0 +1,93 @@ +.. + This work is licensed under a Creative Commons Attribution 3.0 Unported + License. + + http://creativecommons.org/licenses/by/3.0/legalcode + +========================================== +Add tls support for Tripleo Heat Templates +========================================== + +In order for us to have a ssl installation of openstack we need some configuration templates files in place. + +I'm proposing a python library file to extract the templates from tht and export the filled up files were we can use to create the overcloud + +Problem description +=================== + +The problem is two fold and arises that the specific templates are versioned on tht, storing them on khaleesi +(keeping two versions on different projects of the same file) just creates technical debt. 
+ + +The second part of the problem is that the templates themselves are not good for sed-ing in the parameters +that we need due that we have to add the certificate and key pems into a yaml + +This is a snip of the template:: + + parameter_defaults: + SSLKey: | + The contents of the private key go here + ... + +We need it filled like this:: + + parameter_defaults: + SSLKey: | + -----BEGIN PRIVATE KEY----- + MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC3TWYoCCDKdgFA + 0q4/OcbB3N15h8lWtM3BI2KzqOMa4sUhVcPvT5Sp7c0hNPu7yDwfRVBFCr+eyEIC + o+1pSffmnp8Gzo4VeDWFcRjFymWXTw/fi8j1lj8jMZGH6lkqpgLDd1koUIxXpOKm + HCkz2rdEdYpjjmGkNCUnAe3xlwolJAFXwg1GyPXPxAJ/r6Ylp+/9COxR7OCpbPGr + lrN2q1rvntI4SrTfo+lX3lIlqvKbCnko7AECpGocC6+uV3au1T3OBwFAPh0u8OA3 + nJHKYkb8PAPpNRiUp1KX4o/9Y+zdm8uN/AIxcm7JtzFf8jIISjYMD4i0E6JrjYzw + nVAQVmbNAgMBAAECggEBAJ8sWP9+P2tQmbn+uU0yEMSb1L8KCO6ARwPmhHlauQvJ + zEEsRt7zDjeZxr2FUuw37u2AtTmfIdLyN1AvpaP+lYTwTUwN5hgCsQdVtJtdLGb+ + QtxueG26sM0Q6D1MZW3BhzjR1NxLRfN9vUtdvPHIhcivASN+qo96sKB07nkSHb8t + UVLVJORYqlwIHZP5q+U4QFHftCwpE6WvrR4CuSc0PmnBualb9I0BVeAteVecifQG + CTooUPFfF9enYuQSjnGpuaVunNxpJB69TR3YHP5N8GUXXqIXmEtuuoSpLXKCP2Xc + 7lg+uH4+VpKT2tlZOmhJU1Kk6OKoLLvDMG0Zf0eAaoECgYEA7DuHs/to81sm8fps + EHcDCl9YNzGuDbGQ2bi73ff7QWm9FyULsm1Vp1Eu3uvsQa3RRaC5oPrtRU58OcoD + n/4MEUKTDxjGUfw1NOfYPHeCSQHqM82L6AwwOMRH4qRV33hR16NDy8vhm+YseRNK + AQcMFflp/tBO8aUc6P2Ui69UftECgYEAxqQFfFRo4ItBiMT/7i2oiJ3u3YDq/C2w + l1DX+g7BD2SKHWmtxJJ4IiczxCL73tDrhLwp3kpqnyd5+6gdhgHtC/sJjx+RoThl + A5To5Fd+vCvtoJlexsXPEgnIkZFNqwstRUAaI0iTIdt/Nzymzpn/iYp0Qc0p9/vo + Unlq791C/z0CgYAnSxueZ2IkoHPQ6huRfYpG7mcI/z15T6DNZjnxiO8FCWaHdAUH + D8KgixNlxw5MOnJFx5841KQk1BI7tot10FcHg/BcIX3TY0UiYLIKFMLaC/R922G7 + HlPjDVr7quQRwLy0RpbfTjFfsiCRnxC/LQHooczsso9/CDzP0GYl+ervEQKBgATe + JBxF3UQTZYm6eiMWD1k5tY7MB/YiEH/ExWYlUmnUJuZNnqqAhF0h5MzbppxxNjRM + gCIoZLB9wSl/lymfhnWSs0tElMcEoMUTsxlVY4+s6+fRmlb4pfhlMPsQOn0Eixl1 + Vq6iqqhbvqRV4iiR8YcnU24BXxPqomjS/OHf5DJpAoGBAIIzvr9nR2W7ci3m55l0 + 
5/EjeChqpVUBKiy6PGWWqj6kfeGlKnDbCJL3DBu5agc46WlJG143I3SvbgtVBwhy + MJVRj77Zqk7BnOjAczTTxP2N/Ga7ZsWzJj8AlKpxBUEB6chdj2BLL3y+/JcuOEjg + 8LMslpo4Fx5NBmNcdvie06tf + -----END PRIVATE KEY----- + ... + +Proposed change +=============== + +A simple python library can get the templates from the tht installation read the templates +as a yaml structure do the required changes and output the fully templated files to where we can call +with the -e parameter to openstack overcloud deploy + + +Alternatives +------------ + +The alternative is storing those templates on khaleesi but I feel that just brings techinical debt. + + +Implementation +============== + +I have a implementation here: https://review.gerrithub.io/#/c/259773/ + + +Assignee(s) +----------- + +Primary assignee: + +Adriano Petrich + + From 44a20f31da665390dbaa5129a5527f874ac5d89a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Filip=20Hub=C3=ADk?= Date: Tue, 2 Feb 2016 12:43:58 +0100 Subject: [PATCH 062/137] [OSPd] Add debug output for undercloud installation We need to see what went wrong in UC installation right away, this can obviously really simplify debugging process. 
Change-Id: I1c85a2f6800b0b6b8a2e8d469670940f9f14e306 --- .../installer/rdo-manager/undercloud/run.yml | 23 +++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/playbooks/installer/rdo-manager/undercloud/run.yml b/playbooks/installer/rdo-manager/undercloud/run.yml index 137946397..76c323aab 100644 --- a/playbooks/installer/rdo-manager/undercloud/run.yml +++ b/playbooks/installer/rdo-manager/undercloud/run.yml @@ -8,6 +8,18 @@ - name: install the undercloud shell: openstack undercloud install --debug &> {{ instack_user_home }}/undercloud_install_initial_install.log + ignore_errors: yes + register: uc_status + + - name: get overview about what went wrong in undercloud installation + shell: | + tail -n 200 {{ instack_user_home }}/undercloud_install_initial_install.log + ignore_errors: yes + when: uc_status.rc != 0 + + - name: check if undercloud failed + fail: msg="Undercloud install failed" + when: uc_status.rc != 0 - name: copy files to home sudo: yes @@ -28,4 +40,15 @@ tasks: - name: install the undercloud shell: openstack undercloud install &> {{ instack_user_home }}/undercloud_install_idempotent_check.log + ignore_errors: yes + register: uc_idemp_status + + - name: get overview about what went wrong in idempotent undercloud installation + shell: | + tail -n 200 {{ instack_user_home }}/undercloud_install_idempotent_check.log + ignore_errors: yes + when: uc_idemp_status.rc != 0 + - name: check if idempotent undercloud installation failed + fail: msg="Undercloud install failed" + when: uc_idemp_status.rc != 0 From ff3696cdef5a22632b64d67cc08c2b2981108569 Mon Sep 17 00:00:00 2001 From: Adriano Petrich Date: Tue, 9 Feb 2016 15:58:19 +0000 Subject: [PATCH 063/137] Add --tags on the fetch for upstream for patch-rpm we added the --tags for the gerrit but ironic needs it for the upstream as well Change-Id: I1ec7a45be34c283a0e60a3039fc4b304360afc06 --- roles/patch_rpm/tasks/pre.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/roles/patch_rpm/tasks/pre.yml 
b/roles/patch_rpm/tasks/pre.yml index 500dd0ee3..ef23f7214 100644 --- a/roles/patch_rpm/tasks/pre.yml +++ b/roles/patch_rpm/tasks/pre.yml @@ -37,6 +37,7 @@ if [ "{{ patch.upstream is defined }}" == "True" ]; then git remote add -f upstream {{ tmp_dir }}/dist-git/{{ patch.upstream is defined and patch.upstream.name }}; git fetch upstream; + git fetch upstream --tags; fi; args: chdir: "{{ tmp_dir }}/dist-git/{{ patch.dist_git.name }}" From c0915ad43c43bfaa9ee346559c79b5e5c77e0ab3 Mon Sep 17 00:00:00 2001 From: Luigi Toscano Date: Thu, 28 Jan 2016 10:24:32 +0100 Subject: [PATCH 064/137] Allow to define specific roles for test users The accounts created for testing purposes can now define which roles they should be part of (sometimes needed by some services). Change-Id: Ic192212425bf352f22c9f0c6551376c805d9ec4f --- roles/openstack/create_users/tasks/main.yml | 16 ++++++++++++++++ settings/tester/api.yml | 1 + .../tester/integration/component/horizon.yml | 1 + 3 files changed, 18 insertions(+) diff --git a/roles/openstack/create_users/tasks/main.yml b/roles/openstack/create_users/tasks/main.yml index a5983b9e5..ecae97999 100644 --- a/roles/openstack/create_users/tasks/main.yml +++ b/roles/openstack/create_users/tasks/main.yml @@ -26,3 +26,19 @@ endpoint: "{{ controller_auth_url }}" state: present with_items: tester.accounts + +# assign the roles to users +# TODO: Ansible 2, use skip_missing: yes +- keystone_user: + role: "{{ item.1 }}" + user: "{{ item.0.username }}" + password: "{{ item.0.password }}" + tenant: "{{ item.0.tenant_name }}" + login_user: admin + login_password: "{{ admin_password }}" + login_tenant_name: admin + endpoint: "{{ controller_auth_url }}" + state: present + with_subelements: + - tester.accounts + - roles diff --git a/settings/tester/api.yml b/settings/tester/api.yml index 9d86689f8..89a0c785d 100644 --- a/settings/tester/api.yml +++ b/settings/tester/api.yml @@ -9,6 +9,7 @@ tester: - username: 'demo' tenant_name: 'demo' password: 'secrete' + 
roles: [] node: prefix: diff --git a/settings/tester/integration/component/horizon.yml b/settings/tester/integration/component/horizon.yml index 2bccae96a..2c4bdd6da 100644 --- a/settings/tester/integration/component/horizon.yml +++ b/settings/tester/integration/component/horizon.yml @@ -59,3 +59,4 @@ tester: - username: 'demo' tenant_name: 'demo' password: 'redhat' + roles: [] From 6490a96b9544d804faf9c0713ab32a3ee1f47c0e Mon Sep 17 00:00:00 2001 From: Wesley Hayutin Date: Mon, 8 Feb 2016 17:54:13 -0500 Subject: [PATCH 065/137] create more diskspace for the overcloud nodes Change-Id: I09c27edc6ba9947e917865a23a8a578cd545a345 --- .../rdo-manager/templates/virt-setup-env.j2 | 4 ++++ .../rdo-manager/overcloud-prep-ceph.yml | 20 +++++++++++++++++++ .../rdo-manager/overcloud-test.yml | 1 + .../rdo_manager/flavor/justright.yml | 1 + 4 files changed, 26 insertions(+) create mode 100644 playbooks/post-deploy/rdo-manager/overcloud-prep-ceph.yml diff --git a/playbooks/installer/rdo-manager/templates/virt-setup-env.j2 b/playbooks/installer/rdo-manager/templates/virt-setup-env.j2 index eb170f9dc..8036302b5 100644 --- a/playbooks/installer/rdo-manager/templates/virt-setup-env.j2 +++ b/playbooks/installer/rdo-manager/templates/virt-setup-env.j2 @@ -39,6 +39,10 @@ export NODE_MEM={{ installer.nodes.node_mem | default('4096') }} export NODE_CPU={{ installer.nodes.node_cpu | default('1') }} {%endif %} +{% if installer.nodes.node_disk is defined %} +export NODE_DISK={{ installer.nodes.node_disk | default('50') }} +{%endif %} + {% if installer.nodes.undercloud_node_mem is defined %} export UNDERCLOUD_NODE_MEM={{ installer.nodes.undercloud_node_mem | default('4096') }} {%endif %} diff --git a/playbooks/post-deploy/rdo-manager/overcloud-prep-ceph.yml b/playbooks/post-deploy/rdo-manager/overcloud-prep-ceph.yml new file mode 100644 index 000000000..4659f5357 --- /dev/null +++ b/playbooks/post-deploy/rdo-manager/overcloud-prep-ceph.yml @@ -0,0 +1,20 @@ +--- +- name: setup the physical 
volumes for ceph volume tests + hosts: controller + tasks: + - name: get the physical volume name + sudo: yes + shell: pvscan | awk '{print $2}' | head -n 1; + register: physical_volume + + - name: debug + debug: var=physical_volume.stdout + + - name: expand the physical volume + sudo: yes + shell: pvresize --setphysicalvolumesize 10G {{ physical_volume.stdout }} + + - name: display the physical volume + sudo: yes + shell: pvscan + diff --git a/playbooks/post-deploy/rdo-manager/overcloud-test.yml b/playbooks/post-deploy/rdo-manager/overcloud-test.yml index d94beca2f..980fcb7c6 100644 --- a/playbooks/post-deploy/rdo-manager/overcloud-test.yml +++ b/playbooks/post-deploy/rdo-manager/overcloud-test.yml @@ -1,6 +1,7 @@ --- - include: overcloud-network.yml - include: overcloud-test-images.yml +- include: overcloud-prep-ceph.yml - name: Group hosts by post action - tempest hosts: undercloud diff --git a/settings/installer/rdo_manager/flavor/justright.yml b/settings/installer/rdo_manager/flavor/justright.yml index 70ac642a3..ecd301320 100644 --- a/settings/installer/rdo_manager/flavor/justright.yml +++ b/settings/installer/rdo_manager/flavor/justright.yml @@ -3,5 +3,6 @@ installer: nodes: node_mem: 6144 node_cpu: 1 + node_disk: 50 undercloud_node_mem: 8192 undercloud_node_cpu: 4 From 6a889ced15706d06fc9b2c23eef3b572e91d58b7 Mon Sep 17 00:00:00 2001 From: Wesley Hayutin Date: Wed, 10 Feb 2016 09:49:45 -0500 Subject: [PATCH 066/137] update tempest skip files with current bugs Change-Id: I4d640e38428bda3c4fd445c4ed1e67c77b13e4bb --- .../files/tempest_skip/rdoci-rhos-7-director-rdo-manager | 2 +- .../files/tempest_skip/rdoci-rhos-8-director-rdo-manager | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/playbooks/post-deploy/rdo-manager/files/tempest_skip/rdoci-rhos-7-director-rdo-manager b/playbooks/post-deploy/rdo-manager/files/tempest_skip/rdoci-rhos-7-director-rdo-manager index dc63e9f64..fc25f71c2 100644 --- 
a/playbooks/post-deploy/rdo-manager/files/tempest_skip/rdoci-rhos-7-director-rdo-manager +++ b/playbooks/post-deploy/rdo-manager/files/tempest_skip/rdoci-rhos-7-director-rdo-manager @@ -1,4 +1,4 @@ # rhbz1266947 -tempest.api.identity.admin.v3 -# rhbz1295561 +# rhbz1284845 -tempest.api.image.v2.test_images.BasicOperationsImagesTest.test_update_image diff --git a/playbooks/post-deploy/rdo-manager/files/tempest_skip/rdoci-rhos-8-director-rdo-manager b/playbooks/post-deploy/rdo-manager/files/tempest_skip/rdoci-rhos-8-director-rdo-manager index f67da5b36..64787dfca 100644 --- a/playbooks/post-deploy/rdo-manager/files/tempest_skip/rdoci-rhos-8-director-rdo-manager +++ b/playbooks/post-deploy/rdo-manager/files/tempest_skip/rdoci-rhos-8-director-rdo-manager @@ -1,7 +1,7 @@ # rhbz1266947 -tempest.api.identity.admin.v3 -tempest.api.identity.v3.test_api_discovery -# rhbz1295561 +# rhbz1284845 -tempest.api.image.v2.test_images.BasicOperationsImagesTest.test_update_image # rhbz1304930 -tempest.api.compute.servers.test_create_server From 8c33369b746a1a05d5653d766e8fd030d639ae8e Mon Sep 17 00:00:00 2001 From: Steve Linabery Date: Mon, 1 Feb 2016 22:54:30 -0600 Subject: [PATCH 067/137] Support use of http-proxy-based installation This allows the user to specify details of an http proxy server to be used by the virthost and undercloud nodes when access to some network resources is only available via the proxy. For example if a remote install needs to use rhos-release to access repos on the vpn, one can run squid proxy on a device attached to the vpn and point their install at the proxy. The user creates a file in the settings/installer/rdo_manager/proxy (see default none.yml for needed values) and specifies it by setting installer-proxy with ksgen. 
Change-Id: I0673484fda4a92f9499e97a425d6a0adc9c384ad --- .../virthost/instack-virt-setup/run.yml | 2 ++ playbooks/installer/rdo-manager/images/run.yml | 2 ++ .../rdo-manager/templates/build-img-env.j2 | 4 ++++ .../rdo-manager/templates/rpm.macros.proxy.j2 | 2 ++ .../rdo-manager/templates/virt-setup-env.j2 | 4 ++++ .../rdo-manager/yum_repos/repo-rhos.yml | 17 +++++++++++++++++ settings/installer/rdo_manager.yml | 1 + settings/installer/rdo_manager/proxy/none.yml | 6 ++++++ 8 files changed, 38 insertions(+) create mode 100644 playbooks/installer/rdo-manager/templates/rpm.macros.proxy.j2 create mode 100644 settings/installer/rdo_manager/proxy/none.yml diff --git a/playbooks/installer/rdo-manager/environment-setup/virthost/instack-virt-setup/run.yml b/playbooks/installer/rdo-manager/environment-setup/virthost/instack-virt-setup/run.yml index 073c88012..818e8a640 100644 --- a/playbooks/installer/rdo-manager/environment-setup/virthost/instack-virt-setup/run.yml +++ b/playbooks/installer/rdo-manager/environment-setup/virthost/instack-virt-setup/run.yml @@ -7,6 +7,8 @@ - name: get the guest-image sudo: yes + environment: + http_proxy: "{{ installer.http_proxy_url }}" get_url: > url="{{ distro.images[distro.name][distro.full_version].remote_file_server }}{{ distro.images[distro.name][distro.full_version].guest_image_name }}" dest=/root/{{ distro.images[distro.name][distro.full_version].guest_image_name }} diff --git a/playbooks/installer/rdo-manager/images/run.yml b/playbooks/installer/rdo-manager/images/run.yml index e893ac93f..8642ec99f 100644 --- a/playbooks/installer/rdo-manager/images/run.yml +++ b/playbooks/installer/rdo-manager/images/run.yml @@ -93,6 +93,8 @@ - name: import images hosts: undercloud + environment: + http_proxy: "{{ installer.http_proxy_url }}" tasks: - name: ensure wget is installed yum: name=wget state=latest diff --git a/playbooks/installer/rdo-manager/templates/build-img-env.j2 b/playbooks/installer/rdo-manager/templates/build-img-env.j2 index 
c13871d3d..445bd463a 100644 --- a/playbooks/installer/rdo-manager/templates/build-img-env.j2 +++ b/playbooks/installer/rdo-manager/templates/build-img-env.j2 @@ -31,3 +31,7 @@ export DELOREAN_TRUNK_REPO="{{ product.repo['delorean'][ansible_distribution][di export DELOREAN_REPO_FILE="{{ product.repo.delorean.repo_file }}" export NODE_DIST=centos7 {%endif %} + +{% if installer.proxy != 'none' %} +export http_proxy={{ installer.http_proxy_url }} +{%endif %} diff --git a/playbooks/installer/rdo-manager/templates/rpm.macros.proxy.j2 b/playbooks/installer/rdo-manager/templates/rpm.macros.proxy.j2 new file mode 100644 index 000000000..6d666b999 --- /dev/null +++ b/playbooks/installer/rdo-manager/templates/rpm.macros.proxy.j2 @@ -0,0 +1,2 @@ +%_httpproxy {{ installer.http_proxy_host }} +%_httpport {{ installer.http_proxy_port }} diff --git a/playbooks/installer/rdo-manager/templates/virt-setup-env.j2 b/playbooks/installer/rdo-manager/templates/virt-setup-env.j2 index 8036302b5..e929c8715 100644 --- a/playbooks/installer/rdo-manager/templates/virt-setup-env.j2 +++ b/playbooks/installer/rdo-manager/templates/virt-setup-env.j2 @@ -58,3 +58,7 @@ export TESTENV_ARGS=" --baremetal-bridge-names 'brbm' --vlan-trunk-ids='10 20 30 {% if product.full_version != "7-director" and installer.network.isolation != "none" %} export TESTENV_ARGS=" --baremetal-bridge-names 'brbm' " {%endif %} + +{% if installer.proxy != 'none' %} +export http_proxy={{ installer.http_proxy_url }} +{%endif %} diff --git a/playbooks/installer/rdo-manager/yum_repos/repo-rhos.yml b/playbooks/installer/rdo-manager/yum_repos/repo-rhos.yml index 3ad022ab3..9df920870 100644 --- a/playbooks/installer/rdo-manager/yum_repos/repo-rhos.yml +++ b/playbooks/installer/rdo-manager/yum_repos/repo-rhos.yml @@ -5,10 +5,27 @@ hosts: "{{ repo_host }}:&RedHat" vars: - ansible_ssh_user: root + environment: + http_proxy: "{{ installer.http_proxy_url }}" gather_facts: yes tasks: + - name: set proxy server for yum configuration + sudo: 
yes + lineinfile: dest=/etc/yum.conf line="proxy={{ installer.http_proxy_url }}" + when: installer.proxy not in ['none'] + + - name: rpm macro for proxy + sudo: yes + template: src=../templates/rpm.macros.proxy.j2 dest=/etc/rpm/macros.proxy + when: installer.proxy not in ['none'] + + - name: Install release tool on machine + command: "rpm -i {{ product.rpm }}" + when: installer.proxy not in ['none'] + - name: Install release tool on machine command: "yum localinstall -y {{ product.rpm }}" + when: installer.proxy in ['none'] #this will uncouple the virthost version from the undercloud and overcloud rhel versions - name: create directory for DIB yum repo configurations diff --git a/settings/installer/rdo_manager.yml b/settings/installer/rdo_manager.yml index 3f6b35a30..3d9e38f3c 100644 --- a/settings/installer/rdo_manager.yml +++ b/settings/installer/rdo_manager.yml @@ -61,3 +61,4 @@ defaults: updates: none custom_deploy: none introspection_method: bulk + proxy: none diff --git a/settings/installer/rdo_manager/proxy/none.yml b/settings/installer/rdo_manager/proxy/none.yml new file mode 100644 index 000000000..89107df60 --- /dev/null +++ b/settings/installer/rdo_manager/proxy/none.yml @@ -0,0 +1,6 @@ +--- +installer: + proxy: 'none' + http_proxy_host: '' + http_proxy_port: '' + http_proxy_url: '' From f91b778ed6aaed2c8785c3f2b130ca1b496eaba1 Mon Sep 17 00:00:00 2001 From: Attila Darazs Date: Mon, 8 Feb 2016 16:53:57 +0100 Subject: [PATCH 068/137] beaker provisioner: remove redundant option The manual provisioner should be used when no provisioning is necessary, thus the skip_provision is superfluous. 
Change-Id: I6b33a89ff317dfd86128a2d9bc6d6bb90aba3183 --- playbooks/provisioner/beaker/main.yml | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/playbooks/provisioner/beaker/main.yml b/playbooks/provisioner/beaker/main.yml index 75e00e351..58e81a155 100644 --- a/playbooks/provisioner/beaker/main.yml +++ b/playbooks/provisioner/beaker/main.yml @@ -6,9 +6,6 @@ - name: Group by provisioner type group_by: key={{ provisioner.type }} - - name: Group for skipping the provisioning step - group_by: key={{ provisioner.skip }} - - name: Add the host to the inventory add_host: name="host0" @@ -19,7 +16,7 @@ ansible_ssh_host="{{ lookup('env', 'BEAKER_MACHINE') }}" - name: Use beaker to provision the machine - hosts: localhost:!skip_provision + hosts: localhost tasks: - name: Check if beakerCheckOut.sh script exists stat: path="{{base_dir}}/khaleesi-settings/beakerCheckOut.sh" From 64172a9e6a1779d4edab37c05262ee49d070ae0d Mon Sep 17 00:00:00 2001 From: Adriano Petrich Date: Thu, 14 Jan 2016 10:00:53 +0000 Subject: [PATCH 069/137] Generate the configuration for tht and ssl Adding a library to do the tht template manipulation to install using ssl Change-Id: I76edfef04f2478a4bd755380097e7dc0e4ad63e2 --- library/tls_tht | 1 + library/tls_tht.py | 129 ++++++++++++++++++ .../overcloud/heat-templates/run.yml | 19 +++ .../templates/deploy-overcloudrc.j2 | 5 + settings/installer/rdo_manager.yml | 2 + settings/installer/rdo_manager/ssl/off.yml | 2 + settings/installer/rdo_manager/ssl/on.yml | 2 + 7 files changed, 160 insertions(+) create mode 120000 library/tls_tht create mode 100644 library/tls_tht.py create mode 100644 settings/installer/rdo_manager/ssl/off.yml create mode 100644 settings/installer/rdo_manager/ssl/on.yml diff --git a/library/tls_tht b/library/tls_tht new file mode 120000 index 000000000..1d01251ad --- /dev/null +++ b/library/tls_tht @@ -0,0 +1 @@ +tls_tht.py \ No newline at end of file diff --git a/library/tls_tht.py b/library/tls_tht.py new file 
mode 100644 index 000000000..0ef393ac5 --- /dev/null +++ b/library/tls_tht.py @@ -0,0 +1,129 @@ +#!/usr/bin/python +# coding: utf-8 -*- + +# (c) 2016, Adriano Petrich +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + +DOCUMENTATION = ''' +--- +module: tls_tht +version_added: "1.9" +short_description: Generate the tht templates for enabled ssl +description: + - Generate the tht templates for enabled ssl +options: + source_dir: + description: + - directory to copy the templates from + required: false + default: "/usr/share/openstack-tripleo-heat-templates/" + dest_dir: + description: + - were to copy the files to + required: false + default: "" + cert_filename: + description: + - the cert pem filename + required: false + default: cert.pem + cert_ca_filename: + description: + - the key pem filename + required: false + default: key.pem + key_filename: + description: + - the CA cert pem filename + required: false + default: cert.pem + + +''' + +EXAMPLES = ''' +# Generate the tht templates for enabled ssl +- tls_tht: +''' + +import yaml +from ansible.module_utils.basic import * # noqa + + +def _open_yaml(filename): + with open(filename, "r") as stream: + tmp_dict = yaml.load(stream) + return tmp_dict + + +def create_enable_file(certpem, keypem, source_dir, dest_dir): + output_dict = _open_yaml("{}environments/enable-tls.yaml".format(source_dir)) + + for key in output_dict["parameter_defaults"]["EndpointMap"]: + 
if output_dict["parameter_defaults"]["EndpointMap"][key]["host"] == "CLOUDNAME": + output_dict["parameter_defaults"]["EndpointMap"][key]["host"] = "IP_ADDRESS" + + output_dict["parameter_defaults"]["SSLCertificate"] = certpem + output_dict["parameter_defaults"]["SSLKey"] = keypem + + output_dict["resource_registry"]["OS::TripleO::NodeTLSData"] = \ + "{}/puppet/extraconfig/tls/tls-cert-inject.yaml".format(source_dir) + + with open("{}enable-tls.yaml".format(dest_dir), "w") as stream: + yaml.safe_dump(output_dict, stream, default_style='|') + + +def create_anchor_file(cert_ca_pem, source_dir, dest_dir): + output_dict = _open_yaml( + "{}environments/inject-trust-anchor.yaml".format(source_dir) + ) + + output_dict["parameter_defaults"]["SSLRootCertificate"] = cert_ca_pem + + output_dict["resource_registry"]["OS::TripleO::NodeTLSCAData"] = \ + "{}/puppet/extraconfig/tls/tls-cert-inject.yaml".format(source_dir) + + with open("{}inject-trust-anchor.yaml".format(dest_dir), "w") as stream: + yaml.safe_dump(output_dict, stream, default_style='|') + + +def main(): + module = AnsibleModule( + argument_spec=dict( + source_dir=dict(default="/usr/share/openstack-tripleo-heat-templates/", + required=False), + dest_dir=dict(default="", required=False), + cert_filename=dict(default="cert.pem", required=False), + cert_ca_filename=dict(default="cert.pem", required=False), + key_filename=dict(default="key.pem", required=False), + ) + ) + + with open(module.params["cert_filename"], "r") as stream: + certpem = stream.read() + + with open(module.params["cert_ca_filename"], "r") as stream: + cert_ca_pem = stream.read() + + with open(module.params["key_filename"], "r") as stream: + keypem = stream.read() + + create_enable_file(certpem, keypem, module.params["source_dir"], module.params["dest_dir"]) + create_anchor_file(cert_ca_pem, module.params["source_dir"], module.params["dest_dir"]) + module.exit_json(changed=True) + + +if __name__ == '__main__': + main() diff --git 
a/playbooks/installer/rdo-manager/overcloud/heat-templates/run.yml b/playbooks/installer/rdo-manager/overcloud/heat-templates/run.yml index 3e7329c9c..491c36edd 100644 --- a/playbooks/installer/rdo-manager/overcloud/heat-templates/run.yml +++ b/playbooks/installer/rdo-manager/overcloud/heat-templates/run.yml @@ -55,3 +55,22 @@ - swift-storage.yaml - ceph-storage.yaml when: item in nic_configs.stdout_lines + + - name: create self-signed SSL cert + command: openssl req -x509 -nodes -newkey rsa:2048 -subj "/CN={{ hw_env.ExternalVIP }}" -days 3650 -keyout overcloud-privkey.pem -out overcloud-cacert.pem -extensions v3_ca + when: installer.ssl + + - name: fetch template from single remote host + tls_tht: + dest_dir: "{{ instack_user_home }}/" + cert_filename: "overcloud-cacert.pem" + cert_ca_filename: "overcloud-cacert.pem" + key_filename: "overcloud-privkey.pem" + when: installer.ssl + + - name: copy the self-signed SSL cert + shell: > + cp overcloud-cacert.pem /etc/pki/ca-trust/source/anchors/; + update-ca-trust extract; + sudo: true + when: installer.ssl diff --git a/playbooks/installer/rdo-manager/templates/deploy-overcloudrc.j2 b/playbooks/installer/rdo-manager/templates/deploy-overcloudrc.j2 index 24a0b549e..bd1705740 100644 --- a/playbooks/installer/rdo-manager/templates/deploy-overcloudrc.j2 +++ b/playbooks/installer/rdo-manager/templates/deploy-overcloudrc.j2 @@ -57,6 +57,11 @@ export DEPLOY_COMMAND="$DEPLOY_COMMAND -e ~/network-environment.yaml" export DEPLOY_COMMAND="$DEPLOY_COMMAND -e ~/plan-parameter-neutron-bridge.yaml " {% endif %} +{% if installer.ssl == True %} +export DEPLOY_COMMAND="$DEPLOY_COMMAND -e ~/enable-tls.yaml \ + -e ~/inject-trust-anchor.yaml " +{% endif %} + {% if installer.deploy.type == 'templates' and product.build is defined and product.build != 'ga' %} if [ "$CEPHSTORAGESCALE" -gt "0" ]; then export DEPLOY_COMMAND="$DEPLOY_COMMAND -e /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml" diff --git 
a/settings/installer/rdo_manager.yml b/settings/installer/rdo_manager.yml index 3d9e38f3c..78d978cc6 100644 --- a/settings/installer/rdo_manager.yml +++ b/settings/installer/rdo_manager.yml @@ -44,6 +44,7 @@ job: - /home/stack/*.log - /home/stack/*.json - /home/stack/*.conf + - /home/stack/*.yml - /home/stack/deploy-overcloudrc - /home/stack/network-environment.yaml - /home/stack/tempest/*.xml @@ -62,3 +63,4 @@ defaults: custom_deploy: none introspection_method: bulk proxy: none + ssl: "off" diff --git a/settings/installer/rdo_manager/ssl/off.yml b/settings/installer/rdo_manager/ssl/off.yml new file mode 100644 index 000000000..1df3e36ec --- /dev/null +++ b/settings/installer/rdo_manager/ssl/off.yml @@ -0,0 +1,2 @@ +installer: + ssl: false diff --git a/settings/installer/rdo_manager/ssl/on.yml b/settings/installer/rdo_manager/ssl/on.yml new file mode 100644 index 000000000..3284cbaa1 --- /dev/null +++ b/settings/installer/rdo_manager/ssl/on.yml @@ -0,0 +1,2 @@ +installer: + ssl: true From d6de7dc55e7555b33fc5e48ecfbe47c69745f723 Mon Sep 17 00:00:00 2001 From: Adriano Petrich Date: Mon, 8 Feb 2016 16:21:15 +0000 Subject: [PATCH 070/137] dnf examples for packagebuild Also fixed some issues with installation of rdopkg Change-Id: I8662dac24fb291496c41f76fbf93f8b24b3ff70d --- doc/cookbook.rst | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/doc/cookbook.rst b/doc/cookbook.rst index adc3ce453..c6e5c89a2 100644 --- a/doc/cookbook.rst +++ b/doc/cookbook.rst @@ -54,7 +54,7 @@ or on Fedora 22:: sudo dnf install -y python-virtualenv gcc -Create the virtual envionment, install ansible, and ksgen util:: +Create the virtual environment, install ansible, and ksgen util:: virtualenv venv source venv/bin/activate @@ -155,24 +155,27 @@ Setup Configuration: ```````````````````` What you will need: -Ansible 1.9 installed I would recomend on a virtualenv:: +Ansible 1.9 installed I would recommend on a virtualenv:: virtualenv foobar source 
foobar/bin/activate - pip install ansible==1.9.4 + pip install ansible==1.9.2 ``rdopkg`` is what is going to do the heavy lifting https://github.com/redhat-openstack/rdopkg -.. Note:: The yum version is a bit old so it is better to install from source +There's a public repo for the up to date version that can be installed like this:: + + wget https://copr.fedoraproject.org/coprs/jruzicka/rdopkg/repo/epel-7/jruzicka-rdopkg-epel-7.repo + sudo cp jruzicka-rdopkg-epel-7.repo /etc/yum.repos.d + + yum install -y rdopkg -Like this:: +Newer fedora versions uses dnf instead of yum so for the last step use:: - git clone https://github.com/redhat-openstack/rdopkg - cd rdopkg - python setup.py develop --user + dnf install -y rdopkg You will aslo need a ``rhpkg`` or a ``fedpkg`` those can be obtained from yum or dnf:: @@ -182,6 +185,12 @@ or:: yum install -y fedpkg +Again for newer fedora versions replace yum for dnf:: + + dnf install -y rhpkg + dnf install -y fedpkg + + In khaleesi will build the package locally (on a /tmp/tmp.patch_rpm_* directory) but in order to do that it needs a file called ``hosts_local`` on your khaleesi folder From c053c7a67474083fbdaa97a8598d59d0276ccd38 Mon Sep 17 00:00:00 2001 From: Wesley Hayutin Date: Wed, 10 Feb 2016 15:55:39 -0500 Subject: [PATCH 071/137] update the settings for the Cinder loop back devices Change-Id: I53aea78e02d9c8c627afad3c96cb2cac1ac55718 --- .../overcloud/heat-templates/run.yml | 5 +++++ .../templates/default-overcloud-settings.j2 | 3 +++ .../templates/deploy-overcloudrc.j2 | 2 ++ .../rdo-manager/overcloud-prep-ceph.yml | 20 ------------------- .../rdo-manager/overcloud-test.yml | 1 - 5 files changed, 10 insertions(+), 21 deletions(-) create mode 100644 playbooks/installer/rdo-manager/templates/default-overcloud-settings.j2 delete mode 100644 playbooks/post-deploy/rdo-manager/overcloud-prep-ceph.yml diff --git a/playbooks/installer/rdo-manager/overcloud/heat-templates/run.yml 
b/playbooks/installer/rdo-manager/overcloud/heat-templates/run.yml index 491c36edd..ac87358ae 100644 --- a/playbooks/installer/rdo-manager/overcloud/heat-templates/run.yml +++ b/playbooks/installer/rdo-manager/overcloud/heat-templates/run.yml @@ -74,3 +74,8 @@ update-ca-trust extract; sudo: true when: installer.ssl + + - name: Copy default heat settings template + template: src={{ base_dir }}/khaleesi/playbooks/installer/rdo-manager/templates/default-overcloud-settings.j2 + dest={{ instack_user_home }}/default-overcloud-settings.yaml + mode=0755 diff --git a/playbooks/installer/rdo-manager/templates/default-overcloud-settings.j2 b/playbooks/installer/rdo-manager/templates/default-overcloud-settings.j2 new file mode 100644 index 000000000..961da0c1e --- /dev/null +++ b/playbooks/installer/rdo-manager/templates/default-overcloud-settings.j2 @@ -0,0 +1,3 @@ +parameters: + CinderLVMLoopDeviceSize: 10000 + diff --git a/playbooks/installer/rdo-manager/templates/deploy-overcloudrc.j2 b/playbooks/installer/rdo-manager/templates/deploy-overcloudrc.j2 index bd1705740..3854a2577 100644 --- a/playbooks/installer/rdo-manager/templates/deploy-overcloudrc.j2 +++ b/playbooks/installer/rdo-manager/templates/deploy-overcloudrc.j2 @@ -75,3 +75,5 @@ export DEPLOY_COMMAND="$DEPLOY_COMMAND -e ~/openstack-virtual-baremetal/template {% if product.full_version != '7-director' %} export DEPLOY_COMMAND="$DEPLOY_COMMAND -e /usr/share/openstack-tripleo-heat-templates/environments/puppet-pacemaker.yaml" {% endif %} + +export DEPLOY_COMMAND="$DEPLOY_COMMAND -e ~/default-overcloud-settings.yaml" diff --git a/playbooks/post-deploy/rdo-manager/overcloud-prep-ceph.yml b/playbooks/post-deploy/rdo-manager/overcloud-prep-ceph.yml deleted file mode 100644 index 4659f5357..000000000 --- a/playbooks/post-deploy/rdo-manager/overcloud-prep-ceph.yml +++ /dev/null @@ -1,20 +0,0 @@ ---- -- name: setup the physical volumes for ceph volume tests - hosts: controller - tasks: - - name: get the physical volume name 
- sudo: yes - shell: pvscan | awk '{print $2}' | head -n 1; - register: physical_volume - - - name: debug - debug: var=physical_volume.stdout - - - name: expand the physical volume - sudo: yes - shell: pvresize --setphysicalvolumesize 10G {{ physical_volume.stdout }} - - - name: display the physical volume - sudo: yes - shell: pvscan - diff --git a/playbooks/post-deploy/rdo-manager/overcloud-test.yml b/playbooks/post-deploy/rdo-manager/overcloud-test.yml index 980fcb7c6..d94beca2f 100644 --- a/playbooks/post-deploy/rdo-manager/overcloud-test.yml +++ b/playbooks/post-deploy/rdo-manager/overcloud-test.yml @@ -1,7 +1,6 @@ --- - include: overcloud-network.yml - include: overcloud-test-images.yml -- include: overcloud-prep-ceph.yml - name: Group hosts by post action - tempest hosts: undercloud From 5753c436e4eac40174bad3805e665b438b6a6c24 Mon Sep 17 00:00:00 2001 From: Wesley Hayutin Date: Wed, 3 Feb 2016 21:19:17 -0500 Subject: [PATCH 072/137] rdo-manager quickstart blueprint Change-Id: Id804d3f2c097374a31814a18ba67c9861a49d2c4 --- .../break-out-overcloud-playbooks.rst | 0 blueprints/tripleo-quickstart.rst | 115 ++++++++++++++++++ 2 files changed, 115 insertions(+) rename blueprints/{templates => }/break-out-overcloud-playbooks.rst (100%) create mode 100644 blueprints/tripleo-quickstart.rst diff --git a/blueprints/templates/break-out-overcloud-playbooks.rst b/blueprints/break-out-overcloud-playbooks.rst similarity index 100% rename from blueprints/templates/break-out-overcloud-playbooks.rst rename to blueprints/break-out-overcloud-playbooks.rst diff --git a/blueprints/tripleo-quickstart.rst b/blueprints/tripleo-quickstart.rst new file mode 100644 index 000000000..bb7e619a4 --- /dev/null +++ b/blueprints/tripleo-quickstart.rst @@ -0,0 +1,115 @@ +.. + This work is licensed under a Creative Commons Attribution 3.0 Unported + License. 
+ + http://creativecommons.org/licenses/by/3.0/legalcode + + +=========================== +Replace instack-virt-setup with tripleo quickstart +=========================== + +instack-virt-setup is the official way to setup a poc virt environment for tripleo [1] + +A replacement for instack-virt-setup has been adopted by the rdo community [2][3] + + +[1] http://tripleo.org/environments/environments.html#virtual-environment +[2] https://www.rdoproject.org/rdo-manager/ +[3] https://github.com/redhat-openstack/tripleo-quickstart + +Problem description +=================== + +instack-virt-setup itself is not tested in tripleo, nor is it supported downstream. +It's not an idempotent setup of the tripleo environement and it's also not very configurable. + + +Proposed change +=============== + +- Add support for executing the tripleo quickstart to setup the undercloud and overcloud +nodes in virtual environments and then hand off to khaleesi for the overcloud deployment. + +- Update tripleo quickstart to work with the downstream ospd content. + +- Once completed this work will bring the downstream virtual deployments in line with the accepted +upstream virtual deployment + +- For puddle's the goal is to have an undercloud appliance that is simply imported and started. +The appliance will be built with the quickstart playbooks. + +- In the tripleo, rdo, or poodle workflow, if patches or updates need to be applied to the +undercloud appliance, the quickstart is already built to handle updates. + +- Provide a community standard for building the undercloud when needed. It will be much easier +to push this standard if the code is single purpose and not commingled with khaleesi. + +- Other tools whether they be ansible, python, or shell based can all interface with khaleesi +via the hosts and ssh config file. A well defined interface into khlaeesi than try to +include *everything* in khaleesi itself may prove to be valuable. 
+ +Alternatives +------------ + +Create other tools and workflows that call libvirtd to stand up and provision virt environments +for rdo-manager/ospd + +- libvirt implementations, e.g. https://review.gerrithub.io/#/c/259615/ + +- no-op or manual + +Implementation +============== + +Assignee(s) +----------- + +myoung@redhat.com +sshnaidm@redhat.com +whayutin@redhat.com +trown@redhat.com + + +Primary assignee: + + RDO: sshnaidm@redhat.com + OSPD: myoung@redhat.com + +Milestones +---------- + +Target Milestone for completion: + + M1. Proof of Concept - create beta code and jobs to test tripelo quickstart + M2. Proof of Concept - create a branch of tripleo quickstart for downstream ospd use + M3. Design for Production - create a design for upstream/downstream quickstart + M4. Implementation + M5. Production deployment + + +Work Items +---------- + +Work items or tasks -- break the feature up into the things that need to be +done to implement it. Those parts might end up being done by different people, +but we're mostly trying to understand the time line for implementation. + + - POC rdo-manager job that executes the khaleesi provisioner, tripleo quickstart to setup the + undercloud, and hands off to khaleesi for the overcloud deployment, test and log collection. + - JJB created for rdo-manager jobs in ci.centos for the above workflow + - A ospd-7 undercloud qcow is created + - The tripleo quickstart is branched for ospd and updated to use the downstream yum repos and + adjustments are made for ospd-7 and ospd-8 + - A POC ospd job that executes the khaleesi provisioner, tripleo quickstart (ospd) to setup the + undercloud, and hands off to khaleesi for the overcloud deployment. + - A design is created for tripleo quickstart to elegently and efficently handle the subtle differences + between setting up rdo-manager and ospd director for all the supported versions. 
+ - The design for M6 is implemented + - tripleo quickstart is formally supported in CI + + +Dependencies +============ + + From 561fbcfef7e79ca9f370a9eed0e4d0e8deeeb797 Mon Sep 17 00:00:00 2001 From: Gabriele Cerami Date: Thu, 11 Feb 2016 11:42:45 +0100 Subject: [PATCH 073/137] jjb: modified scale feature jobs to run daily Change-Id: I21787ff13cc1d50960b30cf7618eb7cd674655d9 --- jenkins-jobs/features.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/jenkins-jobs/features.yml b/jenkins-jobs/features.yml index d52061f71..3581f2e39 100644 --- a/jenkins-jobs/features.yml +++ b/jenkins-jobs/features.yml @@ -46,6 +46,8 @@ - tests-publishers - email: recipients: whayutin@redhat.com adarazs@redhat.com + triggers: + - timed: '@daily' - project: name: rdo-manager-centosci-feature-jobs From 31c1554237bfe53de223fd85266c8bdd9d7baf6c Mon Sep 17 00:00:00 2001 From: Adriano Petrich Date: Thu, 4 Feb 2016 13:33:18 +0000 Subject: [PATCH 074/137] Make depends-on generate the rpm building settings Change the way depends-on work if we know how to build packages for the dependence it will generate an extra_settings_.yml for each package that can be called with ansible-playbook -i hosts -e @ksgen_settings.yml -e @extra_settings_1.yml playbooks/build_gate_rpm.yml to build the extra packages if we don't have the settings/rpms/.yml but we have a directory there it will fetch the patch for that directory ( this is the case for depends-on changes to khaleesi and khaleesi-settings ) these are not real dependencies it is just for gating depends-on:I911f6c47f@codeng depends-on:I6503cb7e6@codeng Change-Id: I6bfa13de7bbf989ddaf2a982f4300c7e74c989d7 --- jenkins-jobs/builders.yaml | 12 +-- playbooks/depends-on-repo.yml | 5 + playbooks/depends-on-rpm.yml | 5 + playbooks/depends-on.yml | 5 - roles/depends-on/files/depends_on.py | 148 ++++++++++++++++++++++++--- roles/depends-on/tasks/main.yml | 4 +- 6 files changed, 150 insertions(+), 29 deletions(-) create mode 100644 
playbooks/depends-on-repo.yml create mode 100644 playbooks/depends-on-rpm.yml delete mode 100644 playbooks/depends-on.yml diff --git a/jenkins-jobs/builders.yaml b/jenkins-jobs/builders.yaml index 495bc47f9..49b62e9e6 100644 --- a/jenkins-jobs/builders.yaml +++ b/jenkins-jobs/builders.yaml @@ -27,7 +27,7 @@ # fetch dependent gating changes if [ $GERRIT_CHANGE_COMMIT_MESSAGE ]; then - ansible-playbook -i local_hosts -vv playbooks/depends-on.yml + ansible-playbook -i local_hosts -vv playbooks/depends-on-repo.yml fi # generate config @@ -116,7 +116,7 @@ # fetch dependent gating changes if [ $GERRIT_CHANGE_COMMIT_MESSAGE ]; then - ansible-playbook -i local_hosts -vv playbooks/depends-on.yml + ansible-playbook -i local_hosts -vv playbooks/depends-on-repo.yml fi # generate config @@ -196,7 +196,7 @@ # fetch dependent gating changes if [ $GERRIT_CHANGE_COMMIT_MESSAGE ]; then - ansible-playbook -i local_hosts -vv playbooks/depends-on.yml + ansible-playbook -i local_hosts -vv playbooks/depends-on-repo.yml fi # generate config @@ -299,7 +299,7 @@ # fetch dependent gating changes if [ $GERRIT_CHANGE_COMMIT_MESSAGE ]; then - ansible-playbook -i local_hosts -vv playbooks/depends-on.yml + ansible-playbook -i local_hosts -vv playbooks/depends-on-repo.yml fi # generate config @@ -379,7 +379,7 @@ # fetch dependent gating changes if [ $GERRIT_CHANGE_COMMIT_MESSAGE ]; then - ansible-playbook -i local_hosts -vv playbooks/depends-on.yml + ansible-playbook -i local_hosts -vv playbooks/depends-on-repo.yml fi # generate config @@ -462,7 +462,7 @@ # fetch dependent gating changes if [ $GERRIT_CHANGE_COMMIT_MESSAGE ]; then - ansible-playbook -i local_hosts -vv playbooks/depends-on.yml + ansible-playbook -i local_hosts -vv playbooks/depends-on-repo.yml fi # generate config diff --git a/playbooks/depends-on-repo.yml b/playbooks/depends-on-repo.yml new file mode 100644 index 000000000..2a1bdb4b8 --- /dev/null +++ b/playbooks/depends-on-repo.yml @@ -0,0 +1,5 @@ +--- +- name: fetch commit 
dependencies on repos + roles: + - { role: depends-on, update: "repo" } + hosts: localhost diff --git a/playbooks/depends-on-rpm.yml b/playbooks/depends-on-rpm.yml new file mode 100644 index 000000000..f1bca4767 --- /dev/null +++ b/playbooks/depends-on-rpm.yml @@ -0,0 +1,5 @@ +--- +- name: fetch commit dependencies and build rpms + roles: + - { role: depends-on, update: "rpm" } + hosts: localhost diff --git a/playbooks/depends-on.yml b/playbooks/depends-on.yml deleted file mode 100644 index 26c54f14a..000000000 --- a/playbooks/depends-on.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -- name: fetch commit dependencies - roles: - - depends-on - hosts: localhost diff --git a/roles/depends-on/files/depends_on.py b/roles/depends-on/files/depends_on.py index 82d5eb5f6..94c58c972 100755 --- a/roles/depends-on/files/depends_on.py +++ b/roles/depends-on/files/depends_on.py @@ -28,10 +28,15 @@ import subprocess import sys import urlparse +import yaml +from argparse import ArgumentParser +from glob import glob +from jinja2 import Template # we ignore any other host reference ALLOWED_HOSTS = ["", "codeng", "review.gerrithub.io:29418"] + def parse_commit_msg(msg=None): """Look for dependency links in the commit message.""" if msg is None: @@ -59,6 +64,7 @@ def parse_commit_msg(msg=None): tag.group(1), tag.group(2)) return tags + def get_refspec_urls(tags): """Parsing the necessary url info for the referenced changes""" def_host = os.getenv("GERRIT_HOST", 'review.gerrithub.io') @@ -79,10 +85,16 @@ def get_refspec_urls(tags): output = subprocess.check_output(shlex.split(cmd)).splitlines()[0] # parse it to json data = json.loads(output) + if "currentPatchSet" not in data: logging.warning("failed to fetch data from gerrit for " "Change-Id: %s", change) continue + if data.get("status") not in ["NEW"]: + logging.warning("Patch already merged " + "Change-Id: %s", change) + continue + parsed_url = urlparse.urlparse(data["url"]) # gerrit does not provide the repo URL in the reply, we have to # 
construct it from the clues @@ -94,42 +106,144 @@ def get_refspec_urls(tags): # get the repo name from the last part after the slash repo_folder = data["project"].split("/")[-1] repo_ref = data["currentPatchSet"]["ref"] - targets.append([repo_folder, repo_url, repo_ref]) + repo_branch = data["branch"] + + targets.append([repo_folder, repo_url, repo_ref, repo_branch]) logging.debug("data query result for Change-Id: %s, server: %s:%s, " - "folder %s, url: %s, ref: %s", - change, host, port, repo_folder, repo_url, repo_ref) + "folder %s, url: %s, ref: %s, branch:%s", + change, host, port, repo_folder, + repo_url, repo_ref, repo_branch) return targets -def checkout_changes(targets, basedir="."): - """Fetch and checkout the changes for the target repos""" + +def update_repo(project, url, ref, branch, basedir): checkout_cmd = "git checkout FETCH_HEAD" - for folder, url, ref in targets: - folder_path = os.path.join(basedir, folder) + try: + # I didn't find the settings/rpm for the project + # so I'm going to try to fetch the changes if the + # directory for that project exists in the tree + folder_path = os.path.join(basedir, project) logging.debug("changing working dir to %s", folder_path) os.chdir(folder_path) fetch_cmd = "git fetch %s %s" % (url, ref) logging.debug("fetch command: %s", fetch_cmd) subprocess.Popen(shlex.split(fetch_cmd)).wait() subprocess.Popen(shlex.split(checkout_cmd)).wait() + except OSError: + logging.warning( + "Directory not found for {} skipping".format(project) + ) + + +def update_rpm(project, ref, branch, basedir, ksgen, filenumber): + output_dict = {} + # doing a late evaluation on ksgen_settings existence + # because it might not be needed + if not ksgen: + logging.error( + "ksgen_settings not found" + ) + sys.exit(1) + + rpm_instructions = glob("{}/khaleesi/settings/rpm/*{}.yml".format(basedir, project)) + if not rpm_instructions: + logging.warning( + "khaleesi/settings/rpm/*{}.yml not found in {}".format(project, basedir) + ) + return + + 
with open(rpm_instructions[0], "r") as fd: + # the replace here is important because !lookup is not + # valid jinja2 template and it will be used later + output_dict = yaml.load(fd.read().replace("!lookup", "")) + + # do the changes needed for this patch + output_dict["patch"]["gerrit"]["branch"] = branch + output_dict["patch"]["gerrit"]["refspec"] = ref + + # but the change still leaves two private urls + # like private.building.gerrit.url + # luckly those exist in ksgen_settings and using + # jinja2 templates here will fill those values + t = Template(yaml.safe_dump(output_dict, default_flow_style=False)) -def test_module(): + with open("{}/khaleesi/extra_settings_{}.yml".format(basedir, filenumber), "w") as fd: + fd.write(t.render(ksgen)) + fd.write("\n") # for extra niceness + logging.warning( + "wrote {}/khaleesi/extra_settings_{}.yml for {}".format(basedir, filenumber, project) + ) + + +def generate_config(targets, basedir=".", update=None, ksgenfile=None): + """ + This works in two ways + + if we know how to build the package (ie. 
it exists on settings/rpms/) + we generate one extra_settings_.yml for each of the packages + + if we do not know how to build it but there's a directory with that name + under basedir we will update that to the ref specified and check that out + + """ + if not ksgenfile: + ksgenfile = "{}/khaleesi/ksgen_settings.yml".format(basedir) + + try: + with open(ksgenfile, "r") as fd: + ksgen = yaml.load(fd) + except IOError: + ksgen = None + + filenumber = 1 + for project, url, ref, branch in targets: + if update == "repo": + update_repo(project, url, ref, branch, basedir) + + elif update == "rpm": + update_rpm(project, ref, branch, basedir, ksgen, filenumber) + filenumber += 1 + + +def test_module(basedir, update, ksgenfile): """Test with some known working Change-Ids""" test_msg = ("This is a test commit message.\n\n" "Depends-On: If4cea049\n" - "Depends-On: I1c3f14ba@codeng") + "Depends-On: Id0aef5ee6dcb@review.gerrithub.io:29418\n" + "Depends-On: I1c3f14ba@codeng\n" + "Depends-On: I62e3c43afd@codeng\n" + "Depends-On: I02c15311@codeng") test_tags = parse_commit_msg(base64.b64encode(test_msg)) test_targets = get_refspec_urls(test_tags) - checkout_changes(test_targets, "/tmp") + generate_config(test_targets, basedir, update, ksgenfile) + -def run(repo_dir): +def run(basedir, update, ksgenfile): + logging.warning( + "getting dependencies for {}".format(update) + ) run_tags = parse_commit_msg() run_targets = get_refspec_urls(run_tags) - checkout_changes(run_targets, repo_dir) + if run_targets: + generate_config(run_targets, basedir, update, ksgenfile) + else: + logging.warning("Nothing to do. 
Exiting") + if __name__ == "__main__": logging.basicConfig(level=logging.DEBUG) - if len(sys.argv) < 2: - print "Usage: %s " % (sys.argv[0]) - else: - run(sys.argv[1]) - #test_module() + ap = ArgumentParser("Generate changes to repos or rpm settings based on depends-on gerrit comments") + ap.add_argument('basedir', + default=".", + help="basedir to work from") + ap.add_argument('build', + default="repo", + choices=['repo', 'rpm'], + nargs='?', + help="What to build") + ap.add_argument('ksgen_settings', + nargs='?', + help="where to find the ksgen_settings.yml file") + args = ap.parse_args() + run(args.basedir, args.build, args.ksgen_settings) + # test_module(args.basedir, args.build, args.ksgen_settings) diff --git a/roles/depends-on/tasks/main.yml b/roles/depends-on/tasks/main.yml index d2e2c70d4..52191a72c 100644 --- a/roles/depends-on/tasks/main.yml +++ b/roles/depends-on/tasks/main.yml @@ -8,6 +8,8 @@ ignore_errors: yes register: is_internal +- debug: msg="getting dependencies for {{ update }}" + - name: search and fetch dependent changes - script: depends_on.py {{ lookup('env', 'WORKSPACE') }} + script: depends_on.py {{ lookup('env', 'WORKSPACE') }} {{ update }} when: ("\"\" != \"{{ lookup('env', 'GERRIT_CHANGE_COMMIT_MESSAGE') }}\"" and {{ is_internal.rc }} == 0) From add382a03e12a01886d31566977758fa31df231a Mon Sep 17 00:00:00 2001 From: Adriano Petrich Date: Tue, 2 Feb 2016 10:30:48 +0000 Subject: [PATCH 075/137] depends-on patch_rpm blueprint Change-Id: Id0aef5ee6dcb29d645cf87a4d6d4b023083b792f --- blueprints/depends-on_rpm_build.rst | 129 ++++++++++++++++++++++++++++ 1 file changed, 129 insertions(+) create mode 100644 blueprints/depends-on_rpm_build.rst diff --git a/blueprints/depends-on_rpm_build.rst b/blueprints/depends-on_rpm_build.rst new file mode 100644 index 000000000..458b8cd0c --- /dev/null +++ b/blueprints/depends-on_rpm_build.rst @@ -0,0 +1,129 @@ +.. + This work is licensed under a Creative Commons Attribution 3.0 Unported + License. 
+ + http://creativecommons.org/licenses/by/3.0/legalcode + + +=========================== +rpm build for depends-on +=========================== + +Depends-on functionality is broken at this moment. + + +Problem description +=================== + +If a comment on a patch has a depends-on: [:codeng] + +It is supposed to get those patchs and add to the current run. For exemple is a tripleoclient needs + +a review from tht it is expected that we will gate the tripleoclient with a patched tht with that review. + +But, right now that funtionality is not working. As we are no longer building on repos under a git + +clone on the {{ base_dir }} the way that depends-on is doing right now does not work. What we need + +is for it to use the patch-rpm and build package playbook to create the packages so we can upload + +it to the test run. Furthermore to make things more complicated there are two kinds of depends-on. + +The filepath-related changes like when a patch depends on another patch from khaleesi and + +khaleesi-settings and the rpm-related when a patch depends on a change on another rpm + + +Proposed change +=============== + + +The idea is to split the depends-on playbook into two playbooks + +depends-on-repo +--------------- + +That it will update the current HEAD of the repos under the base_dir + + +depends-on-rpm +-------------- + +This will generate an extra small ksgen_settings.yml probably called extra_settings_{{num}}.yml + +that is going to be passed together with ksgen_settings.yml + +The extra_settings_.yml would be jus the needed change to the ksgen_settings and it would be + +something like: + + +.. 
code-block:: yaml + gating_repo: openstack-tripleo-heat-templates + patch: + dist_git: + branch: + 7-director: rhos-7.0-pmgr-rhel-7 + 8-director: rhos-8.0-director-rhel-7 + name: openstack-tripleo-heat-templates + url: 'http://pkgs.devel.redhat.com/cgit/rpms/openstack-tripleo-heat-templates' + gerrit: + branch: rhos-7.0-patches # the filled up branch from the dependend review + name: gerrit-openstack-tripleo-heat-templates + refspec: refs/changes/41/65241/9 # the filled up refspec from the dependend review + url: 'https://code.engineering.redhat.com/gerrit/openstack-tripleo-heat-templates' + upstream: + name: upstream-openstack-tripleo-heat-templates + url: https://git.openstack.org/openstack/tripleo-heat-templates + + +So a job would look like this: + + +.. code-block:: bash + # fetch dependent gating changes for khaleesi and khaleesi-settings + if [ $GERRIT_CHANGE_COMMIT_MESSAGE ]; then + ansible-playbook -i local_hosts -vv playbooks/depends-on-repo.yml + fi + + # generate config + ksgen --config-dir settings generate \ + + ... yada yada yada + + --extra-vars @../khaleesi-settings/settings/product/rhos/private_settings/redhat_internal.yml \ + ksgen_settings.yml + + # fetch dependent gating changes for related rpms + if [ $GERRIT_CHANGE_COMMIT_MESSAGE ]; then + ansible-playbook -i local_hosts -vv playbooks/depends-on-rpm.yml + fi + + for extra_settings in extra_settings_*.yml; do + if [ -e "$extra_settings" ] ; then + ansible-playbook -vv --extra-vars @ksgen_settings.yml --extra_vars @$extra_settings -i local_hosts playbooks/build_gate_rpm.yml; + fi; + done + #now the built rpms are in the base_dir/generated_rpms/*.rpm + + ... continue with the deployment ... + + +The second extra-vars will overwrite the common parameters of the ksgen_settings allowing us to + +build multiple packages The downside is that it will only work for the packages that we know how + +to build rpms. 
+ + +Implementation +============== + +Assignee(s) +----------- + +Primary assignee: + + apetrich@redhat.com + + From 22d29565bd6cad830efe01a5fbaaf7651bd3ec04 Mon Sep 17 00:00:00 2001 From: Attila Darazs Date: Thu, 11 Feb 2016 11:53:22 +0100 Subject: [PATCH 076/137] explicitly define RDO_RELEASE for image building This will fix the liberty production rdo-manager jobs where the default is incorrect. Change-Id: I1bfcef6901413f483e51f74c4f3a9c6bd17668d0 --- playbooks/installer/rdo-manager/templates/build-img-env.j2 | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/playbooks/installer/rdo-manager/templates/build-img-env.j2 b/playbooks/installer/rdo-manager/templates/build-img-env.j2 index 445bd463a..78326b530 100644 --- a/playbooks/installer/rdo-manager/templates/build-img-env.j2 +++ b/playbooks/installer/rdo-manager/templates/build-img-env.j2 @@ -1,5 +1,9 @@ export DIB_LOCAL_IMAGE={{ distro.images[distro.name][distro.full_version].guest_image_name }} +{% if product.name == 'rdo' %} +export RDO_RELEASE={{ product.full_version }} +{%endif %} + {% if product.repo_type is defined and product.repo_type in ["poodle", "puddle"] %} export DIB_YUM_REPO_CONF="{{installer.dib_dir}}/rhos-release-{{product.repo.core_product_version}}-director.repo {{installer.dib_dir}}/rhos-release-{{product.repo.core_product_version}}.repo {{installer.dib_dir}}/rhos-release-rhel-{{distro.full_version}}.repo" export USE_DELOREAN_TRUNK=0 From 396e6dcfabe6c24a5c087bfb977a28f3a30763f4 Mon Sep 17 00:00:00 2001 From: Yair Fried Date: Thu, 11 Feb 2016 14:40:40 +0200 Subject: [PATCH 077/137] Explicit host ref when delegate_to is used "delegate_to" might change the context of ansible vars. 
Change-Id: I98ba11b65985519f46e7de8ee69d14fa17eb4996 --- playbooks/provisioner/foreman/main.yml | 2 +- playbooks/provisioner/openstack/main.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/playbooks/provisioner/foreman/main.yml b/playbooks/provisioner/foreman/main.yml index 5d9c87f96..cfe176ae4 100644 --- a/playbooks/provisioner/foreman/main.yml +++ b/playbooks/provisioner/foreman/main.yml @@ -26,7 +26,7 @@ auth_url: "{{ provisioner.foreman.auth_url }}" username: "{{ provisioner.foreman.username }}" password: "{{ provisioner.foreman.password }}" - host_id: "{{ ansible_ssh_host }}" + host_id: "{{ hostvars[inventory_hostname].ansible_ssh_host }}" rebuild: "{{ rebuild }}" wait_for_host: "{{ provisioner.foreman.wait_for_host|lower }}" retries: 4 diff --git a/playbooks/provisioner/openstack/main.yml b/playbooks/provisioner/openstack/main.yml index 819f6a5cb..06adb4089 100644 --- a/playbooks/provisioner/openstack/main.yml +++ b/playbooks/provisioner/openstack/main.yml @@ -157,7 +157,7 @@ tasks: - name: Wait for Reachable Nodes wait_for: - host: "{{ ansible_ssh_host }}" + host: "{{ hostvars[inventory_hostname].ansible_ssh_host }}" port: 22 search_regex: OpenSSH timeout: 600 From f819f9c517ab19ec8aa37d4d307126bb6dc7a98f Mon Sep 17 00:00:00 2001 From: Mathieu Bultel Date: Fri, 12 Feb 2016 16:44:46 +0100 Subject: [PATCH 078/137] Refactor the heat_stack module and return all stack metadatas Change-Id: I55ab8ac9cadee5444a7eed862a2fa9f93ab009f3 --- library/heat_stack.py | 218 ++++++++++++++++++++++-------------------- 1 file changed, 112 insertions(+), 106 deletions(-) diff --git a/library/heat_stack.py b/library/heat_stack.py index 0307bbda1..1427d9196 100644 --- a/library/heat_stack.py +++ b/library/heat_stack.py @@ -2,13 +2,13 @@ #coding: utf-8 -*- try: - from time import sleep + import time from keystoneclient.v2_0 import client as ksclient from heatclient.client import Client from heatclient.common import template_utils from heatclient.common import 
utils except ImportError: - print("failed=True msg='heatclient, keystoneclient are required'") + print("failed=True msg='heatclient and keystoneclient is required'") DOCUMENTATION = ''' --- @@ -67,109 +67,100 @@ # Create a stack with given template and environment files - name: create stack heat_stack: - stack_name: test - state: present login_username: admin login_password: admin - auth_url: http://192.168.1.14:5000/v2.0 - login_tenant_name: admin + auth_url: "http://192.168.1.14:5000/v2.0" tenant_name: admin - template: /home/stack/test.yaml + stack_name: test + state: present + template: "/home/stack/ovb/templates/quintupleo.yaml" + environment_files: ['/home/stack/ovb/templates/resource-registry.yaml','/home/stack/ovb/templates/env.yaml'] + + - name: delete stack + heat_stack: + stack_name: test + state: absent + login_username: admin + login_password: admin + auth_url: "http://192.168.1.14:5000/v2.0" + tenant_name: admin ''' -_os_keystone = None -_os_tenant_id = None -_os_network_id = None -_inc = 0 - -def _get_ksclient(module, kwargs): - try: - kclient = ksclient.Client(username=kwargs.get('login_username'), - password=kwargs.get('login_password'), - tenant_name=kwargs.get('login_tenant_name'), - auth_url=kwargs.get('auth_url')) - except Exception, e: - module.fail_json(msg = "Error authenticating to the keystone: %s" %e.message) - global _os_keystone - _os_keystone = kclient - return kclient - -def _get_endpoint(module, ksclient): - try: - endpoint = ksclient.service_catalog.url_for(service_type='orchestration', endpoint_type='publicURL') - except Exception, e: - module.fail_json(msg = "Error getting network endpoint: %s" % e.message) - return endpoint - -def _set_tenant_id(module): - global _os_tenant_id - if not module.params['tenant_name']: - _os_tenant_id = _os_keystone.tenant_id - else: - tenant_name = module.params['tenant_name'] - - for tenant in _os_keystone.tenants.list(): - if tenant.name == tenant_name: - _os_tenant_id = tenant.id - break - if not 
_os_tenant_id: - module.fail_json(msg = "The tenant id cannot be found, please check the parameters") - -def _get_heat_client(module, kwargs): - _ksclient = _get_ksclient(module, kwargs) - token = _ksclient.auth_token - endpoint = _get_endpoint(module, _ksclient) - try: - heat = Client('1', endpoint=endpoint, token=token) - except Exception, e: - module.fail_json(msg = " Error in connecting to heat: %s" % e.message) - return heat - -def _create_stack(module, heat): - heat.format = 'json' - template_file = module.params['template'] - env_file = module.params['environment_files'] - tpl_files, template = template_utils.get_template_contents(template_file) - env_files, env = template_utils.process_multiple_environments_and_files(env_paths=env_file) - - stack = heat.stacks.create(stack_name=module.params['stack_name'], - template=template, - environment=env, - files=dict(list(tpl_files.items()) + list(env_files.items())), - parameters={}) - uid = stack['stack']['id'] - - stack = heat.stacks.get(stack_id=uid).to_dict() - while stack['stack_status'] == 'CREATE_IN_PROGRESS': - stack = heat.stacks.get(stack_id=uid).to_dict() - sleep(5) - if stack['stack_status'] == 'CREATE_COMPLETE': - return stack['id'] - else: - module.fail_json(msg = "Failure in creating stack: ".format(stack)) - -def _list_stack(module, heat): - fields = ['id', 'stack_name', 'stack_status', 'creation_time', - 'updated_time'] - uids = [] - stacks = heat.stacks.list() - return utils.print_list(stacks, fields) - -def _delete_stack(module, heat): - heat.stacks.delete(module.param['stack_name']) - return _list_stack - -def _get_stack_id(module, heat): - stacks = heat.stacks.list() - while True: - try: - stack = stacks.next() - if module.params['stack_name'] == stack.stack_name: - return stack.id - except StopIteration: - break +def obj_gen_to_dict(gen): + """Enumerate through generator of object and return lists of dictonaries. 
+ """ + obj_list = [] + for obj in gen: + obj_list.append(obj.to_dict()) + return obj_list + + +class Stack(object): + + def __init__(self, kwargs): + self.client = self._get_client(kwargs) + + def _get_client(self, kwargs, endpoint_type='publicURL'): + """ get heat client """ + kclient = ksclient.Client(**kwargs) + token = kclient.auth_token + endpoint = kclient.service_catalog.url_for(service_type='orchestration', + endpoint_type=endpoint_type) + kwargs = { + 'token': token, + } + return Client('1', endpoint=endpoint, token=token) + + def create(self, name, + template_file, + env_file=None, + format='json'): + """ create heat stack with the given template and environment files """ + self.client.format = format + tpl_files, template = template_utils.get_template_contents(template_file) + env_files, env = template_utils.process_multiple_environments_and_files(env_paths=env_file) + + stack = self.client.stacks.create(stack_name=name, + template=template, + environment=env, + files=dict(list(tpl_files.items()) + list(env_files.items())), + parameters={}) + uid = stack['stack']['id'] + + stack = self.client.stacks.get(stack_id=uid).to_dict() + while stack['stack_status'] == 'CREATE_IN_PROGRESS': + stack = self.client.stacks.get(stack_id=uid).to_dict() + time.sleep(5) + if stack['stack_status'] == 'CREATE_COMPLETE': + return stack + else: return False + def list(self): + """ list created stacks """ + fields = ['id', 'stack_name', 'stack_status', 'creation_time', + 'updated_time'] + uids = [] + stacks = self.client.stacks.list() + utils.print_list(stacks, fields) + return obj_gen_to_dict(stacks) + + def delete(self, name): + """ delete stack with the given name """ + self.client.stacks.delete(name) + return self.list() + + def get_id(self, name): + """ get stack id by name """ + stacks = self.client.stacks.list() + while True: + try: + stack = stacks.next() + if name == stack.stack_name: + return stack.id + except StopIteration: + break + return False def main(): 
argument_spec = openstack_argument_spec() @@ -181,24 +172,39 @@ def main(): tenant_name = dict(default=None), )) module = AnsibleModule(argument_spec=argument_spec) - heat = _get_heat_client(module, module.params) - _set_tenant_id(module) + state = module.params['state'] + stack_name = module.params['stack_name'] + template = module.params['template'] + environment_files = module.params['environment_files'] + kwargs = { + 'username': module.params['login_username'], + 'password': module.params['login_password'], + 'tenant_name': module.params['tenant_name'], + 'auth_url': module.params['auth_url'] + } + + stack = Stack(kwargs) if module.params['state'] == 'present': - stack_id = _get_stack_id(module, heat) + stack_id = stack.get_id(stack_name) if not stack_id: - stack_id = _create_stack(module, heat) - module.exit_json(changed = True, result = "Created" , id = stack_id) + stack = stack.create(name=stack_name, + template_file=template, + env_file=environment_files) + if not stack: + module.fail_json(msg="Failed to create stack") + module.exit_json(changed = True, result = "Created" , stack = stack) else: module.exit_json(changed = False, result = "success" , id = stack_id) else: - stack_id = _get_stack_id(module, stack) + stack_id = stack.get_id(stack_name) if not stack_id: module.exit_json(changed = False, result = "success") else: - _delete_stack(module, stack, stack_id) + stack.delete(stack_name) module.exit_json(changed = True, result = "deleted") # this is magic, see lib/ansible/module.params['common.py from ansible.module_utils.basic import * from ansible.module_utils.openstack import * -main() +if __name__ == '__main__': + main() From c9840e97bc8865d4df55298758b3805b34aee718 Mon Sep 17 00:00:00 2001 From: Mike Burns Date: Thu, 11 Feb 2016 18:11:05 -0500 Subject: [PATCH 079/137] don't download deploy-ramdisk-ironic for osp8 and later the bash ramdisk is deprecated Change-Id: I9193ddcfb6062f236528786532d19ccaab2e338f --- 
settings/installer/rdo_manager/images/import.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/settings/installer/rdo_manager/images/import.yml b/settings/installer/rdo_manager/images/import.yml index 39e7a251b..cdc272599 100644 --- a/settings/installer/rdo_manager/images/import.yml +++ b/settings/installer/rdo_manager/images/import.yml @@ -9,7 +9,6 @@ installer: - overcloud-full '8-director': files: - - deploy-ramdisk-ironic - ironic-python-agent - overcloud-full url: From fc24a1ede560f1bb2ce20086ebd020804e556975 Mon Sep 17 00:00:00 2001 From: Jon Schlueter Date: Fri, 5 Feb 2016 15:08:09 -0500 Subject: [PATCH 080/137] [component-test] allow component tests to enable additional repos allow components to specify additional repos in jenkins-config.yml Also add the ability to disable repos from jenkins-config.yml this would look like an entry to setup called enable_repos or disable_repos Several components have need of either overrides or rhel-extras or other installed but disabled repos to be enabled to install all of the required rpms to do their component testing. This allows for easier method of doing that. 
the workaround was to hack it into the run section with something like sudo yum-config-manager --enable ; sudo yum install -y ; also added informational "yum repolist all" right after enabling/disabling repos to make debugging jenkins-config.yml setups easier for component ci guys Also removed the yum repolist -d7 call from playbooks/installer/package/pre.yml Change-Id: Iac29053cc5314f659b297fc12b4b7c0714c0d354 --- doc/khaleesi.rst | 10 ++++++++++ playbooks/installer/project/pre.yml | 3 --- roles/component-test/pre/tasks/packages.yml | 17 +++++++++++++++++ 3 files changed, 27 insertions(+), 3 deletions(-) diff --git a/doc/khaleesi.rst b/doc/khaleesi.rst index 8ef71a613..0bc23e07f 100644 --- a/doc/khaleesi.rst +++ b/doc/khaleesi.rst @@ -392,6 +392,14 @@ rpm_deps: [ gcc, git, "{{ hostvars[inventory_hostname][tester.component.tox_targ # The RPMs that shouldn't be installed when running tests, no matter which tester chosen remove_rpm: [ "{{ hostvars[inventory_hostname][tester.component.tox_target]['remove_rpm'] }}" ] +# Any additional repos besides defaults that should be enabled to support testing +# the repos need to be already installed. this just allows you to enable them. +add_additional_repos: [ ] + +# Any repos to be disabled to support testing +# this just allows you to disable them. +remove_additional_repos: [ ] + # Common pre-run steps for all testers neutron_virt_run_config: run: > @@ -410,6 +418,8 @@ test_config: virt: RedHat-7: setup: + enable_repos: "{{ add_additional_repos }}" # Optional. When you would like to look in additional places for RPMs + disable_repos: "{{ remove_additional_repos }}" # Optional. When you would like to remove repos to search install: "{{ rpm_deps }}" # Optional. When you would like to install requirements remove: "{{ remove_rpm }}" # Optional. When you would like to remove packages run: "{{ neutron_virt_run_config.run }}" # A must. 
The actual command used to run the tests diff --git a/playbooks/installer/project/pre.yml b/playbooks/installer/project/pre.yml index e37f3a6d4..b856e8c74 100644 --- a/playbooks/installer/project/pre.yml +++ b/playbooks/installer/project/pre.yml @@ -50,9 +50,6 @@ shell: "rhos-release -x; rhos-release -d {{ product.version.major }}" when: product.repo.type is defined and product.repo.type in ['poodle'] - - name: Print installed repositores - shell: "yum repolist -d 7" - - name: print out test env hosts: controller gather_facts: yes diff --git a/roles/component-test/pre/tasks/packages.yml b/roles/component-test/pre/tasks/packages.yml index 650c541c5..c21d2c7bf 100644 --- a/roles/component-test/pre/tasks/packages.yml +++ b/roles/component-test/pre/tasks/packages.yml @@ -1,4 +1,21 @@ --- +- name: disable any repos specified + sudo: yes + shell: yum-config-manager --disable {{ item }} + with_items: test_cfg.setup.disable_repos + when: + test_cfg.setup | default(false) and test_cfg.setup.disable_repos | default(false) + +- name: enable any additional repos to be used + sudo: yes + shell: yum-config-manager --enable {{ item }} + with_items: test_cfg.setup.enable_repos + when: + test_cfg.setup | default(false) and test_cfg.setup.enable_repos | default(false) + +- name: grab short reposlist + shell: yum repolist all + - name: install test dependencies rpm needed to run test sudo: yes yum: pkg={{ item }} state=latest From 7b9194f934fcf2c746816a55bc8dc7c71cff6a17 Mon Sep 17 00:00:00 2001 From: Attila Darazs Date: Mon, 15 Feb 2016 14:05:57 +0100 Subject: [PATCH 081/137] fix ironic debug output Switch to use shell module, command does not support shell pipes. 
Change-Id: Ib9be7847cae4dd8259d2c708c82ca6691b6a7363 --- playbooks/installer/rdo-manager/overcloud/status.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/playbooks/installer/rdo-manager/overcloud/status.yml b/playbooks/installer/rdo-manager/overcloud/status.yml index f60addee6..8b389e5a6 100644 --- a/playbooks/installer/rdo-manager/overcloud/status.yml +++ b/playbooks/installer/rdo-manager/overcloud/status.yml @@ -53,7 +53,7 @@ - name: grep for errors in the ironic logs when: overcloud_deployment_result is defined and overcloud_deployment_result != "0" sudo: yes - command: "cat /var/log/ironic/* | grep -v ERROR_FOR_DIVISION_BY_ZERO | grep ERROR" + shell: "cat /var/log/ironic/* | grep -v ERROR_FOR_DIVISION_BY_ZERO | grep ERROR" ignore_errors: yes - name: show ironic nodes create template From 626a2e2cbdd8a993057fe242dd1fbb617131fb49 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Filip=20Hub=C3=ADk?= Date: Tue, 2 Feb 2016 16:51:05 +0100 Subject: [PATCH 082/137] Add info about which job triggered beaker provisioning Useful when debugging beaker issues (stucked jobs). Adds relation (link) to specific jenkins job which started beaker provisioning directly to whiteboard field in description, therefore job can be quickly found. Otherwise we are not able to figure out who started what. POC: https://beaker.engineering.redhat.com/recipes/2488170, see Whiteboard field. 
Change-Id: Iedfd7a3d1ad6ef99ce45790c2eec62e096daa5fe --- settings/provisioner/beaker/site/bkr.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/settings/provisioner/beaker/site/bkr.yml b/settings/provisioner/beaker/site/bkr.yml index d4d126d6c..3064f05ce 100644 --- a/settings/provisioner/beaker/site/bkr.yml +++ b/settings/provisioner/beaker/site/bkr.yml @@ -1,7 +1,9 @@ provisioner: beaker_checkout_script: 'khaleesi-settings/scripts/beaker/beakerCheckOut.sh' host_lab_controller: !env [BEAKER_HOST_CONTROLLER, lab-02.rhts.eng.brq.redhat.com] - whiteboard_message: 'InstackTesting' + whiteboard_prefix: 'InstackTesting' + whiteboard_triggering_job: !env BUILD_URL + whiteboard_message: '{{ !lookup provisioner.whiteboard_prefix }},triggered_from:{{ !lookup provisioner.whiteboard_triggering_job }}' network: public_subnet_cidr: 172.17.0.0/16 public_allocation_start: 172.17.0.200 From 1e55e418a6a0fb58b3c8b3b2228c37c68ef2f216 Mon Sep 17 00:00:00 2001 From: Steve Linabery Date: Thu, 28 Jan 2016 13:01:36 -0600 Subject: [PATCH 083/137] Fix use of 'True' in ignore_errors in playbooks Cleanup. There were a few places where we were using 'ignore_errors: True' when the correct case should be 'true'. 
Change-Id: Id8a9c22f7061ee89e9f0e1bbd5b95d7dfa2b037d --- .../installer/rdo-manager/yum_repos/repo-rhos.yml | 2 +- playbooks/provisioner/foreman/main.yml | 10 +++++----- playbooks/provisioner/virsh/main.yml | 12 ++++++------ playbooks/tester/integration/horizon/run.yml | 2 +- playbooks/tester/jenkins/builders/run.yml | 2 +- playbooks/tester/rally/post.yml | 4 ++-- playbooks/tester/rally/pre.yml | 2 +- playbooks/tester/rally/run.yml | 2 +- playbooks/tester/tempest/run.yml | 2 +- 9 files changed, 19 insertions(+), 19 deletions(-) diff --git a/playbooks/installer/rdo-manager/yum_repos/repo-rhos.yml b/playbooks/installer/rdo-manager/yum_repos/repo-rhos.yml index 9df920870..d43e8d0a9 100644 --- a/playbooks/installer/rdo-manager/yum_repos/repo-rhos.yml +++ b/playbooks/installer/rdo-manager/yum_repos/repo-rhos.yml @@ -119,7 +119,7 @@ - name: Find if a new kernel was installed shell: find /boot/ -anewer /proc/1/stat -name 'initramfs*' | egrep ".*" register: new_kernel - ignore_errors: True + ignore_errors: true when: "'{{ repo_host }}' == 'virthost'" - name: reboot host diff --git a/playbooks/provisioner/foreman/main.yml b/playbooks/provisioner/foreman/main.yml index cfe176ae4..37160ae76 100644 --- a/playbooks/provisioner/foreman/main.yml +++ b/playbooks/provisioner/foreman/main.yml @@ -48,27 +48,27 @@ tasks: - name: Check if CPU supports INTEL based KVM shell: egrep -c 'vmx' /proc/cpuinfo - ignore_errors: True + ignore_errors: true register: kvm_intel - name: Check if CPU supports AMD based KVM shell: egrep -c 'svm' /proc/cpuinfo - ignore_errors: True + ignore_errors: true register: kvm_amd - name: Enable KVM modules modprobe: name=kvm - ignore_errors: True + ignore_errors: true when: kvm_intel.rc == 0 or kvm_amd.rc == 0 - name: Enable Intel KVM module modprobe: name=kvm_intel - ignore_errors: True + ignore_errors: true when: kvm_intel.rc == 0 - name: Enable AMD KVM module modprobe: name=kvm_amd - ignore_errors: True + ignore_errors: true when: kvm_amd.rc == 0 - name: 
Install required QEMU-KVM packages diff --git a/playbooks/provisioner/virsh/main.yml b/playbooks/provisioner/virsh/main.yml index 4e4413a92..cf9e85813 100644 --- a/playbooks/provisioner/virsh/main.yml +++ b/playbooks/provisioner/virsh/main.yml @@ -37,7 +37,7 @@ tasks: - name: Check if CPU supports INTEL based KVM shell: egrep -c 'vmx' /proc/cpuinfo - ignore_errors: True + ignore_errors: true register: kvm_intel - name: Set fact for Intel based KVM @@ -47,7 +47,7 @@ - name: Check if CPU supports AMD based KVM shell: egrep -c 'svm' /proc/cpuinfo - ignore_errors: True + ignore_errors: true register: kvm_amd - name: Set fact for AMD based KVM @@ -81,14 +81,14 @@ modprobe: name: "kvm_{{ kvm_base }}" state: absent - ignore_errors: True + ignore_errors: true when: kvm_base is defined - name: Load KVM module modprobe: name: "kvm_{{ kvm_base }}" state: present - ignore_errors: True + ignore_errors: true when: kvm_base is defined - name: Install required QEMU-KVM packages @@ -100,14 +100,14 @@ modprobe: name: "vhost-net" state: absent - ignore_errors: True + ignore_errors: true when: kvm_base is defined - name: Load KVM module modprobe: name: "vhost-net" state: present - ignore_errors: True + ignore_errors: true - name: Validate virtualization supported on host hosts: virthost diff --git a/playbooks/tester/integration/horizon/run.yml b/playbooks/tester/integration/horizon/run.yml index 5cbcfb750..2cc13433a 100644 --- a/playbooks/tester/integration/horizon/run.yml +++ b/playbooks/tester/integration/horizon/run.yml @@ -14,6 +14,6 @@ shell: | [ -d ~/{{ tester.venv_dir }} ] && source ~/{{ tester.venv_dir }}/bin/activate nosetests -v -a "{{ tester.integration.tests_tag }}" --with-xunit --xunit-file=horizon.xml openstack_dashboard/test/integration_tests/tests chdir=~/{{ tester.dir }} - ignore_errors: True + ignore_errors: true async: 21600 poll: 30 \ No newline at end of file diff --git a/playbooks/tester/jenkins/builders/run.yml b/playbooks/tester/jenkins/builders/run.yml index 
db29a69fd..d6c85625c 100644 --- a/playbooks/tester/jenkins/builders/run.yml +++ b/playbooks/tester/jenkins/builders/run.yml @@ -21,7 +21,7 @@ - name: Set the slave with the ansible playbook register: setup_slave - ignore_errors: True + ignore_errors: true shell: > ANSIBLE_ROLES_PATH=`pwd`/roles ANSIBLE_SSH_ARGS="" diff --git a/playbooks/tester/rally/post.yml b/playbooks/tester/rally/post.yml index d437fa0d7..fd535fc03 100644 --- a/playbooks/tester/rally/post.yml +++ b/playbooks/tester/rally/post.yml @@ -45,14 +45,14 @@ args: creates: "{{ tester.rally.outputdir }}/sla.txt" # register: sla_check - ignore_errors: True + ignore_errors: true - name: SLA Check JSON shell: "{{ tester.rally.path }}/bin/rally task sla_check --json > {{ tester.rally.outputdir }}/sla.json" args: creates: "{{ tester.rally.outputdir }}/sla.json" # register: sla_check - ignore_errors: True + ignore_errors: true # These need to be archived by Jenkins Somehow diff --git a/playbooks/tester/rally/pre.yml b/playbooks/tester/rally/pre.yml index 69df9d2ef..10dd6953f 100644 --- a/playbooks/tester/rally/pre.yml +++ b/playbooks/tester/rally/pre.yml @@ -52,7 +52,7 @@ shell: "source /root/keystonerc_admin && nova flavor-create m1.nano 42 64 0 1" sudo: yes # ignore errors if flavor already created - ignore_errors: True + ignore_errors: true - name: Create Glance Image glance_image: diff --git a/playbooks/tester/rally/run.yml b/playbooks/tester/rally/run.yml index 93d98d26a..9afeb2e37 100644 --- a/playbooks/tester/rally/run.yml +++ b/playbooks/tester/rally/run.yml @@ -36,7 +36,7 @@ - name: Create Rally deployment shell: "source {{ tester.rally.dir }}/keystonerc_admin && {{ tester.rally.path }}/bin/rally deployment create --fromenv --name {{ tester.rally.deployment }} | awk '/{{ tester.rally.deployment }}/ {print $2}'" register: rally_deployment_uuid - ignore_errors: True + ignore_errors: true - debug: var=rally_deployment_uuid diff --git a/playbooks/tester/tempest/run.yml b/playbooks/tester/tempest/run.yml 
index 9c5cd6383..39f147dac 100644 --- a/playbooks/tester/tempest/run.yml +++ b/playbooks/tester/tempest/run.yml @@ -36,5 +36,5 @@ - name: run tempest shell: "{{ tester.dir }}/with_venv ./tools/run-tests.sh {{ tester.tempest.testr_args|default('') }} {{ tester.tempest.test_regex }} {{ skipfile }}" - ignore_errors: True + ignore_errors: true when: tester.tempest.test_regex is defined or (tester.tempest.whitelist is defined and tester.tempest.whitelist) From 9587ecdf6a6fdb2d17f16dfa28fa32fb2cf641e6 Mon Sep 17 00:00:00 2001 From: Tal Kammer Date: Wed, 13 Jan 2016 13:08:58 +0200 Subject: [PATCH 084/137] Adds osp-d undercloud deployment This patch adds the necessary parts to deploy an undercloud Change-Id: I9f2f737420d92ba9e50f9f5616fe33170211457a --- playbooks/installer/ospd/main.yml | 4 ++ playbooks/installer/ospd/post.yml | 5 ++ playbooks/installer/ospd/pre.yml | 5 ++ playbooks/installer/ospd/run.yml | 3 + playbooks/installer/ospd/undercloud/main.yml | 5 ++ playbooks/installer/ospd/undercloud/post.yml | 5 ++ playbooks/installer/ospd/undercloud/pre.yml | 61 +++++++++++++++++++ playbooks/installer/ospd/undercloud/run.yml | 29 +++++++++ settings/installer/ospd.yml | 38 ++++++++++++ settings/installer/ospd/build/latest.yml | 4 ++ settings/installer/ospd/images/import.yml | 10 +++ .../ospd/images/import/version/liberty.yml | 9 +++ .../ospd/images/import/version/mitaka.yml | 9 +++ settings/installer/ospd/network/neutron.yml | 4 ++ .../ospd/network/neutron/variant/gre.yml | 5 ++ .../ospd/network/neutron/variant/sriov.yml | 6 ++ .../ospd/network/neutron/variant/vlan.yml | 5 ++ .../ospd/network/neutron/variant/vxlan.yml | 5 ++ settings/installer/ospd/version/7.yml | 9 +++ settings/installer/ospd/version/7/minor/1.yml | 5 ++ settings/installer/ospd/version/7/minor/2.yml | 5 ++ settings/installer/ospd/version/8.yml | 9 +++ settings/installer/ospd/version/8/minor/1.yml | 5 ++ settings/installer/ospd/version/8/minor/2.yml | 5 ++ 24 files changed, 250 insertions(+) create mode 
100644 playbooks/installer/ospd/main.yml create mode 100644 playbooks/installer/ospd/post.yml create mode 100644 playbooks/installer/ospd/pre.yml create mode 100644 playbooks/installer/ospd/run.yml create mode 100644 playbooks/installer/ospd/undercloud/main.yml create mode 100644 playbooks/installer/ospd/undercloud/post.yml create mode 100644 playbooks/installer/ospd/undercloud/pre.yml create mode 100644 playbooks/installer/ospd/undercloud/run.yml create mode 100644 settings/installer/ospd.yml create mode 100644 settings/installer/ospd/build/latest.yml create mode 100644 settings/installer/ospd/images/import.yml create mode 100644 settings/installer/ospd/images/import/version/liberty.yml create mode 100644 settings/installer/ospd/images/import/version/mitaka.yml create mode 100644 settings/installer/ospd/network/neutron.yml create mode 100644 settings/installer/ospd/network/neutron/variant/gre.yml create mode 100644 settings/installer/ospd/network/neutron/variant/sriov.yml create mode 100644 settings/installer/ospd/network/neutron/variant/vlan.yml create mode 100644 settings/installer/ospd/network/neutron/variant/vxlan.yml create mode 100644 settings/installer/ospd/version/7.yml create mode 100644 settings/installer/ospd/version/7/minor/1.yml create mode 100644 settings/installer/ospd/version/7/minor/2.yml create mode 100644 settings/installer/ospd/version/8.yml create mode 100644 settings/installer/ospd/version/8/minor/1.yml create mode 100644 settings/installer/ospd/version/8/minor/2.yml diff --git a/playbooks/installer/ospd/main.yml b/playbooks/installer/ospd/main.yml new file mode 100644 index 000000000..bf08a0fbf --- /dev/null +++ b/playbooks/installer/ospd/main.yml @@ -0,0 +1,4 @@ +--- +- include: pre.yml +- include: run.yml +- include: post.yml diff --git a/playbooks/installer/ospd/post.yml b/playbooks/installer/ospd/post.yml new file mode 100644 index 000000000..67e9bf804 --- /dev/null +++ b/playbooks/installer/ospd/post.yml @@ -0,0 +1,5 @@ +--- +# Any step 
that should happen after the deployment of the osp-d playbook +# This could be create ssh forwarding to the nodes, validation of installation, etc +- name: Post tasks + hosts: localhost diff --git a/playbooks/installer/ospd/pre.yml b/playbooks/installer/ospd/pre.yml new file mode 100644 index 000000000..1ec855cf0 --- /dev/null +++ b/playbooks/installer/ospd/pre.yml @@ -0,0 +1,5 @@ +--- +# Any step that should happen before initiating the osp-d playbook +# This could be validation of the hosts, package installation that is assumed as per the guide, etc.. +- name: Pre tasks + hosts: localhost diff --git a/playbooks/installer/ospd/run.yml b/playbooks/installer/ospd/run.yml new file mode 100644 index 000000000..65d07dfbd --- /dev/null +++ b/playbooks/installer/ospd/run.yml @@ -0,0 +1,3 @@ +--- +- include: undercloud/main.yml +- include: overcloud/main.yml diff --git a/playbooks/installer/ospd/undercloud/main.yml b/playbooks/installer/ospd/undercloud/main.yml new file mode 100644 index 000000000..7da503b86 --- /dev/null +++ b/playbooks/installer/ospd/undercloud/main.yml @@ -0,0 +1,5 @@ +--- +- include: pre.yml +- include: run.yml +- include: post.yml + diff --git a/playbooks/installer/ospd/undercloud/post.yml b/playbooks/installer/ospd/undercloud/post.yml new file mode 100644 index 000000000..3d53ccd66 --- /dev/null +++ b/playbooks/installer/ospd/undercloud/post.yml @@ -0,0 +1,5 @@ +--- +# Any step that should happen after the deployment of the undercloud playbook +# This could be preparation creation of resources, validation of installation, etc +- name: Post tasks + hosts: localhost diff --git a/playbooks/installer/ospd/undercloud/pre.yml b/playbooks/installer/ospd/undercloud/pre.yml new file mode 100644 index 000000000..9e935b869 --- /dev/null +++ b/playbooks/installer/ospd/undercloud/pre.yml @@ -0,0 +1,61 @@ +--- +- name: Prepare the undercloud for installation + hosts: undercloud + tasks: + - name: set hostname + hostname: + name: undercloud.redhat.local + + - 
name: update /etc/hosts with undercloud details + lineinfile: + dest: "/etc/hosts" + line: "{{ ansible_default_ipv4.address }} undercloud.redhat.local undercloud" + state: present + + - name: remove all repos + command: "rm -f /etc/yum.repos.d/*" + + - name: install the rhos-release RPM + shell: "yum localinstall -y {{ product.rpm }}" + + - name: create necessary repos with for director using rhos-release + command: "rhos-release {{ product.full_version }} -p {{ installer.build }}" + + - name: create necessary repos for core using rhos-release + command: "rhos-release {{ product.version.major }} -p {{ product.build }}" + + - name: update system packages + shell: "yum update -y" + + - name: reboot the undercloud + shell: "sleep 2 && shutdown -r now" + async: 1 + poll: 0 + ignore_errors: true + +- name: Waiting for host to startup + hosts: localhost + tasks: + - name: Waiting for the undercloud to be available + wait_for: + host: "undercloud" + timeout: 120 + +- name: Continue with the undercloud installation + hosts: undercloud + tasks: + - name: Install the oscplugin + yum: + name: "python-rdomanager-oscplugin" + state: present + + - name: create user + user: + name: "{{ installer.user.name }}" + state: present + password: "{{ installer.user.password | password_hash('sha512') }}" + + - name: add user to sudoers + lineinfile: + dest: "/etc/sudoers" + line: "{{ installer.user.name }} ALL=(root) NOPASSWD:ALL" diff --git a/playbooks/installer/ospd/undercloud/run.yml b/playbooks/installer/ospd/undercloud/run.yml new file mode 100644 index 000000000..7d954e5fc --- /dev/null +++ b/playbooks/installer/ospd/undercloud/run.yml @@ -0,0 +1,29 @@ +--- +- name: Configuring the undercloud + hosts: undercloud + become: yes + become_user: "{{ installer.user.name }}" + tasks: + - name: fetch sample configuration file + fetch: + src: "/usr/share/instack-undercloud/undercloud.conf.sample" + dest: "/tmp/undercloud.conf" + flat: yes + + - name: edit configuration file per our needs + 
ini_file: + dest: "/tmp/undercloud.conf" + section: "DEFAULT" + option: "{{ item.key }}" + value: "{{ item.value }}" + with_dict: "{{ installer.undercloud.config }}" + sudo: no + delegate_to: localhost + + - name: copy our configuration to the undercloud + copy: + src: "/tmp/undercloud.conf" + dest: "/home/{{ installer.user.name }}/undercloud.conf" + + - name: install the undercloud (should take ~30 minutes :) ) + shell: "openstack undercloud install" diff --git a/settings/installer/ospd.yml b/settings/installer/ospd.yml new file mode 100644 index 000000000..4d60e5acd --- /dev/null +++ b/settings/installer/ospd.yml @@ -0,0 +1,38 @@ +--- !extends:common/common.yml + +installer: + type: ospd + short_type: ospd + user: + name: stack + password: stack + + undercloud: + config: + local_ip: 172.16.0.1/24 + undercloud_public_vip: 172.16.0.10 + undercloud_admin_vip: 172.16.0.11 + local_interface: eth0 + masquerade_network: 172.16.0.0/24 + dhcp_start: 172.16.0.20 + dhcp_end: 172.16.0.120 + network_cidr: 172.16.0.0/24 + network_gateway: 172.16.0.1 + discovery_iprange: 172.16.0.150,172.16.0.180 + +defaults: + build: latest + images: import + version: 7 + network: neutron + +job: + archive: + - /home/stack/.instack/install-undercloud.log + - /home/stack/stackrc + - /home/stack/*.log + - /home/stack/*.json + - /home/stack/*.conf + - /home/stack/deploy-overcloudrc + - /home/stack/network-environment.yaml + - /usr/share/openstack-tripleo-heat-templates diff --git a/settings/installer/ospd/build/latest.yml b/settings/installer/ospd/build/latest.yml new file mode 100644 index 000000000..a3de63654 --- /dev/null +++ b/settings/installer/ospd/build/latest.yml @@ -0,0 +1,4 @@ +--- + +installer: + build: latest diff --git a/settings/installer/ospd/images/import.yml b/settings/installer/ospd/images/import.yml new file mode 100644 index 000000000..400db86a6 --- /dev/null +++ b/settings/installer/ospd/images/import.yml @@ -0,0 +1,10 @@ +--- + +installer: + images: + base_url: !lookup 
private.installer.images[ !lookup product.build ] + task: import + + +defaults: + version: liberty diff --git a/settings/installer/ospd/images/import/version/liberty.yml b/settings/installer/ospd/images/import/version/liberty.yml new file mode 100644 index 000000000..ecd7e3e70 --- /dev/null +++ b/settings/installer/ospd/images/import/version/liberty.yml @@ -0,0 +1,9 @@ +--- + +installer: + images: + overcloud: + files: + discovery: "discovery-ramdisk.tar" + deployment: "deploy-ramdisk-ironic.tar" + overcloud: "overcloud-full.tar" diff --git a/settings/installer/ospd/images/import/version/mitaka.yml b/settings/installer/ospd/images/import/version/mitaka.yml new file mode 100644 index 000000000..53077c36f --- /dev/null +++ b/settings/installer/ospd/images/import/version/mitaka.yml @@ -0,0 +1,9 @@ +--- + +installer: + images: + overcloud: + files: + discovery: "ironic-python-agent.tar" + deployment: "deploy-ramdisk-ironic.tar" + overcloud: "overcloud-full.tar" diff --git a/settings/installer/ospd/network/neutron.yml b/settings/installer/ospd/network/neutron.yml new file mode 100644 index 000000000..88d3d64c8 --- /dev/null +++ b/settings/installer/ospd/network/neutron.yml @@ -0,0 +1,4 @@ +--- + +defaults: + variant: vxlan diff --git a/settings/installer/ospd/network/neutron/variant/gre.yml b/settings/installer/ospd/network/neutron/variant/gre.yml new file mode 100644 index 000000000..185fe6bf5 --- /dev/null +++ b/settings/installer/ospd/network/neutron/variant/gre.yml @@ -0,0 +1,5 @@ +--- + +installer: + network: + variant: gre diff --git a/settings/installer/ospd/network/neutron/variant/sriov.yml b/settings/installer/ospd/network/neutron/variant/sriov.yml new file mode 100644 index 000000000..d91da3550 --- /dev/null +++ b/settings/installer/ospd/network/neutron/variant/sriov.yml @@ -0,0 +1,6 @@ +--- + +installer: + network: + variant: sriov + diff --git a/settings/installer/ospd/network/neutron/variant/vlan.yml 
b/settings/installer/ospd/network/neutron/variant/vlan.yml new file mode 100644 index 000000000..48c736529 --- /dev/null +++ b/settings/installer/ospd/network/neutron/variant/vlan.yml @@ -0,0 +1,5 @@ +--- + +installer: + network: + variant: vlan diff --git a/settings/installer/ospd/network/neutron/variant/vxlan.yml b/settings/installer/ospd/network/neutron/variant/vxlan.yml new file mode 100644 index 000000000..d3eded44c --- /dev/null +++ b/settings/installer/ospd/network/neutron/variant/vxlan.yml @@ -0,0 +1,5 @@ +--- + +installer: + network: + variant: vxlan diff --git a/settings/installer/ospd/version/7.yml b/settings/installer/ospd/version/7.yml new file mode 100644 index 000000000..9beadeac8 --- /dev/null +++ b/settings/installer/ospd/version/7.yml @@ -0,0 +1,9 @@ +--- + +installer: + version: + major: 7 + minor: 0 + +defaults: + minor: 2 diff --git a/settings/installer/ospd/version/7/minor/1.yml b/settings/installer/ospd/version/7/minor/1.yml new file mode 100644 index 000000000..3eb89ed53 --- /dev/null +++ b/settings/installer/ospd/version/7/minor/1.yml @@ -0,0 +1,5 @@ +--- + +installer: + version: + minor: 1 diff --git a/settings/installer/ospd/version/7/minor/2.yml b/settings/installer/ospd/version/7/minor/2.yml new file mode 100644 index 000000000..1d1897025 --- /dev/null +++ b/settings/installer/ospd/version/7/minor/2.yml @@ -0,0 +1,5 @@ +--- + +installer: + version: + minor: 2 diff --git a/settings/installer/ospd/version/8.yml b/settings/installer/ospd/version/8.yml new file mode 100644 index 000000000..a0b339998 --- /dev/null +++ b/settings/installer/ospd/version/8.yml @@ -0,0 +1,9 @@ +--- + +installer: + version: + major: 8 + minor: 0 + +defaults: + minor: 2 diff --git a/settings/installer/ospd/version/8/minor/1.yml b/settings/installer/ospd/version/8/minor/1.yml new file mode 100644 index 000000000..3eb89ed53 --- /dev/null +++ b/settings/installer/ospd/version/8/minor/1.yml @@ -0,0 +1,5 @@ +--- + +installer: + version: + minor: 1 diff --git 
a/settings/installer/ospd/version/8/minor/2.yml b/settings/installer/ospd/version/8/minor/2.yml new file mode 100644 index 000000000..1d1897025 --- /dev/null +++ b/settings/installer/ospd/version/8/minor/2.yml @@ -0,0 +1,5 @@ +--- + +installer: + version: + minor: 2 From b6dfe696ab0c09acdc052f84b68a115841a5f27c Mon Sep 17 00:00:00 2001 From: Tal Kammer Date: Wed, 13 Jan 2016 13:11:02 +0200 Subject: [PATCH 085/137] Adds the overcloud deployment for osp-d Change-Id: I854d15a63ee6289c5f82b2040f878bd260a9b4f6 --- playbooks/installer/ospd/overcloud/main.yml | 4 + playbooks/installer/ospd/overcloud/post.yml | 5 + playbooks/installer/ospd/overcloud/pre.yml | 167 ++++++++++++++++++ playbooks/installer/ospd/overcloud/run.yml | 21 +++ .../ospd/overcloud/scripts/bootif-fix.service | 9 + .../ospd/overcloud/scripts/bootif-fix.sh | 5 + .../templates/virthost_instack.json.j2 | 25 +++ playbooks/installer/ospd/post.yml | 16 +- playbooks/installer/ospd/undercloud/pre.yml | 6 + playbooks/installer/ospd/undercloud/run.yml | 2 +- playbooks/provisioner/virsh/main.yml | 89 ++++------ .../virsh/templates/create_images.sh.j2 | 8 + .../virsh/templates/create_vms.sh.j2 | 18 ++ .../overcloud/images/build/tasks/main.yml | 2 + .../overcloud/images/import/tasks/main.yml | 15 ++ settings/installer/ospd.yml | 48 +++++ .../ospd/network/neutron/variant/gre.yml | 5 +- .../ospd/network/neutron/variant/vxlan.yml | 5 +- settings/provisioner/virsh.yml | 4 +- .../virsh/topology/ospd_minimal.yml | 30 +++- 20 files changed, 422 insertions(+), 62 deletions(-) create mode 100644 playbooks/installer/ospd/overcloud/main.yml create mode 100644 playbooks/installer/ospd/overcloud/post.yml create mode 100644 playbooks/installer/ospd/overcloud/pre.yml create mode 100644 playbooks/installer/ospd/overcloud/run.yml create mode 100644 playbooks/installer/ospd/overcloud/scripts/bootif-fix.service create mode 100644 playbooks/installer/ospd/overcloud/scripts/bootif-fix.sh create mode 100644 
playbooks/installer/ospd/overcloud/templates/virthost_instack.json.j2 create mode 100644 playbooks/provisioner/virsh/templates/create_images.sh.j2 create mode 100644 playbooks/provisioner/virsh/templates/create_vms.sh.j2 create mode 100644 roles/ospd/overcloud/images/build/tasks/main.yml create mode 100644 roles/ospd/overcloud/images/import/tasks/main.yml diff --git a/playbooks/installer/ospd/overcloud/main.yml b/playbooks/installer/ospd/overcloud/main.yml new file mode 100644 index 000000000..bf08a0fbf --- /dev/null +++ b/playbooks/installer/ospd/overcloud/main.yml @@ -0,0 +1,4 @@ +--- +- include: pre.yml +- include: run.yml +- include: post.yml diff --git a/playbooks/installer/ospd/overcloud/post.yml b/playbooks/installer/ospd/overcloud/post.yml new file mode 100644 index 000000000..f73e8a886 --- /dev/null +++ b/playbooks/installer/ospd/overcloud/post.yml @@ -0,0 +1,5 @@ +--- +# Any step that should happen after the deployment of the overcloud playbook +# This could be validation of installation, etc +- name: Post tasks + hosts: localhost diff --git a/playbooks/installer/ospd/overcloud/pre.yml b/playbooks/installer/ospd/overcloud/pre.yml new file mode 100644 index 000000000..71c3a2dcf --- /dev/null +++ b/playbooks/installer/ospd/overcloud/pre.yml @@ -0,0 +1,167 @@ +--- +# Any step that should happen before initiating the overcloud playbook +# This could be package installation, image preparation, etc.. 
+- name: prep and upload images into glance + hosts: undercloud + gather_facts: no + become: yes + become_user: "{{ installer.user.name }}" + roles: + - {role: ospd/overcloud/images/build, when: installer.images.task == "build"} + - {role: ospd/overcloud/images/import, when: installer.images.task == "import"} + tasks: + - name: upload the overcloud images to glance + shell: "source ~/stackrc; openstack overcloud image upload" + +# In case we're dealing with virthost, we need to make sure the undercloud is able to ssh to the hypervisor +- name: Create the stack user on the virthost and allow SSH to hypervisor + hosts: virthost + gather_facts: no + tasks: + - name: Create stack user on virthost + user: + name: "{{ installer.user.name }}" + state: present + password: "{{ installer.user.password | password_hash('sha512') }}" + + - name: Set permissions for the user to access the hypervisor + copy: + content: | + [libvirt Management Access] + Identity=unix-user:{{ installer.user.name }} + Action=org.libvirt.unix.manage + ResultAny=yes + ResultInactive=yes + ResultActive=yes + dest: "/etc/polkit-1/localauthority/50-local.d/50-libvirt-user-{{ installer.user.name }}.pkla" + + - name: Make sure sshpass installed + yum: + name: sshpass + state: latest + delegate_to: undercloud + + - name: SSH copy ID to the hypervisor + become: yes + become_user: "{{ installer.user.name }}" + shell: "sshpass -p '{{ installer.user.password }}' ssh-copy-id {{ installer.user.name }}@192.168.122.1 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" + delegate_to: undercloud + + - name: prepare instack.json + become: yes + become_user: "{{ installer.user.name }}" + template: + src: "templates/virthost_instack.json.j2" + dest: "~/instack_template.json" + delegate_to: undercloud + + - name: populate the private key + become: yes + become_user: "{{ installer.user.name }}" + shell: "cat ~/instack_template.json | jq --arg key \"$(cat ~/.ssh/id_rsa)\" '. 
| .nodes[].pm_password=$key | .[\"ssh-key\"]=$key'> instackenv.json" + delegate_to: undercloud + +- name: Validate our instackenv.json file + hosts: undercloud + gather_facts: no + become: yes + become_user: "{{ installer.user.name }}" + tasks: + - name: validate our instack.json file + get_url: + url: "https://raw.githubusercontent.com/rthallisey/clapper/master/instackenv-validator.py" + dest: "~/instackenv-validator.py" + + - name: validate our instack.json file + shell: "python instackenv-validator.py -f ~/instackenv.json" + + - name: register our hosts to instack + become_user: "{{ installer.user.name }}" + shell: "source ~/stackrc; openstack baremetal import --json instackenv.json" + + - name: assign the kernel and ramdisk before introspection begins + become_user: "{{ installer.user.name }}" + shell: "source ~/stackrc; openstack baremetal configure boot" + +# In case of virthost we need to fix the pxe_ssh limitation of correctly assigning the MAC address to the iPXE script +- name: Fixing the pxe_ssh and iPXE + hosts: virthost + gather_facts: no + tasks: + - name: copy bootif script on the undercloud + copy: + src: "scripts/bootif-fix.sh" + dest: "/usr/bin/bootif-fix" + mode: 0755 + delegate_to: undercloud + + - name: copy the service file + copy: + src: "scripts/bootif-fix.service" + dest: "/usr/lib/systemd/system/bootif-fix.service" + delegate_to: undercloud + + - name: reload the service daemon + shell: "systemctl daemon-reload" + delegate_to: undercloud + + - name: enable and run bootif-fix service + service: + name: "bootif-fix" + enabled: yes + state: started + delegate_to: undercloud + +- name: Introspec nodes and Create flavors + hosts: undercloud + gather_facts: yes + tags: debug + +- name: Introspec nodes and Create flavors + hosts: undercloud + become: yes + become_user: "{{ installer.user.name }}" + gather_facts: no + tasks: + - name: start node introspection (should take ~20 minutes :) ) + shell: "source ~/stackrc; openstack baremetal 
introspection bulk start" + + - name: get subnet ID to update neutron's DNS server + shell: "source ~/stackrc; neutron subnet-list | grep {{ installer.undercloud.config.network_cidr }} | awk '{print $2;}'" + register: subnet_id + + - name: get the nameserver + shell: "cat /etc/resolv.conf | grep -m 1 'nameserver' | awk '{print $2}'" + register: nameserver + + - name: update neutron DNS server + shell: "source ~/stackrc; neutron subnet-update {{ subnet_id.stdout }} --dns-nameserver {{ nameserver.stdout }}" + + - name: create the baremetal flavor for our machines + shell: "source ~/stackrc; openstack flavor create --id auto --ram 4096 --disk 16 --vcpus 1 baremetal" + + - name: set additional properties + shell: "source ~/stackrc; openstack flavor set --property 'cpu_arch'='x86_64' --property 'capabilities:boot_option'='local' baremetal" + + - name: create the flavors for our machines + shell: "source ~/stackrc; openstack flavor create --id auto --ram {{ item.value.memory | int - 100 }} --disk {{ item.value.disks.disk1.size | regex_replace('(?P[0-9]+).*$', '\\g') | int - 10 }} --vcpus {{ item.value.cpu }} {{ item.key }}" + when: "'{{ item.key }}' != 'undercloud'" + with_dict: provisioner.nodes + + - name: set additional properties + shell: "source ~/stackrc; openstack flavor set --property 'cpu_arch'='x86_64' --property 'capabilities:boot_option'='local' --property 'capabilities:profile'='{{ item.key }}' {{ item.key }}" + when: "'{{ item.key }}' != 'undercloud'" + with_dict: provisioner.nodes + + - name: get the node UUID + shell: "source ~/stackrc; ironic node-list | grep {{ item.key }} | awk '{print $2}'" + when: "'{{ item.key }}' != 'undercloud'" + with_dict: provisioner.nodes + register: node_list + + - name: tag our nodes with the proper profile + shell: "source ~/stackrc; ironic node-update {{ item.1.stdout }} add properties/capabilities='profile:{{ item.0 }},boot_option:local'" + when: "'{{ item.0 }}' != 'undercloud'" + with_together: + - 
provisioner.nodes.keys() + - node_list.results diff --git a/playbooks/installer/ospd/overcloud/run.yml b/playbooks/installer/ospd/overcloud/run.yml new file mode 100644 index 000000000..427668542 --- /dev/null +++ b/playbooks/installer/ospd/overcloud/run.yml @@ -0,0 +1,21 @@ +--- +- name: Prepare templates and deploy the overcloud + hosts: undercloud + become: yes + become_user: "{{ installer.user.name }}" + gather_facts: yes + tasks: + # TODO: Add the complete deployment using swift/cinder/scale etc + - name: Deploy the overcloud + shell: | + source ~/stackrc; openstack overcloud deploy --debug \ + --log-file overcloud_deployment_{{ 100 | random }}.log \ + --templates \ + --neutron-network-type {{ installer.overcloud.network.backend }} \ + --neutron-tunnel-types {{ installer.overcloud.network.backend }} \ + --control-scale {{ groups['controller']|length }} \ + --control-flavor controller \ + --compute-scale {{ groups['compute']|length }} \ + --compute-flavor compute \ + --compute-scale {{ groups['ceph']|length }} \ + --compute-flavor ceph diff --git a/playbooks/installer/ospd/overcloud/scripts/bootif-fix.service b/playbooks/installer/ospd/overcloud/scripts/bootif-fix.service new file mode 100644 index 000000000..8694570d7 --- /dev/null +++ b/playbooks/installer/ospd/overcloud/scripts/bootif-fix.service @@ -0,0 +1,9 @@ +[Unit] +Description=Automated fix for incorrect iPXE BOOFIF + +[Service] +Type=simple +ExecStart=/usr/bin/bootif-fix + +[Install] +WantedBy=multi-user.target diff --git a/playbooks/installer/ospd/overcloud/scripts/bootif-fix.sh b/playbooks/installer/ospd/overcloud/scripts/bootif-fix.sh new file mode 100644 index 000000000..4c8621cd5 --- /dev/null +++ b/playbooks/installer/ospd/overcloud/scripts/bootif-fix.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +while true; + do find /httpboot/ -type f ! -iname "kernel" ! -iname "ramdisk" ! -iname "*.kernel" ! 
-iname "*.ramdisk" -exec sed -i 's|{mac|{net0/mac|g' {} +; +done diff --git a/playbooks/installer/ospd/overcloud/templates/virthost_instack.json.j2 b/playbooks/installer/ospd/overcloud/templates/virthost_instack.json.j2 new file mode 100644 index 000000000..365a500de --- /dev/null +++ b/playbooks/installer/ospd/overcloud/templates/virthost_instack.json.j2 @@ -0,0 +1,25 @@ +{ + "ssh-user": "{{ installer.user.name }}", + "ssh-key": "$(cat ~/.ssh/id_rsa)", + "power_manager": "nova.virt.baremetal.virtual_power_driver.VirtualPowerManager", + "host-ip": "192.168.122.1", + "arch": "x86_64", + "nodes": [ +{% for host_name in groups['openstack_nodes'] %} + {% if host_name != 'undercloud' %} + { + "name": "{{ hostvars[host_name].inventory_hostname }}", + "pm_addr": "192.168.122.1", + "pm_password": "$(cat ~/.ssh/id_rsa)", + "pm_type": "pxe_ssh", + "mac": ["{{ hostvars[host_name]['ansible_%s' % installer.undercloud.config.local_interface].macaddress }}"], + "cpu": "{{ provisioner.nodes[host_name].cpu }}", + "memory": "{{ provisioner.nodes[host_name].memory }}", + "disk": "{{ provisioner.nodes[host_name].disks.disk1.size | regex_replace('(?P[0-9]+).*$', '\\g') }}", + "arch": "x86_64", + "pm_user": "{{ installer.user.name }}" + }{% if not loop.last %},{% endif %} + {% endif %} +{% endfor %} + ] +} diff --git a/playbooks/installer/ospd/post.yml b/playbooks/installer/ospd/post.yml index 67e9bf804..16d28fc90 100644 --- a/playbooks/installer/ospd/post.yml +++ b/playbooks/installer/ospd/post.yml @@ -2,4 +2,18 @@ # Any step that should happen after the deployment of the osp-d playbook # This could be create ssh forwarding to the nodes, validation of installation, etc - name: Post tasks - hosts: localhost + hosts: undercloud + become: yes + become_user: "{{ installer.user.name }}" + tasks: + - name: Copy the keystonerc file for the tester + fetch: + src: "~/overcloudrc" + dest: "{{ inventory_dir }}/keystonerc" + flat: yes +# +#- name: Post tasks +# hosts: undercloud +# tasks: +# - 
name: Copy the tempest-input-file to the root home dir +# shell: "cp /home/{{ installer.user.name }}/tempest-deployer-input.conf ~/tempest-deployer-input.conf" diff --git a/playbooks/installer/ospd/undercloud/pre.yml b/playbooks/installer/ospd/undercloud/pre.yml index 9e935b869..67e95bd72 100644 --- a/playbooks/installer/ospd/undercloud/pre.yml +++ b/playbooks/installer/ospd/undercloud/pre.yml @@ -18,9 +18,15 @@ - name: install the rhos-release RPM shell: "yum localinstall -y {{ product.rpm }}" + - debug: + msg: "rhos-release {{ product.full_version }} -p {{ installer.build }}" + - name: create necessary repos with for director using rhos-release command: "rhos-release {{ product.full_version }} -p {{ installer.build }}" + - debug: + msg: "rhos-release {{ product.version.major }} -p {{ product.build }}" + - name: create necessary repos for core using rhos-release command: "rhos-release {{ product.version.major }} -p {{ product.build }}" diff --git a/playbooks/installer/ospd/undercloud/run.yml b/playbooks/installer/ospd/undercloud/run.yml index 7d954e5fc..365d54519 100644 --- a/playbooks/installer/ospd/undercloud/run.yml +++ b/playbooks/installer/ospd/undercloud/run.yml @@ -26,4 +26,4 @@ dest: "/home/{{ installer.user.name }}/undercloud.conf" - name: install the undercloud (should take ~30 minutes :) ) - shell: "openstack undercloud install" + shell: "openstack undercloud install --debug &> undercloud_install.log" diff --git a/playbooks/provisioner/virsh/main.yml b/playbooks/provisioner/virsh/main.yml index cf9e85813..86cbe4259 100644 --- a/playbooks/provisioner/virsh/main.yml +++ b/playbooks/provisioner/virsh/main.yml @@ -159,71 +159,44 @@ dest: "/var/lib/libvirt/images/{{ provisioner.image.name }}-original" url: "{{ provisioner.image.base_url }}/{{ provisioner.image.name }}" - - name: Create a new base image - shell: "qemu-img create -f qcow2 {{ provisioner.image.name }} {{ provisioner.image.disk.size }}" - args: - chdir: "/var/lib/libvirt/images/" - - - name: 
Resize base image volume to defined size - shell: "virt-resize --expand /dev/sda1 {{ provisioner.image.name }}-original {{ provisioner.image.name }}" - args: - chdir: "/var/lib/libvirt/images/" - - - name: Create the topology base images - shell: "qemu-img create -f qcow2 -b {{ provisioner.image.name }} base.qcow2" - args: - chdir: "/var/lib/libvirt/images/" - - name: Remove cloud-init from the base image - shell: "virt-customize -a base.qcow2 --run-command 'yum remove cloud-init* -y'" + shell: "virt-customize -a {{ provisioner.image.name }}-original --run-command 'yum remove cloud-init* -y'" args: chdir: "/var/lib/libvirt/images/" - name: Reset the password to a default one - shell: "virt-customize -a base.qcow2 --root-password password:redhat" + shell: "virt-customize -a {{ provisioner.image.name }}-original --root-password password:redhat" args: chdir: "/var/lib/libvirt/images/" - name: Configure two network interfaces for the image - shell: "virt-customize -a base.qcow2 --run-command 'cp /etc/sysconfig/network-scripts/ifcfg-eth{0,1} && sed -i s/DEVICE=.*/DEVICE=eth1/g /etc/sysconfig/network-scripts/ifcfg-eth1'" + shell: "virt-customize -a {{ provisioner.image.name }}-original --run-command 'cp /etc/sysconfig/network-scripts/ifcfg-eth{0,1} && sed -i s/DEVICE=.*/DEVICE=eth1/g /etc/sysconfig/network-scripts/ifcfg-eth1'" args: chdir: "/var/lib/libvirt/images/" - - name: Create each node image - shell: "cp base.qcow2 {{ item.key }}.qcow2" - args: - chdir: "/var/lib/libvirt/images/" - with_dict: provisioner.nodes + - name: Create image templates for nodes + template: + dest: "~/create_images.sh" + src: "templates/create_images.sh.j2" + mode: 0755 - # TODO: make this a module to enable dynamic creation of networks - - name: Create the VMs and attach to the relevant networks - shell: | - virt-install --name {{ item.key }} \ - --virt-type kvm \ - --cpu host \ - --ram {{ item.value.memory }} \ - --vcpus {{ item.value.cpu }} \ - --os-variant {{ item.value.os.variant }} \ 
- --disk path=/var/lib/libvirt/images/{{ item.key }}.qcow2,device=disk,bus=virtio,format=qcow2 \ - --import \ - --noautoconsole \ - --vnc \ - --network network:{{ provisioner.network.network_list.data.name }} \ - --network network:{{ provisioner.network.network_list.management.name }} \ - --network network:{{ provisioner.network.network_list.external.name }} - with_dict: provisioner.nodes + - name: The create images script + shell: "cat ~/create_images.sh" - - name: Pausing to let the VMs start and get an IP address - pause: - seconds: 60 + - name: Execute the create images script + shell: "bash ~/create_images.sh" - - name: Get IP list - shell: "echo $(grep $(virsh domiflist {{ item.key }} | awk '/default/ {print $5};') -r /var/log/messages | grep -m1 DHCPOFFER | awk '{print $7};')" - with_dict: provisioner.nodes - register: ip_list + - name: Create virt install templates for nodes + template: + dest: "~/create_vms.sh" + src: "templates/create_vms.sh.j2" + mode: 0755 - - set_fact: - vm_ip_list: "{{ ip_list.results }}" + - name: The create vms script + shell: "cat ~/create_vms.sh" + + - name: Execute the create vms script + shell: "bash ~/create_vms.sh" - name: Get MAC list shell: "virsh domiflist {{ item.key }} | awk '/default/ {print $5};'" @@ -233,6 +206,17 @@ - set_fact: vm_mac_list: "{{ mac_list.results }}" + - name: Wait until one of the VMs gets an IP + shell: "virsh net-dhcp-leases {{ provisioner.network.network_list.management.name }} | awk /{{ item.stdout }}/'{print $5}' | cut -d'/' -f1" + register: ip_list + until: ip_list.stdout.find("192.168.122") > -1 + retries: 10 + delay: 20 + with_items: vm_mac_list + + - set_fact: + vm_ip_list: "{{ ip_list.results }}" + - name: Add hosts to host list add_host: name="{{ item.key }}" @@ -271,8 +255,11 @@ - name: SSH copy ID to all VMs from virthost shell: "sshpass -p 'redhat' ssh-copy-id root@{{ item.stdout }} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" - with_items: - - "{{ vm_ip_list }}" + 
register: shell_result + until: shell_result.stderr.find("ERROR") == -1 + retries: 20 + delay: 10 + with_items: vm_ip_list - name: Update ansible with the new hosts hosts: localhost diff --git a/playbooks/provisioner/virsh/templates/create_images.sh.j2 b/playbooks/provisioner/virsh/templates/create_images.sh.j2 new file mode 100644 index 000000000..ad5f23073 --- /dev/null +++ b/playbooks/provisioner/virsh/templates/create_images.sh.j2 @@ -0,0 +1,8 @@ +#!/bin/bash + +{% for node_name, node_values in provisioner.nodes.iteritems() %} +{% for disk_name, disk_values in node_values.disks.iteritems() %} +qemu-img create -f qcow2 {{ disk_values.path }}/{{ node_name }}.{{ disk_name }}.qcow2 {{ disk_values.size }} +{% endfor %} +virt-resize --expand /dev/sda1 /var/lib/libvirt/images/{{ provisioner.image.name }}-original {{ node_values.disks.disk1.path }}/{{ node_name }}.disk1.qcow2 +{% endfor %} diff --git a/playbooks/provisioner/virsh/templates/create_vms.sh.j2 b/playbooks/provisioner/virsh/templates/create_vms.sh.j2 new file mode 100644 index 000000000..647094f6e --- /dev/null +++ b/playbooks/provisioner/virsh/templates/create_vms.sh.j2 @@ -0,0 +1,18 @@ +#!/bin/bash +{% for node_name, node_values in provisioner.nodes.iteritems() %} +virt-install --name {{ node_name }} \ +{% for disk_name, disk_values in node_values.disks.iteritems() %} + --disk path={{ disk_values.path }}/{{ node_name }}.{{ disk_name }}.qcow2,device=disk,bus=virtio,format=qcow2 \ +{% endfor %} + --network network:{{ provisioner.network.network_list.data.name }} \ + --network network:{{ provisioner.network.network_list.management.name }} \ + --network network:{{ provisioner.network.network_list.external.name }} \ + --virt-type kvm \ + --cpu host \ + --ram {{ node_values.memory }} \ + --vcpus {{ node_values.cpu }} \ + --os-variant {{ node_values.os.variant }} \ + --import \ + --noautoconsole \ + --vnc +{% endfor %} diff --git a/roles/ospd/overcloud/images/build/tasks/main.yml 
b/roles/ospd/overcloud/images/build/tasks/main.yml new file mode 100644 index 000000000..0ac815576 --- /dev/null +++ b/roles/ospd/overcloud/images/build/tasks/main.yml @@ -0,0 +1,2 @@ +--- +# TODO: Add images build code here diff --git a/roles/ospd/overcloud/images/import/tasks/main.yml b/roles/ospd/overcloud/images/import/tasks/main.yml new file mode 100644 index 000000000..39474843e --- /dev/null +++ b/roles/ospd/overcloud/images/import/tasks/main.yml @@ -0,0 +1,15 @@ +--- +- name: ensure wget is installed + yum: + name: wget + state: latest + +- name: download the pre-built overcloud images + shell: > + wget --quiet -c -O ~/{{ item.value }} + "{{ installer.images.base_url }}/{{ item.value }}" + with_dict: "{{ installer.images.overcloud.files }}" + +- name: untar the images + shell: "tar -xvf ~/{{ item.value }}" + with_dict: "{{ installer.images.overcloud.files }}" diff --git a/settings/installer/ospd.yml b/settings/installer/ospd.yml index 4d60e5acd..06dc2e7c4 100644 --- a/settings/installer/ospd.yml +++ b/settings/installer/ospd.yml @@ -19,6 +19,54 @@ installer: network_cidr: 172.16.0.0/24 network_gateway: 172.16.0.1 discovery_iprange: 172.16.0.150,172.16.0.180 + overcloud: + template_base: "~/my_templates" + storage: + backend: ceph + template: + file: "puppet/hieradata/ceph.yaml" + content: + ceph::profile::params::osd_journal_size: 1024 + ceph::profile::params::osd_pool_default_pg_num: 128 + ceph::profile::params::osd_pool_default_pgp_num: 128 + ceph::profile::params::osd_pool_default_size: 3 + ceph::profile::params::osd_pool_default_min_size: 1 + ceph::profile::params::osds: + '/dev/vdb': + journal: '' + ceph::profile::params::manage_repo: false + ceph::profile::params::authentication_type: cephx + + ceph_pools: + - volumes + - vms + - images + + ceph_osd_selinux_permissive: true + network: + backend: vxlan + template: + file: "advance_network.yml" + content: + InternalApiNetCidr: 172.17.1.0/24 + InternalApiAllocationPools: [{'start': '172.17.1.10', 'end': 
'172.17.1.200'}] + InternalApiNetworkVlanID: 101 + TenantNetCidr: 172.17.2.0/24 + TenantAllocationPools: [{'start': '172.17.2.10', 'end': '172.17.2.200'}] + TenantNetworkVlanID: 201 + StorageNetCidr: 172.17.3.0/24 + StorageAllocationPools: [{'start': '172.17.3.10', 'end': '172.17.3.200'}] + StorageNetworkVlanID: 301 + StorageMgmtNetCidr: 172.17.4.0/24 + StorageMgmtAllocationPools: [{'start': '172.17.4.10', 'end': '172.17.4.200'}] + StorageMgmtNetworkVlanID: 401 + ExternalNetCidr: 192.168.122.0/24 + ExternalAllocationPools: [{'start': '192.168.122.100', 'end': '192.168.122.129'}] + ExternalInterfaceDefaultRoute: 192.168.122.1 + ControlPlaneSubnetCidr: "24" + ControlPlaneDefaultRoute: 172.16.0.1 + EC2MetadataIp: 172.16.0.1 + DnsServers: ['192.168.122.1', '8.8.8.8'] defaults: build: latest diff --git a/settings/installer/ospd/network/neutron/variant/gre.yml b/settings/installer/ospd/network/neutron/variant/gre.yml index 185fe6bf5..146eace96 100644 --- a/settings/installer/ospd/network/neutron/variant/gre.yml +++ b/settings/installer/ospd/network/neutron/variant/gre.yml @@ -1,5 +1,6 @@ --- installer: - network: - variant: gre + overcloud: + network: + backend: gre diff --git a/settings/installer/ospd/network/neutron/variant/vxlan.yml b/settings/installer/ospd/network/neutron/variant/vxlan.yml index d3eded44c..46bb18b76 100644 --- a/settings/installer/ospd/network/neutron/variant/vxlan.yml +++ b/settings/installer/ospd/network/neutron/variant/vxlan.yml @@ -1,5 +1,6 @@ --- installer: - network: - variant: vxlan + overcloud: + network: + backend: vxlan diff --git a/settings/provisioner/virsh.yml b/settings/provisioner/virsh.yml index 2d470abb8..403e7ae39 100644 --- a/settings/provisioner/virsh.yml +++ b/settings/provisioner/virsh.yml @@ -14,12 +14,12 @@ provisioner: network: network_list: - management: - name: "default" data: name: "provisioning" ip_address: "172.16.0.254" netmask: "255.255.255.0" + management: + name: "default" external: name: "external" ip_address: 
"192.168.0.254" diff --git a/settings/provisioner/virsh/topology/ospd_minimal.yml b/settings/provisioner/virsh/topology/ospd_minimal.yml index 7d7489b3e..6b5f747f1 100644 --- a/settings/provisioner/virsh/topology/ospd_minimal.yml +++ b/settings/provisioner/virsh/topology/ospd_minimal.yml @@ -8,9 +8,10 @@ provisioner: os: &os type: linux variant: !lookup provisioner.image.os.variant - disk: &disk - size: !lookup provisioner.image.disk.size - path: /var/lib/libvirt/images + disks: &disks + disk1: &disk1 + path: /var/lib/libvirt/images + size: 30G network: &network_params interfaces: &interfaces management: &mgmt_interface @@ -28,10 +29,33 @@ provisioner: name: compute cpu: 2 memory: 6144 + disks: + disk1: + path: /var/lib/libvirt/images + size: 20G groups: - compute - openstack_nodes + ceph: + <<: *controller + name: ceph + cpu: 2 + memory: 4096 + disks: + <<: *disks + disk1: + path: /var/lib/libvirt/images + dev: /dev/sda1 + size: 20G + disk2: + path: /var/lib/libvirt/images + dev: /dev/sda2 + size: 20G + groups: + - ceph + - openstack_nodes + undercloud: <<: *controller name: undercloud From a33292de4f94261144dd1465cf0b137adc06fd66 Mon Sep 17 00:00:00 2001 From: Tal Kammer Date: Sun, 24 Jan 2016 11:45:19 +0200 Subject: [PATCH 086/137] Add the missing --libvirt-type=qemu flag Change-Id: Ieabe0515c92cdc00098ee0895b2bf46df8225d91 --- playbooks/installer/ospd/overcloud/run.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/playbooks/installer/ospd/overcloud/run.yml b/playbooks/installer/ospd/overcloud/run.yml index 427668542..d6af767d8 100644 --- a/playbooks/installer/ospd/overcloud/run.yml +++ b/playbooks/installer/ospd/overcloud/run.yml @@ -11,6 +11,7 @@ source ~/stackrc; openstack overcloud deploy --debug \ --log-file overcloud_deployment_{{ 100 | random }}.log \ --templates \ + --libvirt-type qemu \ --neutron-network-type {{ installer.overcloud.network.backend }} \ --neutron-tunnel-types {{ installer.overcloud.network.backend }} \ --control-scale {{ 
groups['controller']|length }} \ From 6db85b9aa80fac8160bcf11056d5c902caf968c6 Mon Sep 17 00:00:00 2001 From: Tal Kammer Date: Sun, 24 Jan 2016 19:36:08 +0200 Subject: [PATCH 087/137] Tweaking the virsh provisioner Change-Id: I434f4a615db040f6154d650d65b3b28644411f9e --- playbooks/installer/ospd/overcloud/run.yml | 8 +- playbooks/provisioner/virsh/main.yml | 100 ++++++++++-------- .../provisioner/virsh/network/network.xml.j2 | 12 ++- settings/provisioner/virsh.yml | 32 ++++-- 4 files changed, 95 insertions(+), 57 deletions(-) diff --git a/playbooks/installer/ospd/overcloud/run.yml b/playbooks/installer/ospd/overcloud/run.yml index d6af767d8..d23b7fce7 100644 --- a/playbooks/installer/ospd/overcloud/run.yml +++ b/playbooks/installer/ospd/overcloud/run.yml @@ -14,9 +14,7 @@ --libvirt-type qemu \ --neutron-network-type {{ installer.overcloud.network.backend }} \ --neutron-tunnel-types {{ installer.overcloud.network.backend }} \ - --control-scale {{ groups['controller']|length }} \ + --control-scale {{ groups['controller']| length | default(0) }} \ --control-flavor controller \ - --compute-scale {{ groups['compute']|length }} \ - --compute-flavor compute \ - --compute-scale {{ groups['ceph']|length }} \ - --compute-flavor ceph + --compute-scale {{ groups['compute']| length | default(0) }} \ + --compute-flavor compute diff --git a/playbooks/provisioner/virsh/main.yml b/playbooks/provisioner/virsh/main.yml index 86cbe4259..e8418305c 100644 --- a/playbooks/provisioner/virsh/main.yml +++ b/playbooks/provisioner/virsh/main.yml @@ -3,7 +3,7 @@ hosts: localhost gather_facts: no tasks: - - name: Add hosts to host list + - name: add hosts to host list add_host: name="{{ item.value.name }}" groups="{{ item.value.groups| join(',') }}" @@ -35,22 +35,22 @@ gather_facts: no sudo: yes tasks: - - name: Check if CPU supports INTEL based KVM + - name: check if CPU supports INTEL based KVM shell: egrep -c 'vmx' /proc/cpuinfo ignore_errors: true register: kvm_intel - - name: Set fact for 
Intel based KVM + - name: set fact for Intel based KVM set_fact: kvm_base: "intel" when: kvm_intel == 0 - - name: Check if CPU supports AMD based KVM + - name: check if CPU supports AMD based KVM shell: egrep -c 'svm' /proc/cpuinfo ignore_errors: true register: kvm_amd - - name: Set fact for AMD based KVM + - name: set fact for AMD based KVM set_fact: kvm_base: "amd" when: kvm_amd == 0 @@ -60,7 +60,7 @@ gather_facts: no sudo: yes tasks: - - name: Enable nested KVM support for Intel + - name: enable nested KVM support for Intel lineinfile: dest: "/etc/modprobe.d/dist.conf" line: "options kvm_{{ kvm_base }} nested=1" @@ -68,7 +68,7 @@ create: yes when: kvm_base is defined - - name: Enable nested KVM support for AMD + - name: enable nested KVM support for AMD lineinfile: dest: "/etc/modprobe.d/dist.conf" line: "options {{ kvm_base }} nested=1" @@ -129,12 +129,12 @@ gather_facts: no sudo: yes tasks: - - name: Check for existing networks + - name: check for existing networks virt_net: command: list_nets register: network_list - - name: Create the networks for the topology + - name: create the networks for the topology virt_net: command: define name: "{{ item.value.name }}" @@ -142,82 +142,92 @@ when: "item.value.name not in network_list.list_nets" with_dict: provisioner.network.network_list - - name: Check if network is active + - name: check if network is active virt_net: state: active name: "{{ item.value.name }}" with_dict: provisioner.network.network_list - - name: Make network persistent + - name: make network persistent virt_net: autostart: "yes" name: "{{ item.value.name }}" with_dict: provisioner.network.network_list - - name: Download base guest image + - name: download base guest image get_url: dest: "/var/lib/libvirt/images/{{ provisioner.image.name }}-original" url: "{{ provisioner.image.base_url }}/{{ provisioner.image.name }}" - - name: Remove cloud-init from the base image + - name: remove cloud-init from the base image shell: "virt-customize -a {{ 
provisioner.image.name }}-original --run-command 'yum remove cloud-init* -y'" args: chdir: "/var/lib/libvirt/images/" - - name: Reset the password to a default one + - name: reset the password to a default one shell: "virt-customize -a {{ provisioner.image.name }}-original --root-password password:redhat" args: chdir: "/var/lib/libvirt/images/" - - name: Configure two network interfaces for the image + #TODO: configure interfaces based on config rather than hardcode + - name: configure three network interfaces for the image shell: "virt-customize -a {{ provisioner.image.name }}-original --run-command 'cp /etc/sysconfig/network-scripts/ifcfg-eth{0,1} && sed -i s/DEVICE=.*/DEVICE=eth1/g /etc/sysconfig/network-scripts/ifcfg-eth1'" args: chdir: "/var/lib/libvirt/images/" - - name: Create image templates for nodes + #TODO: configure interfaces based on config rather than hardcode + - name: configure three network interfaces for the image + shell: "virt-customize -a {{ provisioner.image.name }}-original --run-command 'cp /etc/sysconfig/network-scripts/ifcfg-eth{1,2} && sed -i s/DEVICE=.*/DEVICE=eth2/g /etc/sysconfig/network-scripts/ifcfg-eth2'" + args: + chdir: "/var/lib/libvirt/images/" + + - name: create image templates for nodes template: dest: "~/create_images.sh" src: "templates/create_images.sh.j2" mode: 0755 - - name: The create images script + - name: the create images script shell: "cat ~/create_images.sh" - name: Execute the create images script shell: "bash ~/create_images.sh" - - name: Create virt install templates for nodes + - name: create virt install templates for nodes template: dest: "~/create_vms.sh" src: "templates/create_vms.sh.j2" mode: 0755 - - name: The create vms script + - name: the create vms script shell: "cat ~/create_vms.sh" - - name: Execute the create vms script + - name: execute the create vms script shell: "bash ~/create_vms.sh" - - name: Get MAC list - shell: "virsh domiflist {{ item.key }} | awk '/default/ {print $5};'" - with_dict: 
provisioner.nodes + - name: get MAC list + shell: "virsh domiflist {{ item[0] }} | awk '/{{ item[1] }}/ {print $5};'" + with_nested: + - provisioner.nodes + - provisioner.network.network_list register: mac_list - set_fact: vm_mac_list: "{{ mac_list.results }}" - - name: Wait until one of the VMs gets an IP - shell: "virsh net-dhcp-leases {{ provisioner.network.network_list.management.name }} | awk /{{ item.stdout }}/'{print $5}' | cut -d'/' -f1" + - name: wait until one of the VMs gets an IP + shell: "virsh net-dhcp-leases {{ provisioner.network.network_list['%s' % item.item[1]].name }} | awk /{{ item.stdout }}/'{print $5}' | cut -d'/' -f1" + when: item.stdout != "" register: ip_list - until: ip_list.stdout.find("192.168.122") > -1 - retries: 10 - delay: 20 + until: ip_list.stdout.find("{{ provisioner.network.network_list['%s' % item.item[1]].ip_address | truncate(7, True, '') }}") > -1 + retries: 40 + delay: 5 with_items: vm_mac_list - set_fact: vm_ip_list: "{{ ip_list.results }}" - - name: Add hosts to host list + - name: add hosts to host list add_host: name="{{ item.key }}" groups="{{ item.value.groups| join(',') }}" @@ -226,28 +236,27 @@ ansible_ssh_password="redhat" with_dict: provisioner.nodes - - name: Update IPs of hosts + - name: update IPs of hosts add_host: - name="{{ item.0 }}" - ansible_ssh_host="{{ item.1.stdout }}" - with_together: - - provisioner.nodes.keys() - - vm_ip_list + name="{{ item.item.item[0] }}" + ansible_ssh_host="{{ item.item.stdout }}" + when: item.item is defined + with_items: vm_ip_list - - name: Make IPs persistent - shell: "virsh net-update default add ip-dhcp-host \"\" --live --config" - with_together: - - provisioner.nodes.keys() + - name: make IPs persistent + shell: "virsh net-update {{ item[0] }} add ip-dhcp-host \"\" --live --config" + when: item[1].item is defined and item[1].item.item[0] == item[0] + with_nested: + - provisioner.network.network_list - vm_ip_list - - vm_mac_list - - name: Generating RSA key for root + - 
name: generating RSA key for root user: name: root generate_ssh_key: yes delegate_to: virthost - - name: Copy created key from virthost for SSH proxy + - name: copy created key from virthost for SSH proxy fetch: src: ~/.ssh/id_rsa dest: "{{ inventory_dir }}/id_rsa" @@ -259,25 +268,26 @@ until: shell_result.stderr.find("ERROR") == -1 retries: 20 delay: 10 + when: item.item is defined with_items: vm_ip_list - name: Update ansible with the new hosts hosts: localhost tasks: - - name: Setup ssh config + - name: setup ssh config template: src: "templates/ssh.config.ansible.j2" dest: "{{ inventory_dir }}/ansible.ssh.config" mode: 0755 # This step is necessary in order to allow the SSH forwarding - - name: Update the ssh host name of each machine + - name: update the ssh host name of each machine add_host: name="{{ item.key }}" ansible_ssh_host="{{ item.key }}" with_dict: provisioner.nodes - - name: Update ansible with the new SSH settings + - name: update ansible with the new SSH settings ini_file: dest: "{{ inventory_dir }}/ansible.cfg" section: ssh_connection diff --git a/playbooks/provisioner/virsh/network/network.xml.j2 b/playbooks/provisioner/virsh/network/network.xml.j2 index 559363b3d..254a7d436 100644 --- a/playbooks/provisioner/virsh/network/network.xml.j2 +++ b/playbooks/provisioner/virsh/network/network.xml.j2 @@ -1,4 +1,14 @@ {{ item.value.name }} - + {% if item.value.forward is defined %} + + + {% endif %} + + {% if item.value.dhcp is defined %} + + + + {% endif %} + diff --git a/settings/provisioner/virsh.yml b/settings/provisioner/virsh.yml index 403e7ae39..ffb27b7bb 100644 --- a/settings/provisioner/virsh.yml +++ b/settings/provisioner/virsh.yml @@ -19,15 +19,35 @@ provisioner: ip_address: "172.16.0.254" netmask: "255.255.255.0" management: - name: "default" + name: "management" + ip_address: "10.0.0.1" + netmask: "255.255.255.0" + forward: + type: "nat" + dhcp: + range: + start: "10.0.0.2" + end: "10.0.0.100" + subnet_cidr: "10.0.0.0/24" + subnet_gateway: 
"10.0.0.1" + floating_ip: + start: "10.0.0.101" + end: "10.0.0.150" external: name: "external" - ip_address: "192.168.0.254" + ip_address: "192.168.1.1" netmask: "255.255.255.0" - subnet_cidr: "192.168.0.0/24" - subnet_gateway: "192.168.0.1" - allocation_start: "192.168.0.200" - allocation_end: "192.168.0.220" + forward: + type: "nat" + dhcp: + range: + start: "192.168.1.2" + end: "192.168.1.100" + subnet_cidr: "192.168.1.0/24" + subnet_gateway: "192.168.1.1" + floating_ip: + start: "192.168.1.101" + end: "192.168.1.150" packages: - libvirt From e1f8311ff0ac43400cf83189a006487c5ce75bfd Mon Sep 17 00:00:00 2001 From: Tal Kammer Date: Sun, 24 Jan 2016 22:11:33 +0200 Subject: [PATCH 088/137] Updating the overcloud according to the new virsh provisioner Change-Id: Ife6b981aea243c7601116d476575374002111ac4 --- playbooks/installer/ospd/overcloud/pre.yml | 8 +++---- playbooks/installer/ospd/overcloud/run.yml | 24 +++++++++---------- .../templates/overcloud_deploy.sh.j2 | 21 ++++++++++++++++ .../templates/virthost_instack.json.j2 | 4 ++-- playbooks/provisioner/virsh/main.yml | 6 ++--- 5 files changed, 41 insertions(+), 22 deletions(-) create mode 100644 playbooks/installer/ospd/overcloud/templates/overcloud_deploy.sh.j2 diff --git a/playbooks/installer/ospd/overcloud/pre.yml b/playbooks/installer/ospd/overcloud/pre.yml index 71c3a2dcf..991fd987b 100644 --- a/playbooks/installer/ospd/overcloud/pre.yml +++ b/playbooks/installer/ospd/overcloud/pre.yml @@ -18,13 +18,13 @@ hosts: virthost gather_facts: no tasks: - - name: Create stack user on virthost + - name: create stack user on virthost user: name: "{{ installer.user.name }}" state: present password: "{{ installer.user.password | password_hash('sha512') }}" - - name: Set permissions for the user to access the hypervisor + - name: set permissions for the user to access the hypervisor copy: content: | [libvirt Management Access] @@ -35,7 +35,7 @@ ResultActive=yes dest: 
"/etc/polkit-1/localauthority/50-local.d/50-libvirt-user-{{ installer.user.name }}.pkla" - - name: Make sure sshpass installed + - name: make sure sshpass installed yum: name: sshpass state: latest @@ -44,7 +44,7 @@ - name: SSH copy ID to the hypervisor become: yes become_user: "{{ installer.user.name }}" - shell: "sshpass -p '{{ installer.user.password }}' ssh-copy-id {{ installer.user.name }}@192.168.122.1 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" + shell: "sshpass -p '{{ installer.user.password }}' ssh-copy-id {{ installer.user.name }}@{{ provisioner.network.network_list.management.ip_address }} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" delegate_to: undercloud - name: prepare instack.json diff --git a/playbooks/installer/ospd/overcloud/run.yml b/playbooks/installer/ospd/overcloud/run.yml index d23b7fce7..31d2432b0 100644 --- a/playbooks/installer/ospd/overcloud/run.yml +++ b/playbooks/installer/ospd/overcloud/run.yml @@ -5,16 +5,14 @@ become_user: "{{ installer.user.name }}" gather_facts: yes tasks: - # TODO: Add the complete deployment using swift/cinder/scale etc - - name: Deploy the overcloud - shell: | - source ~/stackrc; openstack overcloud deploy --debug \ - --log-file overcloud_deployment_{{ 100 | random }}.log \ - --templates \ - --libvirt-type qemu \ - --neutron-network-type {{ installer.overcloud.network.backend }} \ - --neutron-tunnel-types {{ installer.overcloud.network.backend }} \ - --control-scale {{ groups['controller']| length | default(0) }} \ - --control-flavor controller \ - --compute-scale {{ groups['compute']| length | default(0) }} \ - --compute-flavor compute + - name: create the overcloud deploy script + template: + dest: "~/overcloud_deploy.sh" + src: "templates/overcloud_deploy.sh.j2" + mode: 0755 + + - name: the create overcloud script + shell: "cat ~/overcloud_deploy.sh" + + - name: Execute the overcloud deploy script (should take ~30 minutes :) ) + shell: "bash ~/overcloud_deploy.sh" diff --git 
a/playbooks/installer/ospd/overcloud/templates/overcloud_deploy.sh.j2 b/playbooks/installer/ospd/overcloud/templates/overcloud_deploy.sh.j2 new file mode 100644 index 000000000..48817b687 --- /dev/null +++ b/playbooks/installer/ospd/overcloud/templates/overcloud_deploy.sh.j2 @@ -0,0 +1,21 @@ +#!/bin/bash + +source ~/stackrc; +openstack overcloud deploy --debug \ + --templates \ + --libvirt-type qemu \ + --neutron-network-type {{ installer.overcloud.network.backend }} \ + --neutron-tunnel-types {{ installer.overcloud.network.backend }} \ +{% if groups['controller'] is defined %} + --control-scale {{ groups['controller']| length }} \ + --control-flavor controller \ +{% endif %} +{% if groups['compute'] is defined %} + --compute-scale {{ groups['compute']| length }} \ + --compute-flavor compute \ +{% endif %} +{% if groups['ceph'] is defined %} + --ceph-storage-scale {{ groups['ceph']| length }} \ + --ceph-storage-flavor ceph \ +{% endif %} + --log-file overcloud_deployment_{{ 100 | random }}.log diff --git a/playbooks/installer/ospd/overcloud/templates/virthost_instack.json.j2 b/playbooks/installer/ospd/overcloud/templates/virthost_instack.json.j2 index 365a500de..aba8aa006 100644 --- a/playbooks/installer/ospd/overcloud/templates/virthost_instack.json.j2 +++ b/playbooks/installer/ospd/overcloud/templates/virthost_instack.json.j2 @@ -2,14 +2,14 @@ "ssh-user": "{{ installer.user.name }}", "ssh-key": "$(cat ~/.ssh/id_rsa)", "power_manager": "nova.virt.baremetal.virtual_power_driver.VirtualPowerManager", - "host-ip": "192.168.122.1", + "host-ip": "{{ provisioner.network.network_list.management.ip_address }}", "arch": "x86_64", "nodes": [ {% for host_name in groups['openstack_nodes'] %} {% if host_name != 'undercloud' %} { "name": "{{ hostvars[host_name].inventory_hostname }}", - "pm_addr": "192.168.122.1", + "pm_addr": "{{ provisioner.network.network_list.management.ip_address }}", "pm_password": "$(cat ~/.ssh/id_rsa)", "pm_type": "pxe_ssh", "mac": ["{{ 
hostvars[host_name]['ansible_%s' % installer.undercloud.config.local_interface].macaddress }}"], diff --git a/playbooks/provisioner/virsh/main.yml b/playbooks/provisioner/virsh/main.yml index e8418305c..7f256fa4a 100644 --- a/playbooks/provisioner/virsh/main.yml +++ b/playbooks/provisioner/virsh/main.yml @@ -239,13 +239,13 @@ - name: update IPs of hosts add_host: name="{{ item.item.item[0] }}" - ansible_ssh_host="{{ item.item.stdout }}" - when: item.item is defined + ansible_ssh_host="{{ item.stdout }}" + when: item.item is defined and item.item.item[1] == "management" with_items: vm_ip_list - name: make IPs persistent shell: "virsh net-update {{ item[0] }} add ip-dhcp-host \"\" --live --config" - when: item[1].item is defined and item[1].item.item[0] == item[0] + when: item[1].item is defined and item[1].item.item[1] == item[0] with_nested: - provisioner.network.network_list - vm_ip_list From d04e7c1484de9affba5b63c271d00fe99237a584 Mon Sep 17 00:00:00 2001 From: Tal Kammer Date: Mon, 25 Jan 2016 07:00:55 +0200 Subject: [PATCH 089/137] Adding external network Temporary add of the external network until the templating system will be in place Change-Id: I6631ac3607e14320a77af4a814652b0f40fcf40e --- playbooks/installer/ospd/post.yml | 35 +++++++++++++++++++++++++------ 1 file changed, 29 insertions(+), 6 deletions(-) diff --git a/playbooks/installer/ospd/post.yml b/playbooks/installer/ospd/post.yml index 16d28fc90..138252639 100644 --- a/playbooks/installer/ospd/post.yml +++ b/playbooks/installer/ospd/post.yml @@ -11,9 +11,32 @@ src: "~/overcloudrc" dest: "{{ inventory_dir }}/keystonerc" flat: yes -# -#- name: Post tasks -# hosts: undercloud -# tasks: -# - name: Copy the tempest-input-file to the root home dir -# shell: "cp /home/{{ installer.user.name }}/tempest-deployer-input.conf ~/tempest-deployer-input.conf" + +#TODO: remove this when the templating system is in place +- name: External network creation + hosts: undercloud + become: yes + become_user: "{{ 
installer.user.name }}" + tasks: + - name: create the external network + shell: "source ~/overcloudrc; neutron net-create management --router:external" + + - name: create the external subnet + shell: "source ~/overcloudrc; neutron subnet-create management 172.16.0.0/24 --name management_subnet --enable-dhcp=False --allocation-pool start=172.16.0.210,end=172.16.0.230 --dns-nameserver {{ provisioner.network.network_list.management.ip_address }}" + + - name: create the internal network + shell: "source ~/overcloudrc; neutron net-create internal" + + - name: create the internal subnet + shell: "source ~/overcloudrc; neutron subnet-create internal 192.168.0.0/24 --name internal_subnet" + + - name: create the internal router + shell: "source ~/overcloudrc; neutron router-create internal_router" + + - name: set the router gateway to our management network + shell: "source ~/overcloudrc; neutron router-gateway-set internal_router management" + + - name: connect the router to the internal subnet + shell: "source ~/overcloudrc; neutron router-interface-add internal_router internal_subnet" + + From d2980f245540b2a63bf383bd8279917cbdd578ad Mon Sep 17 00:00:00 2001 From: Tal Kammer Date: Mon, 25 Jan 2016 07:47:14 +0200 Subject: [PATCH 090/137] Fix key file permissions Change-Id: I18da16218c8fd339aca4baa1df3c4e6c76295a29 --- playbooks/provisioner/virsh/main.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/playbooks/provisioner/virsh/main.yml b/playbooks/provisioner/virsh/main.yml index 7f256fa4a..96da8d846 100644 --- a/playbooks/provisioner/virsh/main.yml +++ b/playbooks/provisioner/virsh/main.yml @@ -262,6 +262,11 @@ dest: "{{ inventory_dir }}/id_rsa" flat: yes + - name: update file permissions + file: + path: "{{ inventory_dir }}/id_rsa" + mode: 0600 + - name: SSH copy ID to all VMs from virthost shell: "sshpass -p 'redhat' ssh-copy-id root@{{ item.stdout }} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" register: shell_result From 
eeac72a719a41a189ca25c56d324366bf59f1240 Mon Sep 17 00:00:00 2001 From: Tal Kammer Date: Mon, 25 Jan 2016 09:26:56 +0200 Subject: [PATCH 091/137] Adding ceph support Change-Id: Ie7147f9b8052b10746dcc4283295d0086f6f25a2 --- playbooks/installer/ospd/overcloud/run.yml | 23 +++++++++++++++---- .../templates/overcloud_deploy.sh.j2 | 1 + .../overcloud/templates/storage/ceph.yml.j2 | 17 ++++++++++++++ playbooks/installer/ospd/post.yml | 2 -- playbooks/provisioner/virsh/main.yml | 12 +++++----- 5 files changed, 43 insertions(+), 12 deletions(-) create mode 100644 playbooks/installer/ospd/overcloud/templates/storage/ceph.yml.j2 diff --git a/playbooks/installer/ospd/overcloud/run.yml b/playbooks/installer/ospd/overcloud/run.yml index 31d2432b0..578b23379 100644 --- a/playbooks/installer/ospd/overcloud/run.yml +++ b/playbooks/installer/ospd/overcloud/run.yml @@ -1,18 +1,33 @@ --- -- name: Prepare templates and deploy the overcloud +- name: Prepare templates hosts: undercloud become: yes become_user: "{{ installer.user.name }}" - gather_facts: yes + gather_facts: no + tasks: + - name: copy template folder + shell: "cp -rf /usr/share/openstack-tripleo-heat-templates ~/my_templates/" + + - name: prepare storage template + template: + src: "templates/storage/ceph.yml.j2" + dest: "~/my_templates/puppet/hieradata/ceph.yaml" + mode: 0755 + +- name: Install the overcloud + hosts: undercloud + become: yes + become_user: "{{ installer.user.name }}" + gather_facts: no tasks: - name: create the overcloud deploy script template: - dest: "~/overcloud_deploy.sh" src: "templates/overcloud_deploy.sh.j2" + dest: "~/overcloud_deploy.sh" mode: 0755 - name: the create overcloud script shell: "cat ~/overcloud_deploy.sh" - - name: Execute the overcloud deploy script (should take ~30 minutes :) ) + - name: execute the overcloud deploy script (should take ~30 minutes :) ) shell: "bash ~/overcloud_deploy.sh" diff --git a/playbooks/installer/ospd/overcloud/templates/overcloud_deploy.sh.j2 
b/playbooks/installer/ospd/overcloud/templates/overcloud_deploy.sh.j2 index 48817b687..5fdd28c7d 100644 --- a/playbooks/installer/ospd/overcloud/templates/overcloud_deploy.sh.j2 +++ b/playbooks/installer/ospd/overcloud/templates/overcloud_deploy.sh.j2 @@ -17,5 +17,6 @@ openstack overcloud deploy --debug \ {% if groups['ceph'] is defined %} --ceph-storage-scale {{ groups['ceph']| length }} \ --ceph-storage-flavor ceph \ + -e ~/my_templates/environments/storage-environment.yaml \ {% endif %} --log-file overcloud_deployment_{{ 100 | random }}.log diff --git a/playbooks/installer/ospd/overcloud/templates/storage/ceph.yml.j2 b/playbooks/installer/ospd/overcloud/templates/storage/ceph.yml.j2 new file mode 100644 index 000000000..9aca8384c --- /dev/null +++ b/playbooks/installer/ospd/overcloud/templates/storage/ceph.yml.j2 @@ -0,0 +1,17 @@ +ceph::profile::params::osd_journal_size: 1024 +ceph::profile::params::osd_pool_default_pg_num: 128 +ceph::profile::params::osd_pool_default_pgp_num: 128 +ceph::profile::params::osd_pool_default_size: {{ groups['ceph'] | length }} +ceph::profile::params::osd_pool_default_min_size: 1 +ceph::profile::params::osds: + '/dev/vdb': + journal: '' +ceph::profile::params::manage_repo: false +ceph::profile::params::authentication_type: cephx + +ceph_pools: + - volumes + - vms + - images + +ceph_osd_selinux_permissive: true diff --git a/playbooks/installer/ospd/post.yml b/playbooks/installer/ospd/post.yml index 138252639..a910ad261 100644 --- a/playbooks/installer/ospd/post.yml +++ b/playbooks/installer/ospd/post.yml @@ -38,5 +38,3 @@ - name: connect the router to the internal subnet shell: "source ~/overcloudrc; neutron router-interface-add internal_router internal_subnet" - - diff --git a/playbooks/provisioner/virsh/main.yml b/playbooks/provisioner/virsh/main.yml index 96da8d846..2a44d76d2 100644 --- a/playbooks/provisioner/virsh/main.yml +++ b/playbooks/provisioner/virsh/main.yml @@ -258,15 +258,10 @@ - name: copy created key from virthost for 
SSH proxy fetch: - src: ~/.ssh/id_rsa + src: "~/.ssh/id_rsa" dest: "{{ inventory_dir }}/id_rsa" flat: yes - - name: update file permissions - file: - path: "{{ inventory_dir }}/id_rsa" - mode: 0600 - - name: SSH copy ID to all VMs from virthost shell: "sshpass -p 'redhat' ssh-copy-id root@{{ item.stdout }} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" register: shell_result @@ -279,6 +274,11 @@ - name: Update ansible with the new hosts hosts: localhost tasks: + - name: update file permissions + file: + path: "{{ inventory_dir }}/id_rsa" + mode: 0600 + - name: setup ssh config template: src: "templates/ssh.config.ansible.j2" From a3af554e226ccf33fb7861747f125e2d881f6b1e Mon Sep 17 00:00:00 2001 From: Tal Kammer Date: Tue, 26 Jan 2016 19:29:13 +0200 Subject: [PATCH 092/137] Adding an HA topology Change-Id: Ic63c1f70e1837a538fd21af3595f32b6edd8e167 --- .../virsh/topology/ospd_3cont_2comp.yml | 73 +++++++++++++++++++ 1 file changed, 73 insertions(+) create mode 100644 settings/provisioner/virsh/topology/ospd_3cont_2comp.yml diff --git a/settings/provisioner/virsh/topology/ospd_3cont_2comp.yml b/settings/provisioner/virsh/topology/ospd_3cont_2comp.yml new file mode 100644 index 000000000..3b85deb5a --- /dev/null +++ b/settings/provisioner/virsh/topology/ospd_3cont_2comp.yml @@ -0,0 +1,73 @@ +--- +provisioner: + nodes: + controller1: &controller + name: controller1 + cpu: !lookup provisioner.image.cpu + memory: 8192 + os: &os + type: linux + variant: !lookup provisioner.image.os.variant + disks: &disks + disk1: &disk1 + path: /var/lib/libvirt/images + size: 20G + network: &network_params + interfaces: &interfaces + management: &mgmt_interface + label: eth0 + data: &data_interface + label: eth1 + external: &external_interface + label: eth2 + groups: + - controller + - openstack_nodes + + controller2: + <<: *controller + name: controller2 + + controller3: + <<: *controller + name: controller3 + + compute1: + <<: *controller + name: compute1 + cpu: 2 + 
memory: 6144 + disks: + disk1: + path: /var/lib/libvirt/images + size: 20G + groups: + - compute + - openstack_nodes + + compute2: + <<: *controller + name: compute2 + cpu: 2 + memory: 6144 + disks: + disk1: + path: /var/lib/libvirt/images + size: 20G + groups: + - compute + - openstack_nodes + + undercloud: + <<: *controller + name: undercloud + memory: !lookup provisioner.image.memory + disks: + <<: *disks + disk1: + <<: *disk1 + size: 20G + groups: + - undercloud + - openstack_nodes + From 0150e6c6a152805b1c74269c506a4509c41ed6bd Mon Sep 17 00:00:00 2001 From: Tal Kammer Date: Tue, 26 Jan 2016 19:57:57 +0200 Subject: [PATCH 093/137] Updating the virt invocation to --cpu host-model There are some problems with the original --cpu host that might cause failures on some BM machines, using host-model is safer Change-Id: I855e107a8c021dff589bc10256bfe1ef5a7291e3 --- playbooks/provisioner/virsh/templates/create_vms.sh.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/playbooks/provisioner/virsh/templates/create_vms.sh.j2 b/playbooks/provisioner/virsh/templates/create_vms.sh.j2 index 647094f6e..1aec5e459 100644 --- a/playbooks/provisioner/virsh/templates/create_vms.sh.j2 +++ b/playbooks/provisioner/virsh/templates/create_vms.sh.j2 @@ -8,7 +8,7 @@ virt-install --name {{ node_name }} \ --network network:{{ provisioner.network.network_list.management.name }} \ --network network:{{ provisioner.network.network_list.external.name }} \ --virt-type kvm \ - --cpu host \ + --cpu host-model \ --ram {{ node_values.memory }} \ --vcpus {{ node_values.cpu }} \ --os-variant {{ node_values.os.variant }} \ From b82f46ce9abf5d5123604739205032f552a5d795 Mon Sep 17 00:00:00 2001 From: Tal Kammer Date: Tue, 26 Jan 2016 21:25:42 +0200 Subject: [PATCH 094/137] Change the method of flavor tagging The previous approach was using the node.key to create the flavor. This is wrong when trying to deploy HA. 
This patch fixes it by using a predefined list of allowed values Change-Id: Id39f88c9d296808df0f706e5e9fff9e26576d5a6 --- playbooks/installer/ospd/overcloud/pre.yml | 12 +++++++----- playbooks/installer/ospd/overcloud/run.yml | 1 + .../ospd/overcloud/templates/overcloud_deploy.sh.j2 | 1 + 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/playbooks/installer/ospd/overcloud/pre.yml b/playbooks/installer/ospd/overcloud/pre.yml index 991fd987b..7f565410b 100644 --- a/playbooks/installer/ospd/overcloud/pre.yml +++ b/playbooks/installer/ospd/overcloud/pre.yml @@ -148,10 +148,11 @@ when: "'{{ item.key }}' != 'undercloud'" with_dict: provisioner.nodes + #TODO: change the 'when:' statement to be from the config - name: set additional properties shell: "source ~/stackrc; openstack flavor set --property 'cpu_arch'='x86_64' --property 'capabilities:boot_option'='local' --property 'capabilities:profile'='{{ item.key }}' {{ item.key }}" - when: "'{{ item.key }}' != 'undercloud'" - with_dict: provisioner.nodes + when: "'{{ item.key }}' in ['controller', 'compute', 'ceph']" + with_dict: groups - name: get the node UUID shell: "source ~/stackrc; ironic node-list | grep {{ item.key }} | awk '{print $2}'" @@ -159,9 +160,10 @@ with_dict: provisioner.nodes register: node_list + #TODO: change the 'when:' statement to be from the config - name: tag our nodes with the proper profile - shell: "source ~/stackrc; ironic node-update {{ item.1.stdout }} add properties/capabilities='profile:{{ item.0 }},boot_option:local'" - when: "'{{ item.0 }}' != 'undercloud'" + shell: "source ~/stackrc; ironic node-update {{ item.1.stdout }} add properties/capabilities='profile:{{ item.0.key }},boot_option:local'" + when: "'{{ item.key }}' in ['controller', 'compute', 'ceph']" with_together: - - provisioner.nodes.keys() + - groups - node_list.results diff --git a/playbooks/installer/ospd/overcloud/run.yml b/playbooks/installer/ospd/overcloud/run.yml index 578b23379..1348eaac3 100644 --- 
a/playbooks/installer/ospd/overcloud/run.yml +++ b/playbooks/installer/ospd/overcloud/run.yml @@ -13,6 +13,7 @@ src: "templates/storage/ceph.yml.j2" dest: "~/my_templates/puppet/hieradata/ceph.yaml" mode: 0755 + when: group['ceph'] is defined - name: Install the overcloud hosts: undercloud diff --git a/playbooks/installer/ospd/overcloud/templates/overcloud_deploy.sh.j2 b/playbooks/installer/ospd/overcloud/templates/overcloud_deploy.sh.j2 index 5fdd28c7d..a458d1ea2 100644 --- a/playbooks/installer/ospd/overcloud/templates/overcloud_deploy.sh.j2 +++ b/playbooks/installer/ospd/overcloud/templates/overcloud_deploy.sh.j2 @@ -4,6 +4,7 @@ source ~/stackrc; openstack overcloud deploy --debug \ --templates \ --libvirt-type qemu \ + --ntp-server {{ distro.config.ntp_server_ip }} \ --neutron-network-type {{ installer.overcloud.network.backend }} \ --neutron-tunnel-types {{ installer.overcloud.network.backend }} \ {% if groups['controller'] is defined %} From ef5f41c8a1fc13d189c3b3093a837e4787d6b1d6 Mon Sep 17 00:00:00 2001 From: Tal Kammer Date: Wed, 27 Jan 2016 12:55:59 +0200 Subject: [PATCH 095/137] Rework the provisioner and ospd playbooks to support HA Change-Id: I7987fd03e4726207dffa60c08e79878ff4524126 --- playbooks/installer/ospd/overcloud/pre.yml | 20 +++----- .../templates/virthost_instack.json.j2 | 6 +-- playbooks/provisioner/virsh/main.yml | 39 ++++++++------- .../virsh/templates/create_images.sh.j2 | 6 ++- .../virsh/templates/create_vms.sh.j2 | 6 ++- .../provisioner/virsh/topology/README.txt | 1 + .../provisioner/virsh/topology/all-in-one.yml | 1 + .../provisioner/virsh/topology/multi-node.yml | 1 + .../virsh/topology/ospd_1ctrl_1cmpt.yml | 50 +++++++++++++++++++ .../virsh/topology/ospd_3cont_2comp.yml | 30 +++-------- .../virsh/topology/ospd_minimal.yml | 1 + 11 files changed, 99 insertions(+), 62 deletions(-) create mode 100644 settings/provisioner/virsh/topology/ospd_1ctrl_1cmpt.yml diff --git a/playbooks/installer/ospd/overcloud/pre.yml 
b/playbooks/installer/ospd/overcloud/pre.yml index 7f565410b..1e10f7ebd 100644 --- a/playbooks/installer/ospd/overcloud/pre.yml +++ b/playbooks/installer/ospd/overcloud/pre.yml @@ -148,22 +148,18 @@ when: "'{{ item.key }}' != 'undercloud'" with_dict: provisioner.nodes - #TODO: change the 'when:' statement to be from the config - name: set additional properties shell: "source ~/stackrc; openstack flavor set --property 'cpu_arch'='x86_64' --property 'capabilities:boot_option'='local' --property 'capabilities:profile'='{{ item.key }}' {{ item.key }}" - when: "'{{ item.key }}' in ['controller', 'compute', 'ceph']" - with_dict: groups - - - name: get the node UUID - shell: "source ~/stackrc; ironic node-list | grep {{ item.key }} | awk '{print $2}'" when: "'{{ item.key }}' != 'undercloud'" with_dict: provisioner.nodes + + - name: get the node UUID + shell: "source ~/stackrc; ironic node-list | grep {{ item }} | awk '{print $2}'" + when: "'{{ item.rstrip('1234567890') }}' != 'undercloud'" + with_items: groups['openstack_nodes'] register: node_list - #TODO: change the 'when:' statement to be from the config - name: tag our nodes with the proper profile - shell: "source ~/stackrc; ironic node-update {{ item.1.stdout }} add properties/capabilities='profile:{{ item.0.key }},boot_option:local'" - when: "'{{ item.key }}' in ['controller', 'compute', 'ceph']" - with_together: - - groups - - node_list.results + shell: "source ~/stackrc; ironic node-update {{ item.stdout }} add properties/capabilities='profile:{{ item.item.rstrip('1234567890') }},boot_option:local'" + when: "item.item is defined and item.item.rstrip('1234567890') != 'undercloud'" + with_items: node_list.results diff --git a/playbooks/installer/ospd/overcloud/templates/virthost_instack.json.j2 b/playbooks/installer/ospd/overcloud/templates/virthost_instack.json.j2 index aba8aa006..e829c9943 100644 --- a/playbooks/installer/ospd/overcloud/templates/virthost_instack.json.j2 +++ 
b/playbooks/installer/ospd/overcloud/templates/virthost_instack.json.j2 @@ -13,9 +13,9 @@ "pm_password": "$(cat ~/.ssh/id_rsa)", "pm_type": "pxe_ssh", "mac": ["{{ hostvars[host_name]['ansible_%s' % installer.undercloud.config.local_interface].macaddress }}"], - "cpu": "{{ provisioner.nodes[host_name].cpu }}", - "memory": "{{ provisioner.nodes[host_name].memory }}", - "disk": "{{ provisioner.nodes[host_name].disks.disk1.size | regex_replace('(?P[0-9]+).*$', '\\g') }}", + "cpu": "{{ provisioner.nodes[host_name.rstrip('1234567890')].cpu }}", + "memory": "{{ provisioner.nodes[host_name.rstrip('1234567890')].memory }}", + "disk": "{{ provisioner.nodes[host_name.rstrip('1234567890')].disks.disk1.size | regex_replace('(?P[0-9]+).*$', '\\g') }}", "arch": "x86_64", "pm_user": "{{ installer.user.name }}" }{% if not loop.last %},{% endif %} diff --git a/playbooks/provisioner/virsh/main.yml b/playbooks/provisioner/virsh/main.yml index 2a44d76d2..b6178ebed 100644 --- a/playbooks/provisioner/virsh/main.yml +++ b/playbooks/provisioner/virsh/main.yml @@ -77,33 +77,33 @@ when: kvm_base is defined # A change in the modprove requires to reload the module - - name: Unload KVM module + - name: unload KVM module modprobe: name: "kvm_{{ kvm_base }}" state: absent ignore_errors: true when: kvm_base is defined - - name: Load KVM module + - name: load KVM module modprobe: name: "kvm_{{ kvm_base }}" state: present ignore_errors: true when: kvm_base is defined - - name: Install required QEMU-KVM packages + - name: install required QEMU-KVM packages yum: name=qemu-kvm state=present when: kvm_base is defined # Make sure the net-virtio module is enabled - - name: Unload vhost-net module + - name: unload vhost-net module modprobe: name: "vhost-net" state: absent ignore_errors: true when: kvm_base is defined - - name: Load KVM module + - name: load KVM module modprobe: name: "vhost-net" state: present @@ -190,7 +190,7 @@ - name: the create images script shell: "cat ~/create_images.sh" - - name: 
Execute the create images script + - name: execute the create images script shell: "bash ~/create_images.sh" - name: create virt install templates for nodes @@ -198,17 +198,24 @@ dest: "~/create_vms.sh" src: "templates/create_vms.sh.j2" mode: 0755 - + #TODO: move this logic to a module - name: the create vms script shell: "cat ~/create_vms.sh" - name: execute the create vms script shell: "bash ~/create_vms.sh" + - name: get the list of VMs + shell: "virsh list --all | grep -P '[\\w]+' | sed -n '2,$p' | awk '{print $2}'" + register: vm_names + + - set_fact: + vm_name_list: "{{ vm_names.stdout_lines }}" + - name: get MAC list shell: "virsh domiflist {{ item[0] }} | awk '/{{ item[1] }}/ {print $5};'" with_nested: - - provisioner.nodes + - vm_name_list - provisioner.network.network_list register: mac_list @@ -229,16 +236,10 @@ - name: add hosts to host list add_host: - name="{{ item.key }}" - groups="{{ item.value.groups| join(',') }}" - node_label="{{ item.value.name }}" + name="{{ item.item.item[0] }}" + groups="{{ provisioner.nodes['%s' % item.item.item[0].rstrip('1234567890')].groups | join(',') }}" ansible_ssh_user="root" ansible_ssh_password="redhat" - with_dict: provisioner.nodes - - - name: update IPs of hosts - add_host: - name="{{ item.item.item[0] }}" ansible_ssh_host="{{ item.stdout }}" when: item.item is defined and item.item.item[1] == "management" with_items: vm_ip_list @@ -288,9 +289,9 @@ # This step is necessary in order to allow the SSH forwarding - name: update the ssh host name of each machine add_host: - name="{{ item.key }}" - ansible_ssh_host="{{ item.key }}" - with_dict: provisioner.nodes + name="{{ item }}" + ansible_ssh_host="{{ item }}" + with_items: groups['openstack_nodes'] - name: update ansible with the new SSH settings ini_file: diff --git a/playbooks/provisioner/virsh/templates/create_images.sh.j2 b/playbooks/provisioner/virsh/templates/create_images.sh.j2 index ad5f23073..9841d7c58 100644 --- 
a/playbooks/provisioner/virsh/templates/create_images.sh.j2 +++ b/playbooks/provisioner/virsh/templates/create_images.sh.j2 @@ -1,8 +1,10 @@ #!/bin/bash {% for node_name, node_values in provisioner.nodes.iteritems() %} +{% for num in range(1, node_values.amount + 1, 1) %} {% for disk_name, disk_values in node_values.disks.iteritems() %} -qemu-img create -f qcow2 {{ disk_values.path }}/{{ node_name }}.{{ disk_name }}.qcow2 {{ disk_values.size }} +qemu-img create -f qcow2 {{ disk_values.path }}/{{ node_name }}{% if node_values.amount > 1 %}{{ num }}{% endif %}.{{ disk_name }}.qcow2 {{ disk_values.size }} +{% endfor %} +virt-resize --expand /dev/sda1 /var/lib/libvirt/images/{{ provisioner.image.name }}-original {{ node_values.disks.disk1.path }}/{{ node_name }}{% if node_values.amount > 1 %}{{ num }}{% endif %}.disk1.qcow2 {% endfor %} -virt-resize --expand /dev/sda1 /var/lib/libvirt/images/{{ provisioner.image.name }}-original {{ node_values.disks.disk1.path }}/{{ node_name }}.disk1.qcow2 {% endfor %} diff --git a/playbooks/provisioner/virsh/templates/create_vms.sh.j2 b/playbooks/provisioner/virsh/templates/create_vms.sh.j2 index 1aec5e459..a1d1e2be6 100644 --- a/playbooks/provisioner/virsh/templates/create_vms.sh.j2 +++ b/playbooks/provisioner/virsh/templates/create_vms.sh.j2 @@ -1,8 +1,9 @@ #!/bin/bash {% for node_name, node_values in provisioner.nodes.iteritems() %} -virt-install --name {{ node_name }} \ +{% for num in range(1, node_values.amount + 1, 1) %} +virt-install --name {{ node_name }}{% if node_values.amount > 1 %}{{ num }}{% endif %} \ {% for disk_name, disk_values in node_values.disks.iteritems() %} - --disk path={{ disk_values.path }}/{{ node_name }}.{{ disk_name }}.qcow2,device=disk,bus=virtio,format=qcow2 \ + --disk path={{ disk_values.path }}/{{ node_name }}{% if node_values.amount > 1 %}{{ num }}{% endif %}.{{ disk_name }}.qcow2,device=disk,bus=virtio,format=qcow2 \ {% endfor %} --network network:{{ provisioner.network.network_list.data.name }} \ 
--network network:{{ provisioner.network.network_list.management.name }} \ @@ -16,3 +17,4 @@ virt-install --name {{ node_name }} \ --noautoconsole \ --vnc {% endfor %} +{% endfor %} diff --git a/settings/provisioner/virsh/topology/README.txt b/settings/provisioner/virsh/topology/README.txt index 452f64a86..a9b47edfd 100644 --- a/settings/provisioner/virsh/topology/README.txt +++ b/settings/provisioner/virsh/topology/README.txt @@ -6,6 +6,7 @@ provisioner: # Dict of nodes example: name: controller + amount: 1 cpu: !lookup provisioner.image.cpu memory: 8192 os: &os diff --git a/settings/provisioner/virsh/topology/all-in-one.yml b/settings/provisioner/virsh/topology/all-in-one.yml index 92c7de975..c250d4c6d 100644 --- a/settings/provisioner/virsh/topology/all-in-one.yml +++ b/settings/provisioner/virsh/topology/all-in-one.yml @@ -3,6 +3,7 @@ provisioner: nodes: controller: &controller name: controller + amount: 1 cpu: !lookup provisioner.image.cpu memory: 16384 os: diff --git a/settings/provisioner/virsh/topology/multi-node.yml b/settings/provisioner/virsh/topology/multi-node.yml index e5bc3e6b5..08c29e78e 100644 --- a/settings/provisioner/virsh/topology/multi-node.yml +++ b/settings/provisioner/virsh/topology/multi-node.yml @@ -3,6 +3,7 @@ provisioner: nodes: controller: &controller name: controller + amount: 1 cpu: !lookup provisioner.image.cpu memory: 16384 os: diff --git a/settings/provisioner/virsh/topology/ospd_1ctrl_1cmpt.yml b/settings/provisioner/virsh/topology/ospd_1ctrl_1cmpt.yml new file mode 100644 index 000000000..22d77a272 --- /dev/null +++ b/settings/provisioner/virsh/topology/ospd_1ctrl_1cmpt.yml @@ -0,0 +1,50 @@ +--- +provisioner: + nodes: + controller: &controller + name: controller + amount: 3 + cpu: !lookup provisioner.image.cpu + memory: 8192 + os: &os + type: linux + variant: !lookup provisioner.image.os.variant + disks: &disks + disk1: &disk1 + path: /var/lib/libvirt/images + size: 30G + network: &network_params + interfaces: &interfaces + 
management: &mgmt_interface + label: eth0 + data: &data_interface + label: eth1 + external: &external_interface + label: eth2 + groups: + - controller + - openstack_nodes + + compute: + <<: *controller + name: compute + amount: 2 + cpu: 2 + memory: 6144 + disks: + disk1: + path: /var/lib/libvirt/images + size: 20G + groups: + - compute + - openstack_nodes + + undercloud: + <<: *controller + name: undercloud + amount: 1 + memory: !lookup provisioner.image.memory + groups: + - undercloud + - openstack_nodes + diff --git a/settings/provisioner/virsh/topology/ospd_3cont_2comp.yml b/settings/provisioner/virsh/topology/ospd_3cont_2comp.yml index 3b85deb5a..43907f7cf 100644 --- a/settings/provisioner/virsh/topology/ospd_3cont_2comp.yml +++ b/settings/provisioner/virsh/topology/ospd_3cont_2comp.yml @@ -1,8 +1,9 @@ --- provisioner: nodes: - controller1: &controller - name: controller1 + controller: &controller + name: controller + amount: 3 cpu: !lookup provisioner.image.cpu memory: 8192 os: &os @@ -24,30 +25,10 @@ provisioner: - controller - openstack_nodes - controller2: - <<: *controller - name: controller2 - - controller3: - <<: *controller - name: controller3 - - compute1: + compute: <<: *controller name: compute1 - cpu: 2 - memory: 6144 - disks: - disk1: - path: /var/lib/libvirt/images - size: 20G - groups: - - compute - - openstack_nodes - - compute2: - <<: *controller - name: compute2 + amount: 2 cpu: 2 memory: 6144 disks: @@ -61,6 +42,7 @@ provisioner: undercloud: <<: *controller name: undercloud + amount: 1 memory: !lookup provisioner.image.memory disks: <<: *disks diff --git a/settings/provisioner/virsh/topology/ospd_minimal.yml b/settings/provisioner/virsh/topology/ospd_minimal.yml index 6b5f747f1..093ded91b 100644 --- a/settings/provisioner/virsh/topology/ospd_minimal.yml +++ b/settings/provisioner/virsh/topology/ospd_minimal.yml @@ -3,6 +3,7 @@ provisioner: nodes: controller: &controller name: controller + amount: 1 cpu: !lookup provisioner.image.cpu memory: 
8192 os: &os From 81c9e2e4e0305b255e09646352b2ba949914e563 Mon Sep 17 00:00:00 2001 From: Tal Kammer Date: Wed, 27 Jan 2016 16:58:53 +0200 Subject: [PATCH 096/137] Fix cleanup for virst + adding tempest This patch fixes the cleanup that was broken due to the changes made to the provisioner. It also adds a "quick and dirty" tempest run just to be able to get results while development of the tester continues Change-Id: Ia15c6931ca46de809d139780c787b6b4294a8e8d --- playbooks/installer/ospd/post.yml | 35 +++++++++++++++++++++++++ playbooks/provisioner/virsh/cleanup.yml | 24 ++++++++--------- 2 files changed, 47 insertions(+), 12 deletions(-) diff --git a/playbooks/installer/ospd/post.yml b/playbooks/installer/ospd/post.yml index a910ad261..fa8acb434 100644 --- a/playbooks/installer/ospd/post.yml +++ b/playbooks/installer/ospd/post.yml @@ -38,3 +38,38 @@ - name: connect the router to the internal subnet shell: "source ~/overcloudrc; neutron router-interface-add internal_router internal_subnet" + +#TODO: remove this when tester is in place +- name: Setup and run tempest + hosts: undercloud + become: yes + become_user: "{{ installer.user.name }}" + tasks: + - name: create role heat_stack_owner + shell: "source ~/overcloudrc; keystone role-create --name heat_stack_owner" + + - name: create the tempest dir + shell: "mkdir tempest-dir" + + - name: setup the tempest-dir + shell: "/usr/share/openstack-tempest-kilo/tools/configure-tempest-directory" + args: + chdir: "~/tempest-dir" + + - name: run tempest configuration tool + shell: "source ~/overcloudrc; tools/config_tempest.py --deployer-input ~/tempest-deployer-input.conf --debug --create identity.uri $OS_AUTH_URL identity.admin_password $OS_PASSWORD" + args: + chdir: "~/tempest-dir" + + - name: run tempest + shell: "tools/run-tests.sh --parallel --concurrency 4" + args: + chdir: "~/tempest-dir" + + - name: fetch tempest result + fetch: + src: "~/tempest-dir/tempest.xml" + dest: "{{ lookup('env', 'PWD') }}/nosetests.xml" + 
flat: yes + + diff --git a/playbooks/provisioner/virsh/cleanup.yml b/playbooks/provisioner/virsh/cleanup.yml index 39720f3ea..bfa4c0a07 100644 --- a/playbooks/provisioner/virsh/cleanup.yml +++ b/playbooks/provisioner/virsh/cleanup.yml @@ -3,27 +3,27 @@ hosts: virthost gather_facts: no tasks: + - name: get the list of VMs + shell: "virsh list --all | grep -P '[\\w]+' | sed -n '2,$p' | awk '{print $2}'" + register: vm_names + + - set_fact: + vm_name_list: "{{ vm_names.stdout_lines }}" + - name: stop relevant vms virt: - name: "{{ item.key }}" + name: "{{ item }}" state: destroyed - with_dict: provisioner.nodes + with_items: vm_name_list - name: undefine relevant VMs virt: - name: "{{ item.key }}" + name: "{{ item }}" command: undefine - with_dict: provisioner.nodes + with_items: vm_name_list - name: remove the networks we created virt_net: name: "{{ item.value.name }}" state: absent - with_dict: provisioner.networks - - - name: Remove persistent IPs - shell: "virsh net-update default delete ip-dhcp-host \"\" --live --config" - with_together: - - provisioner.nodes.keys() - - vm_ip_list - - vm_mac_list + with_dict: provisioner.network.network_list From 6fc3ea2157534b7276842da695a530494f2fcc6a Mon Sep 17 00:00:00 2001 From: Tal Kammer Date: Wed, 27 Jan 2016 20:50:14 +0200 Subject: [PATCH 097/137] Ignore errors on tempest run task or job will fail. 
It is necessary to ignore errors on the run tempest task as if the task has any failed tests, it will throw an error and ansible will consider this as a failed task even though it's expected Change-Id: I2d6246f7d954b5a1565df44d93f85246c34c4176 --- playbooks/installer/ospd/post.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/playbooks/installer/ospd/post.yml b/playbooks/installer/ospd/post.yml index fa8acb434..df9f50aab 100644 --- a/playbooks/installer/ospd/post.yml +++ b/playbooks/installer/ospd/post.yml @@ -65,6 +65,7 @@ shell: "tools/run-tests.sh --parallel --concurrency 4" args: chdir: "~/tempest-dir" + ignore_errors: yes - name: fetch tempest result fetch: From d7987aa6d89c6c3ffd700446f4e08da6bfd7b371 Mon Sep 17 00:00:00 2001 From: Tal Kammer Date: Thu, 28 Jan 2016 08:49:06 +0200 Subject: [PATCH 098/137] Adding 3 HDD to the Ceph node and bumping tempest threads Change-Id: I668976894b0db2f5ded0278b5c5721495b1d825f --- playbooks/installer/ospd/overcloud/run.yml | 2 +- .../overcloud/templates/storage/ceph.yml.j2 | 14 ++++++++++--- playbooks/installer/ospd/post.yml | 2 +- .../provisioner/virsh/topology/README.txt | 1 + .../provisioner/virsh/topology/all-in-one.yml | 1 + .../provisioner/virsh/topology/multi-node.yml | 1 + .../virsh/topology/ospd_1ctrl_1cmpt.yml | 1 + .../virsh/topology/ospd_3cont_2comp.yml | 1 + .../virsh/topology/ospd_minimal.yml | 20 ++++++++++++------- 9 files changed, 31 insertions(+), 12 deletions(-) diff --git a/playbooks/installer/ospd/overcloud/run.yml b/playbooks/installer/ospd/overcloud/run.yml index 1348eaac3..fcc0dd0e2 100644 --- a/playbooks/installer/ospd/overcloud/run.yml +++ b/playbooks/installer/ospd/overcloud/run.yml @@ -13,7 +13,7 @@ src: "templates/storage/ceph.yml.j2" dest: "~/my_templates/puppet/hieradata/ceph.yaml" mode: 0755 - when: group['ceph'] is defined + when: groups['ceph'] is defined - name: Install the overcloud hosts: undercloud diff --git a/playbooks/installer/ospd/overcloud/templates/storage/ceph.yml.j2 
b/playbooks/installer/ospd/overcloud/templates/storage/ceph.yml.j2 index 9aca8384c..eff8f2efc 100644 --- a/playbooks/installer/ospd/overcloud/templates/storage/ceph.yml.j2 +++ b/playbooks/installer/ospd/overcloud/templates/storage/ceph.yml.j2 @@ -1,11 +1,19 @@ ceph::profile::params::osd_journal_size: 1024 ceph::profile::params::osd_pool_default_pg_num: 128 ceph::profile::params::osd_pool_default_pgp_num: 128 -ceph::profile::params::osd_pool_default_size: {{ groups['ceph'] | length }} +{% if provisioner.nodes.ceph.amount == 1 %} +ceph::profile::params::osd_pool_default_size: {{ provisioner.nodes.ceph.disks | length - 1 }} +{% else %} +ceph::profile::params::osd_pool_default_size: {{ provisioner.nodes.ceph.amount }} +{% endif %} ceph::profile::params::osd_pool_default_min_size: 1 ceph::profile::params::osds: - '/dev/vdb': - journal: '' +{% for disk_name, disk_values in provisioner.nodes.ceph.disks.iteritems() %} +{% if disk_name != 'disk1' %} + '{{ disk_values.dev }}': + journal: '' +{% endif %} +{% endfor %} ceph::profile::params::manage_repo: false ceph::profile::params::authentication_type: cephx diff --git a/playbooks/installer/ospd/post.yml b/playbooks/installer/ospd/post.yml index df9f50aab..08cf7493a 100644 --- a/playbooks/installer/ospd/post.yml +++ b/playbooks/installer/ospd/post.yml @@ -62,7 +62,7 @@ chdir: "~/tempest-dir" - name: run tempest - shell: "tools/run-tests.sh --parallel --concurrency 4" + shell: "tools/run-tests.sh --parallel --concurrency 8" args: chdir: "~/tempest-dir" ignore_errors: yes diff --git a/settings/provisioner/virsh/topology/README.txt b/settings/provisioner/virsh/topology/README.txt index a9b47edfd..e8fcde9c2 100644 --- a/settings/provisioner/virsh/topology/README.txt +++ b/settings/provisioner/virsh/topology/README.txt @@ -14,6 +14,7 @@ provisioner: variant: !lookup provisioner.image.os.variant disk: &disk size: !lookup provisioner.image.disk.size + dev: /dev/vda path: /var/lib/libvirt/images network: &network_params interfaces: 
&interfaces diff --git a/settings/provisioner/virsh/topology/all-in-one.yml b/settings/provisioner/virsh/topology/all-in-one.yml index c250d4c6d..efdb39a9e 100644 --- a/settings/provisioner/virsh/topology/all-in-one.yml +++ b/settings/provisioner/virsh/topology/all-in-one.yml @@ -11,6 +11,7 @@ provisioner: variant: !lookup provisioner.image.os.variant disk: size: !lookup provisioner.image.disk.size + dev: /dev/vda path: /var/lib/libvirt/images network: &network_params interfaces: &interfaces diff --git a/settings/provisioner/virsh/topology/multi-node.yml b/settings/provisioner/virsh/topology/multi-node.yml index 08c29e78e..e8db8609a 100644 --- a/settings/provisioner/virsh/topology/multi-node.yml +++ b/settings/provisioner/virsh/topology/multi-node.yml @@ -11,6 +11,7 @@ provisioner: variant: !lookup provisioner.image.os.variant disk: size: !lookup provisioner.image.disk.size + dev: /dev/vda path: /var/lib/libvirt/images network: &network_params interfaces: &interfaces diff --git a/settings/provisioner/virsh/topology/ospd_1ctrl_1cmpt.yml b/settings/provisioner/virsh/topology/ospd_1ctrl_1cmpt.yml index 22d77a272..b188ab195 100644 --- a/settings/provisioner/virsh/topology/ospd_1ctrl_1cmpt.yml +++ b/settings/provisioner/virsh/topology/ospd_1ctrl_1cmpt.yml @@ -12,6 +12,7 @@ provisioner: disks: &disks disk1: &disk1 path: /var/lib/libvirt/images + dev: /dev/vda size: 30G network: &network_params interfaces: &interfaces diff --git a/settings/provisioner/virsh/topology/ospd_3cont_2comp.yml b/settings/provisioner/virsh/topology/ospd_3cont_2comp.yml index 43907f7cf..a87ae663b 100644 --- a/settings/provisioner/virsh/topology/ospd_3cont_2comp.yml +++ b/settings/provisioner/virsh/topology/ospd_3cont_2comp.yml @@ -12,6 +12,7 @@ provisioner: disks: &disks disk1: &disk1 path: /var/lib/libvirt/images + dev: /dev/vda size: 20G network: &network_params interfaces: &interfaces diff --git a/settings/provisioner/virsh/topology/ospd_minimal.yml 
b/settings/provisioner/virsh/topology/ospd_minimal.yml index 093ded91b..2d56cb95e 100644 --- a/settings/provisioner/virsh/topology/ospd_minimal.yml +++ b/settings/provisioner/virsh/topology/ospd_minimal.yml @@ -10,8 +10,9 @@ provisioner: type: linux variant: !lookup provisioner.image.os.variant disks: &disks - disk1: &disk1 + disk1: path: /var/lib/libvirt/images + dev: /dev/sda1 size: 30G network: &network_params interfaces: &interfaces @@ -33,6 +34,7 @@ provisioner: disks: disk1: path: /var/lib/libvirt/images + dev: /dev/sda1 size: 20G groups: - compute @@ -44,15 +46,19 @@ provisioner: cpu: 2 memory: 4096 disks: - <<: *disks - disk1: + disk1: &disk1 path: /var/lib/libvirt/images - dev: /dev/sda1 + dev: /dev/vda size: 20G disk2: - path: /var/lib/libvirt/images - dev: /dev/sda2 - size: 20G + <<: *disk1 + dev: /dev/vdb + disk3: + <<: *disk1 + dev: /dev/vdc + disk4: + <<: *disk1 + dev: /dev/vdd groups: - ceph - openstack_nodes From 7136fa8b6d6d31d376e6ab50ed2138b43bf31f75 Mon Sep 17 00:00:00 2001 From: Arie Bregman Date: Thu, 28 Jan 2016 09:11:48 +0200 Subject: [PATCH 099/137] Add sshpass package in 'virsh' provisioner settings If sshpass is not present on the system, the task 'SSH copy ID to all VMs from virthost' will fail to run. 
Change-Id: I986d1adaf2cc906de3ca84bf9afe73353fa408f3 --- settings/provisioner/virsh.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/settings/provisioner/virsh.yml b/settings/provisioner/virsh.yml index ffb27b7bb..ef6e5ab3f 100644 --- a/settings/provisioner/virsh.yml +++ b/settings/provisioner/virsh.yml @@ -59,3 +59,4 @@ provisioner: - xauth - virt-viewer - libguestfs-xfs + - sshpass From af79f85bf3011e1bd918315e9632f4809d50467e Mon Sep 17 00:00:00 2001 From: Tal Kammer Date: Thu, 28 Jan 2016 20:50:03 +0200 Subject: [PATCH 100/137] Bump LVM default value to 10G + add reference to local templates Change-Id: I811634eaff736dbe25cb4369a91129f86b44eb32 --- playbooks/installer/ospd/overcloud/run.yml | 9 ++++++++- .../ospd/overcloud/templates/overcloud_deploy.sh.j2 | 2 +- playbooks/installer/ospd/post.yml | 6 +++--- 3 files changed, 12 insertions(+), 5 deletions(-) diff --git a/playbooks/installer/ospd/overcloud/run.yml b/playbooks/installer/ospd/overcloud/run.yml index fcc0dd0e2..e233d80f4 100644 --- a/playbooks/installer/ospd/overcloud/run.yml +++ b/playbooks/installer/ospd/overcloud/run.yml @@ -8,13 +8,20 @@ - name: copy template folder shell: "cp -rf /usr/share/openstack-tripleo-heat-templates ~/my_templates/" - - name: prepare storage template + - name: prepare ceph storage template template: src: "templates/storage/ceph.yml.j2" dest: "~/my_templates/puppet/hieradata/ceph.yaml" mode: 0755 when: groups['ceph'] is defined + - name: set a higher LVM size storage template + sudo: yes + lineinfile: + dest: "~/my_templates/puppet/hieradata/controller.yaml" + line: "cinder::setup_test_volume::size: '10G'" + when: groups['ceph'] is not defined + - name: Install the overcloud hosts: undercloud become: yes diff --git a/playbooks/installer/ospd/overcloud/templates/overcloud_deploy.sh.j2 b/playbooks/installer/ospd/overcloud/templates/overcloud_deploy.sh.j2 index a458d1ea2..e7a86c98f 100644 --- a/playbooks/installer/ospd/overcloud/templates/overcloud_deploy.sh.j2 +++ 
b/playbooks/installer/ospd/overcloud/templates/overcloud_deploy.sh.j2 @@ -2,7 +2,7 @@ source ~/stackrc; openstack overcloud deploy --debug \ - --templates \ + --templates ~/my_templates/ \ --libvirt-type qemu \ --ntp-server {{ distro.config.ntp_server_ip }} \ --neutron-network-type {{ installer.overcloud.network.backend }} \ diff --git a/playbooks/installer/ospd/post.yml b/playbooks/installer/ospd/post.yml index 08cf7493a..02e221d1a 100644 --- a/playbooks/installer/ospd/post.yml +++ b/playbooks/installer/ospd/post.yml @@ -19,10 +19,10 @@ become_user: "{{ installer.user.name }}" tasks: - name: create the external network - shell: "source ~/overcloudrc; neutron net-create management --router:external" + shell: "source ~/overcloudrc; neutron net-create nova --router:external" - name: create the external subnet - shell: "source ~/overcloudrc; neutron subnet-create management 172.16.0.0/24 --name management_subnet --enable-dhcp=False --allocation-pool start=172.16.0.210,end=172.16.0.230 --dns-nameserver {{ provisioner.network.network_list.management.ip_address }}" + shell: "source ~/overcloudrc; neutron subnet-create nova 172.16.0.0/24 --name management_subnet --enable-dhcp=False --allocation-pool start=172.16.0.210,end=172.16.0.230 --dns-nameserver {{ provisioner.network.network_list.management.ip_address }}" - name: create the internal network shell: "source ~/overcloudrc; neutron net-create internal" @@ -34,7 +34,7 @@ shell: "source ~/overcloudrc; neutron router-create internal_router" - name: set the router gateway to our management network - shell: "source ~/overcloudrc; neutron router-gateway-set internal_router management" + shell: "source ~/overcloudrc; neutron router-gateway-set internal_router nova" - name: connect the router to the internal subnet shell: "source ~/overcloudrc; neutron router-interface-add internal_router internal_subnet" From 922096f0778ae423589633009f016562cb077ba5 Mon Sep 17 00:00:00 2001 From: Arie Bregman Date: Thu, 28 Jan 2016 09:36:46 
+0200 Subject: [PATCH 101/137] [provisioner][virsh] fix cleanup Cleanup is not called by other virsh playbooks. To be familiar with virthost details, it has to be added using 'add_host'. Change-Id: I744818feebb8bfba7b39f7a5c5da887b989b7018 --- playbooks/provisioner/virsh/cleanup.yml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/playbooks/provisioner/virsh/cleanup.yml b/playbooks/provisioner/virsh/cleanup.yml index bfa4c0a07..67b8a4182 100644 --- a/playbooks/provisioner/virsh/cleanup.yml +++ b/playbooks/provisioner/virsh/cleanup.yml @@ -1,4 +1,18 @@ --- +- name: Add host to host list + hosts: localhost + gather_facts: no + tasks: + - name: add hosts to host list + add_host: + name="{{ item.value.name }}" + groups="{{ item.value.groups| join(',') }}" + node_label="{{ item.key }}" + ansible_ssh_user="{{ item.value.ssh_user }}" + ansible_ssh_host="{{ item.value.ssh_host }}" + ansible_ssh_private_key_file="{{ item.value.ssh_key_file }}" + with_dict: provisioner.hosts + - name: Remove all VMs and networks that were created hosts: virthost gather_facts: no From 13e01bb0b8d2689baa220c3aa4de82118baaf353 Mon Sep 17 00:00:00 2001 From: Tal Kammer Date: Sun, 31 Jan 2016 05:27:05 +0200 Subject: [PATCH 102/137] Adding net isolation Change-Id: I11f6b100c03c71106f95d09fc0e9c98570c4b5c4 --- playbooks/installer/ospd/overcloud/pre.yml | 3 + playbooks/installer/ospd/overcloud/run.yml | 9 +- .../templates/overcloud_deploy.sh.j2 | 7 +- .../network-isolation/files/ceph-storage.yaml | 126 ++++++++++++++ .../files/cinder-storage.yaml | 132 +++++++++++++++ .../network-isolation/files/compute.yaml | 120 ++++++++++++++ .../network-isolation/files/controller.yaml | 156 ++++++++++++++++++ .../files/swift-storage.yaml | 132 +++++++++++++++ .../network-isolation/tasks/main.yml | 23 +++ .../templates/isolation.yml.j2 | 7 + settings/installer/ospd.yml | 48 +----- settings/installer/ospd/network/isolation.yml | 32 ++++ .../network/isolation/type/bond-with-vlan.yml | 6 + 
.../isolation/type/single-nic-vlan.yml | 6 + .../network/isolation/type/three-nic-vlan.yml | 6 + .../{neutron => isolation}/variant/gre.yml | 0 .../ospd/network/isolation/variant/sriov.yml | 7 + .../ospd/network/isolation/variant/vlan.yml | 6 + .../{neutron => isolation}/variant/vxlan.yml | 0 settings/installer/ospd/network/neutron.yml | 4 - .../ospd/network/neutron/variant/sriov.yml | 6 - .../ospd/network/neutron/variant/vlan.yml | 5 - .../installer/ospd/network/no-isolation.yml | 7 + settings/provisioner/virsh.yml | 4 + .../virsh/topology/ospd_3cont_2comp_3ceph.yml | 70 ++++++++ 25 files changed, 853 insertions(+), 69 deletions(-) create mode 100644 roles/ospd/overcloud/network-isolation/files/ceph-storage.yaml create mode 100644 roles/ospd/overcloud/network-isolation/files/cinder-storage.yaml create mode 100644 roles/ospd/overcloud/network-isolation/files/compute.yaml create mode 100644 roles/ospd/overcloud/network-isolation/files/controller.yaml create mode 100644 roles/ospd/overcloud/network-isolation/files/swift-storage.yaml create mode 100644 roles/ospd/overcloud/network-isolation/tasks/main.yml create mode 100644 roles/ospd/overcloud/network-isolation/templates/isolation.yml.j2 create mode 100644 settings/installer/ospd/network/isolation.yml create mode 100644 settings/installer/ospd/network/isolation/type/bond-with-vlan.yml create mode 100644 settings/installer/ospd/network/isolation/type/single-nic-vlan.yml create mode 100644 settings/installer/ospd/network/isolation/type/three-nic-vlan.yml rename settings/installer/ospd/network/{neutron => isolation}/variant/gre.yml (100%) create mode 100644 settings/installer/ospd/network/isolation/variant/sriov.yml create mode 100644 settings/installer/ospd/network/isolation/variant/vlan.yml rename settings/installer/ospd/network/{neutron => isolation}/variant/vxlan.yml (100%) delete mode 100644 settings/installer/ospd/network/neutron.yml delete mode 100644 settings/installer/ospd/network/neutron/variant/sriov.yml 
delete mode 100644 settings/installer/ospd/network/neutron/variant/vlan.yml create mode 100644 settings/installer/ospd/network/no-isolation.yml create mode 100644 settings/provisioner/virsh/topology/ospd_3cont_2comp_3ceph.yml diff --git a/playbooks/installer/ospd/overcloud/pre.yml b/playbooks/installer/ospd/overcloud/pre.yml index 1e10f7ebd..2e9d7e2fb 100644 --- a/playbooks/installer/ospd/overcloud/pre.yml +++ b/playbooks/installer/ospd/overcloud/pre.yml @@ -163,3 +163,6 @@ shell: "source ~/stackrc; ironic node-update {{ item.stdout }} add properties/capabilities='profile:{{ item.item.rstrip('1234567890') }},boot_option:local'" when: "item.item is defined and item.item.rstrip('1234567890') != 'undercloud'" with_items: node_list.results + + - name: copy template folder for customization + shell: "cp -rf /usr/share/openstack-tripleo-heat-templates {{ installer.overcloud.template_base }}" diff --git a/playbooks/installer/ospd/overcloud/run.yml b/playbooks/installer/ospd/overcloud/run.yml index e233d80f4..c3d37749a 100644 --- a/playbooks/installer/ospd/overcloud/run.yml +++ b/playbooks/installer/ospd/overcloud/run.yml @@ -4,21 +4,20 @@ become: yes become_user: "{{ installer.user.name }}" gather_facts: no + roles: + - {role: ospd/overcloud/network-isolation/, when: installer.overcloud.network.type == "isolation"} tasks: - - name: copy template folder - shell: "cp -rf /usr/share/openstack-tripleo-heat-templates ~/my_templates/" - - name: prepare ceph storage template template: src: "templates/storage/ceph.yml.j2" - dest: "~/my_templates/puppet/hieradata/ceph.yaml" + dest: "{{ installer.overcloud.template_base }}/puppet/hieradata/ceph.yaml" mode: 0755 when: groups['ceph'] is defined - name: set a higher LVM size storage template sudo: yes lineinfile: - dest: "~/my_templates/puppet/hieradata/controller.yaml" + dest: "{{ installer.overcloud.template_base }}/puppet/hieradata/controller.yaml" line: "cinder::setup_test_volume::size: '10G'" when: groups['ceph'] is not defined 
diff --git a/playbooks/installer/ospd/overcloud/templates/overcloud_deploy.sh.j2 b/playbooks/installer/ospd/overcloud/templates/overcloud_deploy.sh.j2 index e7a86c98f..f73c3b38d 100644 --- a/playbooks/installer/ospd/overcloud/templates/overcloud_deploy.sh.j2 +++ b/playbooks/installer/ospd/overcloud/templates/overcloud_deploy.sh.j2 @@ -2,7 +2,7 @@ source ~/stackrc; openstack overcloud deploy --debug \ - --templates ~/my_templates/ \ + --templates {{ installer.overcloud.template_base }} \ --libvirt-type qemu \ --ntp-server {{ distro.config.ntp_server_ip }} \ --neutron-network-type {{ installer.overcloud.network.backend }} \ @@ -18,6 +18,9 @@ openstack overcloud deploy --debug \ {% if groups['ceph'] is defined %} --ceph-storage-scale {{ groups['ceph']| length }} \ --ceph-storage-flavor ceph \ - -e ~/my_templates/environments/storage-environment.yaml \ + -e {{ installer.overcloud.template_base }}/environments/storage-environment.yaml \ +{% endif %} +{% if installer.overcloud.network.type == "isolation" %} + -e {{ installer.overcloud.template_base }}/{{ installer.overcloud.network.template.file }} \ {% endif %} --log-file overcloud_deployment_{{ 100 | random }}.log diff --git a/roles/ospd/overcloud/network-isolation/files/ceph-storage.yaml b/roles/ospd/overcloud/network-isolation/files/ceph-storage.yaml new file mode 100644 index 000000000..2f07472b8 --- /dev/null +++ b/roles/ospd/overcloud/network-isolation/files/ceph-storage.yaml @@ -0,0 +1,126 @@ +heat_template_version: 2015-04-30 + +description: > + Software Config to drive os-net-config to configure multiple interfaces + for the ceph storage role. 
+ +parameters: + ControlPlaneIp: + default: '' + description: IP address/subnet on the ctlplane network + type: string + ExternalIpSubnet: + default: '' + description: IP address/subnet on the external network + type: string + InternalApiIpSubnet: + default: '' + description: IP address/subnet on the internal API network + type: string + StorageIpSubnet: + default: '' + description: IP address/subnet on the storage network + type: string + StorageMgmtIpSubnet: + default: '' + description: IP address/subnet on the storage mgmt network + type: string + TenantIpSubnet: + default: '' + description: IP address/subnet on the tenant network + type: string + ExternalNetworkVlanID: + default: 10 + description: Vlan ID for the external network traffic. + type: number + InternalApiNetworkVlanID: + default: 20 + description: Vlan ID for the internal_api network traffic. + type: number + StorageNetworkVlanID: + default: 30 + description: Vlan ID for the storage network traffic. + type: number + StorageMgmtNetworkVlanID: + default: 40 + description: Vlan ID for the storage mgmt network traffic. + type: number + TenantNetworkVlanID: + default: 50 + description: Vlan ID for the tenant network traffic. + type: number + ExternalInterfaceDefaultRoute: + default: '10.0.0.1' + description: default route for the external network + type: string + ControlPlaneSubnetCidr: # Override this via parameter_defaults + default: '24' + description: The subnet CIDR of the control plane network. + type: string + ControlPlaneDefaultRoute: # Override this via parameter_defaults + description: The subnet CIDR of the control plane network. + type: string + DnsServers: # Override this via parameter_defaults + default: [] + description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf. + type: json + EC2MetadataIp: # Override this via parameter_defaults + description: The IP address of the EC2 metadata server. 
+ type: string + +resources: + OsNetConfigImpl: + type: OS::Heat::StructuredConfig + properties: + group: os-apply-config + config: + os_net_config: + network_config: + - + type: interface + name: nic1 + use_dhcp: false + dns_servers: {get_param: DnsServers} + addresses: + - + ip_netmask: + list_join: + - '/' + - - {get_param: ControlPlaneIp} + - {get_param: ControlPlaneSubnetCidr} + routes: + - + ip_netmask: 0.0.0.0/0 + next_hop: {get_param: ControlPlaneDefaultRoute} + # Optionally have this interface as default route + default: true + - + ip_netmask: 169.254.169.254/32 + next_hop: {get_param: EC2MetadataIp} + - + type: ovs_bridge + name: br-isolated + use_dhcp: false + members: + - + type: interface + name: nic2 + # force the MAC address of the bridge to this interface + primary: true + - + type: vlan + vlan_id: {get_param: StorageNetworkVlanID} + addresses: + - + ip_netmask: {get_param: StorageIpSubnet} + - + type: vlan + vlan_id: {get_param: StorageMgmtNetworkVlanID} + addresses: + - + ip_netmask: {get_param: StorageMgmtIpSubnet} + +outputs: + OS::stack_id: + description: The OsNetConfigImpl resource. + value: {get_resource: OsNetConfigImpl} diff --git a/roles/ospd/overcloud/network-isolation/files/cinder-storage.yaml b/roles/ospd/overcloud/network-isolation/files/cinder-storage.yaml new file mode 100644 index 000000000..506ab7442 --- /dev/null +++ b/roles/ospd/overcloud/network-isolation/files/cinder-storage.yaml @@ -0,0 +1,132 @@ +heat_template_version: 2015-04-30 + +description: > + Software Config to drive os-net-config to configure multiple interfaces + for the cinder storage role. 
+ +parameters: + ControlPlaneIp: + default: '' + description: IP address/subnet on the ctlplane network + type: string + ExternalIpSubnet: + default: '' + description: IP address/subnet on the external network + type: string + InternalApiIpSubnet: + default: '' + description: IP address/subnet on the internal API network + type: string + StorageIpSubnet: + default: '' + description: IP address/subnet on the storage network + type: string + StorageMgmtIpSubnet: + default: '' + description: IP address/subnet on the storage mgmt network + type: string + TenantIpSubnet: + default: '' + description: IP address/subnet on the tenant network + type: string + ExternalNetworkVlanID: + default: 10 + description: Vlan ID for the external network traffic. + type: number + InternalApiNetworkVlanID: + default: 20 + description: Vlan ID for the internal_api network traffic. + type: number + StorageNetworkVlanID: + default: 30 + description: Vlan ID for the storage network traffic. + type: number + StorageMgmtNetworkVlanID: + default: 40 + description: Vlan ID for the storage mgmt network traffic. + type: number + TenantNetworkVlanID: + default: 50 + description: Vlan ID for the tenant network traffic. + type: number + ExternalInterfaceDefaultRoute: + default: '10.0.0.1' + description: default route for the external network + type: string + ControlPlaneSubnetCidr: # Override this via parameter_defaults + default: '24' + description: The subnet CIDR of the control plane network. + type: string + ControlPlaneDefaultRoute: # Override this via parameter_defaults + description: The subnet CIDR of the control plane network. + type: string + DnsServers: # Override this via parameter_defaults + default: [] + description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf. + type: json + EC2MetadataIp: # Override this via parameter_defaults + description: The IP address of the EC2 metadata server. 
+ type: string + +resources: + OsNetConfigImpl: + type: OS::Heat::StructuredConfig + properties: + group: os-apply-config + config: + os_net_config: + network_config: + - + type: interface + name: nic1 + use_dhcp: false + dns_servers: {get_param: DnsServers} + addresses: + - + ip_netmask: + list_join: + - '/' + - - {get_param: ControlPlaneIp} + - {get_param: ControlPlaneSubnetCidr} + routes: + - + ip_netmask: 0.0.0.0/0 + next_hop: {get_param: ControlPlaneDefaultRoute} + # Optionally have this interface as default route + default: true + - + ip_netmask: 169.254.169.254/32 + next_hop: {get_param: EC2MetadataIp} + - + type: ovs_bridge + name: br-isolated + use_dhcp: false + members: + - + type: interface + name: nic2 + # force the MAC address of the bridge to this interface + primary: true + - + type: vlan + vlan_id: {get_param: InternalApiNetworkVlanID} + addresses: + - + ip_netmask: {get_param: InternalApiIpSubnet} + - + type: vlan + vlan_id: {get_param: StorageNetworkVlanID} + addresses: + - + ip_netmask: {get_param: StorageIpSubnet} + - + type: vlan + vlan_id: {get_param: StorageMgmtNetworkVlanID} + addresses: + - + ip_netmask: {get_param: StorageMgmtIpSubnet} + +outputs: + OS::stack_id: + description: The OsNetConfigImpl resource. + value: {get_resource: OsNetConfigImpl} diff --git a/roles/ospd/overcloud/network-isolation/files/compute.yaml b/roles/ospd/overcloud/network-isolation/files/compute.yaml new file mode 100644 index 000000000..ca8676f4c --- /dev/null +++ b/roles/ospd/overcloud/network-isolation/files/compute.yaml @@ -0,0 +1,120 @@ +heat_template_version: 2015-04-30 + +description: > + Software Config to drive os-net-config to configure multiple interfaces + for the compute role. 
+ +parameters: + ControlPlaneIp: + default: '' + description: IP address/subnet on the ctlplane network + type: string + ExternalIpSubnet: + default: '' + description: IP address/subnet on the external network + type: string + InternalApiIpSubnet: + default: '' + description: IP address/subnet on the internal API network + type: string + StorageIpSubnet: + default: '' + description: IP address/subnet on the storage network + type: string + StorageMgmtIpSubnet: + default: '' + description: IP address/subnet on the storage mgmt network + type: string + TenantIpSubnet: + default: '' + description: IP address/subnet on the tenant network + type: string + InternalApiNetworkVlanID: + default: 20 + description: Vlan ID for the internal_api network traffic. + type: number + StorageNetworkVlanID: + default: 30 + description: Vlan ID for the storage network traffic. + type: number + TenantNetworkVlanID: + default: 50 + description: Vlan ID for the tenant network traffic. + type: number + ControlPlaneSubnetCidr: # Override this via parameter_defaults + default: '24' + description: The subnet CIDR of the control plane network. + type: string + ControlPlaneDefaultRoute: # Override this via parameter_defaults + description: The subnet CIDR of the control plane network. + type: string + DnsServers: # Override this via parameter_defaults + default: [] + description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf. + type: json + EC2MetadataIp: # Override this via parameter_defaults + description: The IP address of the EC2 metadata server. 
+ type: string + +resources: + OsNetConfigImpl: + type: OS::Heat::StructuredConfig + properties: + group: os-apply-config + config: + os_net_config: + network_config: + - + type: interface + name: nic1 + use_dhcp: false + dns_servers: {get_param: DnsServers} + addresses: + - + ip_netmask: + list_join: + - '/' + - - {get_param: ControlPlaneIp} + - {get_param: ControlPlaneSubnetCidr} + routes: + - + ip_netmask: 0.0.0.0/0 + next_hop: {get_param: ControlPlaneDefaultRoute} + # Optionally have this interface as default route + default: true + - + ip_netmask: 169.254.169.254/32 + next_hop: {get_param: EC2MetadataIp} + - + type: ovs_bridge + name: br-isolated + use_dhcp: false + members: + - + type: interface + name: nic2 + # force the MAC address of the bridge to this interface + primary: true + - + type: vlan + vlan_id: {get_param: InternalApiNetworkVlanID} + addresses: + - + ip_netmask: {get_param: InternalApiIpSubnet} + - + type: vlan + vlan_id: {get_param: StorageNetworkVlanID} + addresses: + - + ip_netmask: {get_param: StorageIpSubnet} + - + type: vlan + vlan_id: {get_param: TenantNetworkVlanID} + addresses: + - + ip_netmask: {get_param: TenantIpSubnet} + +outputs: + OS::stack_id: + description: The OsNetConfigImpl resource. + value: {get_resource: OsNetConfigImpl} diff --git a/roles/ospd/overcloud/network-isolation/files/controller.yaml b/roles/ospd/overcloud/network-isolation/files/controller.yaml new file mode 100644 index 000000000..03b571e20 --- /dev/null +++ b/roles/ospd/overcloud/network-isolation/files/controller.yaml @@ -0,0 +1,156 @@ +heat_template_version: 2015-04-30 + +description: > + Software Config to drive os-net-config to configure multiple interfaces + for the controller role. 
+
+parameters:
+  ControlPlaneIp:
+    default: ''
+    description: IP address/subnet on the ctlplane network
+    type: string
+  ExternalIpSubnet:
+    default: ''
+    description: IP address/subnet on the external network
+    type: string
+  InternalApiIpSubnet:
+    default: ''
+    description: IP address/subnet on the internal API network
+    type: string
+  StorageIpSubnet:
+    default: ''
+    description: IP address/subnet on the storage network
+    type: string
+  StorageMgmtIpSubnet:
+    default: ''
+    description: IP address/subnet on the storage mgmt network
+    type: string
+  TenantIpSubnet:
+    default: ''
+    description: IP address/subnet on the tenant network
+    type: string
+  ExternalNetworkVlanID:
+    default: 10
+    description: Vlan ID for the external network traffic.
+    type: number
+  InternalApiNetworkVlanID:
+    default: 20
+    description: Vlan ID for the internal_api network traffic.
+    type: number
+  StorageNetworkVlanID:
+    default: 30
+    description: Vlan ID for the storage network traffic.
+    type: number
+  StorageMgmtNetworkVlanID:
+    default: 40
+    description: Vlan ID for the storage mgmt network traffic.
+    type: number
+  TenantNetworkVlanID:
+    default: 50
+    description: Vlan ID for the tenant network traffic.
+    type: number
+  ExternalInterfaceDefaultRoute:
+    default: '10.0.0.1'
+    description: default route for the external network
+    type: string
+  ControlPlaneSubnetCidr: # Override this via parameter_defaults
+    default: '24'
+    description: The subnet CIDR of the control plane network.
+    type: string
+  ControlPlaneDefaultRoute: # Override this via parameter_defaults
+    description: The default route of the control plane network.
+    type: string
+  DnsServers: # Override this via parameter_defaults
+    default: []
+    description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf.
+    type: json
+  EC2MetadataIp: # Override this via parameter_defaults
+    description: The IP address of the EC2 metadata server.
+    type: string
+
+resources:
+  OsNetConfigImpl:
+    type: OS::Heat::StructuredConfig
+    properties:
+      group: os-apply-config
+      config:
+        os_net_config:
+          network_config:
+            -
+              type: interface
+              name: nic1
+              use_dhcp: false
+              dns_servers: {get_param: DnsServers}
+              addresses:
+                -
+                  ip_netmask:
+                    list_join:
+                      - '/'
+                      - - {get_param: ControlPlaneIp}
+                        - {get_param: ControlPlaneSubnetCidr}
+              routes:
+                -
+                  ip_netmask: 0.0.0.0/0
+                  next_hop: {get_param: ControlPlaneDefaultRoute}
+                  # Optionally have this interface as default route
+                  default: true
+                -
+                  ip_netmask: 169.254.169.254/32
+                  next_hop: {get_param: EC2MetadataIp}
+            -
+              type: ovs_bridge
+              name: br-isolated
+              use_dhcp: false
+              members:
+                -
+                  type: interface
+                  name: nic2
+                  primary: true
+                -
+                  type: vlan
+                  vlan_id: {get_param: InternalApiNetworkVlanID}
+                  addresses:
+                    -
+                      ip_netmask: {get_param: InternalApiIpSubnet}
+                -
+                  type: vlan
+                  vlan_id: {get_param: StorageNetworkVlanID}
+                  addresses:
+                    -
+                      ip_netmask: {get_param: StorageIpSubnet}
+                -
+                  type: vlan
+                  vlan_id: {get_param: StorageMgmtNetworkVlanID}
+                  addresses:
+                    -
+                      ip_netmask: {get_param: StorageMgmtIpSubnet}
+                -
+                  type: vlan
+                  vlan_id: {get_param: TenantNetworkVlanID}
+                  addresses:
+                    -
+                      ip_netmask: {get_param: TenantIpSubnet}
+            -
+              type: ovs_bridge
+              # This will default to br-ex, anything else requires specific
+              # bridge mapping entries for it to be used.
+              name: {get_input: bridge_name}
+              use_dhcp: false
+              addresses:
+                -
+                  ip_netmask: {get_param: ExternalIpSubnet}
+              routes:
+                -
+                  ip_netmask: 0.0.0.0/0
+                  next_hop: {get_param: ExternalInterfaceDefaultRoute}
+              members:
+                -
+                  type: interface
+                  name: nic3
+                  # force the MAC address of the bridge to this interface
+                  primary: true
+
+outputs:
+  OS::stack_id:
+    description: The OsNetConfigImpl resource.
+ value: {get_resource: OsNetConfigImpl} diff --git a/roles/ospd/overcloud/network-isolation/files/swift-storage.yaml b/roles/ospd/overcloud/network-isolation/files/swift-storage.yaml new file mode 100644 index 000000000..c3a48e6f5 --- /dev/null +++ b/roles/ospd/overcloud/network-isolation/files/swift-storage.yaml @@ -0,0 +1,132 @@ +heat_template_version: 2015-04-30 + +description: > + Software Config to drive os-net-config to configure multiple interfaces + for the swift storage role. + +parameters: + ControlPlaneIp: + default: '' + description: IP address/subnet on the ctlplane network + type: string + ExternalIpSubnet: + default: '' + description: IP address/subnet on the external network + type: string + InternalApiIpSubnet: + default: '' + description: IP address/subnet on the internal API network + type: string + StorageIpSubnet: + default: '' + description: IP address/subnet on the storage network + type: string + StorageMgmtIpSubnet: + default: '' + description: IP address/subnet on the storage mgmt network + type: string + TenantIpSubnet: + default: '' + description: IP address/subnet on the tenant network + type: string + ExternalNetworkVlanID: + default: 10 + description: Vlan ID for the external network traffic. + type: number + InternalApiNetworkVlanID: + default: 20 + description: Vlan ID for the internal_api network traffic. + type: number + StorageNetworkVlanID: + default: 30 + description: Vlan ID for the storage network traffic. + type: number + StorageMgmtNetworkVlanID: + default: 40 + description: Vlan ID for the storage mgmt network traffic. + type: number + TenantNetworkVlanID: + default: 50 + description: Vlan ID for the tenant network traffic. + type: number + ExternalInterfaceDefaultRoute: + default: '10.0.0.1' + description: default route for the external network + type: string + ControlPlaneSubnetCidr: # Override this via parameter_defaults + default: '24' + description: The subnet CIDR of the control plane network. 
+    type: string
+  ControlPlaneDefaultRoute: # Override this via parameter_defaults
+    description: The default route of the control plane network.
+    type: string
+  DnsServers: # Override this via parameter_defaults
+    default: []
+    description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf.
+    type: json
+  EC2MetadataIp: # Override this via parameter_defaults
+    description: The IP address of the EC2 metadata server.
+    type: string
+
+resources:
+  OsNetConfigImpl:
+    type: OS::Heat::StructuredConfig
+    properties:
+      group: os-apply-config
+      config:
+        os_net_config:
+          network_config:
+            -
+              type: interface
+              name: nic1
+              use_dhcp: false
+              dns_servers: {get_param: DnsServers}
+              addresses:
+                -
+                  ip_netmask:
+                    list_join:
+                      - '/'
+                      - - {get_param: ControlPlaneIp}
+                        - {get_param: ControlPlaneSubnetCidr}
+              routes:
+                -
+                  ip_netmask: 0.0.0.0/0
+                  next_hop: {get_param: ControlPlaneDefaultRoute}
+                  # Optionally have this interface as default route
+                  default: true
+                -
+                  ip_netmask: 169.254.169.254/32
+                  next_hop: {get_param: EC2MetadataIp}
+            -
+              type: ovs_bridge
+              name: br-isolated
+              use_dhcp: false
+              members:
+                -
+                  type: interface
+                  name: nic2
+                  # force the MAC address of the bridge to this interface
+                  primary: true
+                -
+                  type: vlan
+                  vlan_id: {get_param: InternalApiNetworkVlanID}
+                  addresses:
+                    -
+                      ip_netmask: {get_param: InternalApiIpSubnet}
+                -
+                  type: vlan
+                  vlan_id: {get_param: StorageNetworkVlanID}
+                  addresses:
+                    -
+                      ip_netmask: {get_param: StorageIpSubnet}
+                -
+                  type: vlan
+                  vlan_id: {get_param: StorageMgmtNetworkVlanID}
+                  addresses:
+                    -
+                      ip_netmask: {get_param: StorageMgmtIpSubnet}
+
+outputs:
+  OS::stack_id:
+    description: The OsNetConfigImpl resource.
+ value: {get_resource: OsNetConfigImpl} diff --git a/roles/ospd/overcloud/network-isolation/tasks/main.yml b/roles/ospd/overcloud/network-isolation/tasks/main.yml new file mode 100644 index 000000000..d8edd7496 --- /dev/null +++ b/roles/ospd/overcloud/network-isolation/tasks/main.yml @@ -0,0 +1,23 @@ +- name: prepare the network isolation + template: + src: "isolation.yml.j2" + dest: "{{ installer.overcloud.template_base }}/isolation_params.yml" + +- name: append our params to the network isolation file + shell: "cat {{ installer.overcloud.template_base }}/isolation_params.yml >> {{ installer.overcloud.template_base }}/environments/network-isolation.yaml" + +# Until https://review.openstack.org/#/c/214372 is merged, create the three-nic-vlans +- name: create folder for three per-role nic configuration + shell: "mkdir -p {{ installer.overcloud.template_base }}/network/config/three-nic-vlans/" + +# Check for a possible BUG in ansible copy, the "mode" doesn't work as expected +- name: copy files into folder + copy: + src: "{{ item }}" + dest: "{{ installer.overcloud.template_base }}/network/config/three-nic-vlans/" + with_items: + - controller.yaml + - compute.yaml + - cinder-storage.yaml + - ceph-storage.yaml + - swift-storage.yaml diff --git a/roles/ospd/overcloud/network-isolation/templates/isolation.yml.j2 b/roles/ospd/overcloud/network-isolation/templates/isolation.yml.j2 new file mode 100644 index 000000000..6c8d06010 --- /dev/null +++ b/roles/ospd/overcloud/network-isolation/templates/isolation.yml.j2 @@ -0,0 +1,7 @@ + # NIC Configs for our roles + OS::TripleO::Compute::Net::SoftwareConfig: ../network/config/three-nic-vlans/compute.yaml + OS::TripleO::Controller::Net::SoftwareConfig: ../network/config/three-nic-vlans/controller.yaml +{% if groups['ceph'] is defined %} + OS::TripleO::CephStorage::Net::SoftwareConfig: ../network/config/three-nic-vlans/ceph-storage.yaml +{% endif %} +{{ installer.overcloud.network.template.content | to_yaml }} diff --git 
a/settings/installer/ospd.yml b/settings/installer/ospd.yml index 06dc2e7c4..524563dfe 100644 --- a/settings/installer/ospd.yml +++ b/settings/installer/ospd.yml @@ -21,58 +21,12 @@ installer: discovery_iprange: 172.16.0.150,172.16.0.180 overcloud: template_base: "~/my_templates" - storage: - backend: ceph - template: - file: "puppet/hieradata/ceph.yaml" - content: - ceph::profile::params::osd_journal_size: 1024 - ceph::profile::params::osd_pool_default_pg_num: 128 - ceph::profile::params::osd_pool_default_pgp_num: 128 - ceph::profile::params::osd_pool_default_size: 3 - ceph::profile::params::osd_pool_default_min_size: 1 - ceph::profile::params::osds: - '/dev/vdb': - journal: '' - ceph::profile::params::manage_repo: false - ceph::profile::params::authentication_type: cephx - - ceph_pools: - - volumes - - vms - - images - - ceph_osd_selinux_permissive: true - network: - backend: vxlan - template: - file: "advance_network.yml" - content: - InternalApiNetCidr: 172.17.1.0/24 - InternalApiAllocationPools: [{'start': '172.17.1.10', 'end': '172.17.1.200'}] - InternalApiNetworkVlanID: 101 - TenantNetCidr: 172.17.2.0/24 - TenantAllocationPools: [{'start': '172.17.2.10', 'end': '172.17.2.200'}] - TenantNetworkVlanID: 201 - StorageNetCidr: 172.17.3.0/24 - StorageAllocationPools: [{'start': '172.17.3.10', 'end': '172.17.3.200'}] - StorageNetworkVlanID: 301 - StorageMgmtNetCidr: 172.17.4.0/24 - StorageMgmtAllocationPools: [{'start': '172.17.4.10', 'end': '172.17.4.200'}] - StorageMgmtNetworkVlanID: 401 - ExternalNetCidr: 192.168.122.0/24 - ExternalAllocationPools: [{'start': '192.168.122.100', 'end': '192.168.122.129'}] - ExternalInterfaceDefaultRoute: 192.168.122.1 - ControlPlaneSubnetCidr: "24" - ControlPlaneDefaultRoute: 172.16.0.1 - EC2MetadataIp: 172.16.0.1 - DnsServers: ['192.168.122.1', '8.8.8.8'] defaults: build: latest images: import version: 7 - network: neutron + network: no-isolation job: archive: diff --git a/settings/installer/ospd/network/isolation.yml 
b/settings/installer/ospd/network/isolation.yml new file mode 100644 index 000000000..59d718ccb --- /dev/null +++ b/settings/installer/ospd/network/isolation.yml @@ -0,0 +1,32 @@ +--- + +installer: + overcloud: + network: + type: "isolation" + template: + file: "environments/network-isolation.yaml" + content: + parameter_defaults: + InternalApiNetCidr: 172.17.1.0/24 + InternalApiAllocationPools: [{'start': '172.17.1.10', 'end': '172.17.1.200'}] + InternalApiNetworkVlanID: 101 + TenantNetCidr: 172.17.2.0/24 + TenantAllocationPools: [{'start': '172.17.2.10', 'end': '172.17.2.200'}] + TenantNetworkVlanID: 201 + StorageNetCidr: 172.17.3.0/24 + StorageAllocationPools: [{'start': '172.17.3.10', 'end': '172.17.3.200'}] + StorageNetworkVlanID: 301 + StorageMgmtNetCidr: 172.17.4.0/24 + StorageMgmtAllocationPools: [{'start': '172.17.4.10', 'end': '172.17.4.200'}] + StorageMgmtNetworkVlanID: 401 + ExternalNetCidr: 192.168.1.0/24 + ExternalAllocationPools: [{'start': '192.168.1.101', 'end': '192.168.1.149'}] + ExternalInterfaceDefaultRoute: 192.168.1.1 + ControlPlaneSubnetCidr: "24" + ControlPlaneDefaultRoute: 172.16.0.1 + EC2MetadataIp: 172.16.0.1 + DnsServers: ['192.168.1.1', '8.8.8.8'] + +defaults: + variant: vxlan diff --git a/settings/installer/ospd/network/isolation/type/bond-with-vlan.yml b/settings/installer/ospd/network/isolation/type/bond-with-vlan.yml new file mode 100644 index 000000000..7f762c1e7 --- /dev/null +++ b/settings/installer/ospd/network/isolation/type/bond-with-vlan.yml @@ -0,0 +1,6 @@ +--- + +installer: + overcloud: + network: + type: "bond-with-vlan" diff --git a/settings/installer/ospd/network/isolation/type/single-nic-vlan.yml b/settings/installer/ospd/network/isolation/type/single-nic-vlan.yml new file mode 100644 index 000000000..cc48d100e --- /dev/null +++ b/settings/installer/ospd/network/isolation/type/single-nic-vlan.yml @@ -0,0 +1,6 @@ +--- + +installer: + overcloud: + network: + type: "single-nic-vlan" diff --git 
a/settings/installer/ospd/network/isolation/type/three-nic-vlan.yml b/settings/installer/ospd/network/isolation/type/three-nic-vlan.yml new file mode 100644 index 000000000..51e97b5c2 --- /dev/null +++ b/settings/installer/ospd/network/isolation/type/three-nic-vlan.yml @@ -0,0 +1,6 @@ +--- + +installer: + overcloud: + network: + type: "three-nic-vlan" diff --git a/settings/installer/ospd/network/neutron/variant/gre.yml b/settings/installer/ospd/network/isolation/variant/gre.yml similarity index 100% rename from settings/installer/ospd/network/neutron/variant/gre.yml rename to settings/installer/ospd/network/isolation/variant/gre.yml diff --git a/settings/installer/ospd/network/isolation/variant/sriov.yml b/settings/installer/ospd/network/isolation/variant/sriov.yml new file mode 100644 index 000000000..30ca32dd4 --- /dev/null +++ b/settings/installer/ospd/network/isolation/variant/sriov.yml @@ -0,0 +1,7 @@ +--- + +installer: + overcloud: + network: + backend: sriov + diff --git a/settings/installer/ospd/network/isolation/variant/vlan.yml b/settings/installer/ospd/network/isolation/variant/vlan.yml new file mode 100644 index 000000000..e71d98082 --- /dev/null +++ b/settings/installer/ospd/network/isolation/variant/vlan.yml @@ -0,0 +1,6 @@ +--- + +installer: + overcloud: + network: + backend: vlan diff --git a/settings/installer/ospd/network/neutron/variant/vxlan.yml b/settings/installer/ospd/network/isolation/variant/vxlan.yml similarity index 100% rename from settings/installer/ospd/network/neutron/variant/vxlan.yml rename to settings/installer/ospd/network/isolation/variant/vxlan.yml diff --git a/settings/installer/ospd/network/neutron.yml b/settings/installer/ospd/network/neutron.yml deleted file mode 100644 index 88d3d64c8..000000000 --- a/settings/installer/ospd/network/neutron.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- - -defaults: - variant: vxlan diff --git a/settings/installer/ospd/network/neutron/variant/sriov.yml 
b/settings/installer/ospd/network/neutron/variant/sriov.yml deleted file mode 100644 index d91da3550..000000000 --- a/settings/installer/ospd/network/neutron/variant/sriov.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- - -installer: - network: - variant: sriov - diff --git a/settings/installer/ospd/network/neutron/variant/vlan.yml b/settings/installer/ospd/network/neutron/variant/vlan.yml deleted file mode 100644 index 48c736529..000000000 --- a/settings/installer/ospd/network/neutron/variant/vlan.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- - -installer: - network: - variant: vlan diff --git a/settings/installer/ospd/network/no-isolation.yml b/settings/installer/ospd/network/no-isolation.yml new file mode 100644 index 000000000..f1f1b4d3e --- /dev/null +++ b/settings/installer/ospd/network/no-isolation.yml @@ -0,0 +1,7 @@ +--- + +installer: + overcloud: + network: + type: "no-isolation" + backend: vxlan diff --git a/settings/provisioner/virsh.yml b/settings/provisioner/virsh.yml index ef6e5ab3f..e79162e17 100644 --- a/settings/provisioner/virsh.yml +++ b/settings/provisioner/virsh.yml @@ -60,3 +60,7 @@ provisioner: - virt-viewer - libguestfs-xfs - sshpass + +defaults: + image: rhel + topology: all-in-one diff --git a/settings/provisioner/virsh/topology/ospd_3cont_2comp_3ceph.yml b/settings/provisioner/virsh/topology/ospd_3cont_2comp_3ceph.yml new file mode 100644 index 000000000..0ca750d4a --- /dev/null +++ b/settings/provisioner/virsh/topology/ospd_3cont_2comp_3ceph.yml @@ -0,0 +1,70 @@ +--- +provisioner: + nodes: + controller: &controller + name: controller + amount: 3 + cpu: !lookup provisioner.image.cpu + memory: 8192 + os: &os + type: linux + variant: !lookup provisioner.image.os.variant + disks: &disks + disk1: &disk1 + path: /var/lib/libvirt/images + dev: /dev/vda + size: 20G + network: &network_params + interfaces: &interfaces + management: &mgmt_interface + label: eth0 + data: &data_interface + label: eth1 + external: &external_interface + label: eth2 + groups: + - 
controller + - openstack_nodes + + compute: + <<: *controller + name: compute1 + amount: 2 + cpu: 2 + memory: 6144 + disks: + disk1: + path: /var/lib/libvirt/images + size: 20G + groups: + - compute + - openstack_nodes + + ceph: + <<: *controller + name: ceph + cpu: 2 + memory: 4096 + disks: + <<: *disks + disk2: + <<: *disk1 + dev: /dev/vdb + groups: + - ceph + - openstack_nodes + + undercloud: + <<: *controller + name: undercloud + amount: 1 + memory: !lookup provisioner.image.memory + disks: + <<: *disks + disk1: + <<: *disk1 + size: 20G + groups: + - undercloud + - openstack_nodes + From cc8f31c0bd7759b1b1aac876eb9b5331147c2871 Mon Sep 17 00:00:00 2001 From: Tal Kammer Date: Mon, 1 Feb 2016 13:53:34 +0200 Subject: [PATCH 103/137] fixing the aio and multi-node for virsh + default isolation Change-Id: I610d641ab027cb7f13bbdc5b2637be7ca1b18c05 --- settings/installer/ospd/network/isolation.yml | 1 + settings/provisioner/virsh/topology/all-in-one.yml | 9 +++++---- settings/provisioner/virsh/topology/multi-node.yml | 9 +++++---- 3 files changed, 11 insertions(+), 8 deletions(-) diff --git a/settings/installer/ospd/network/isolation.yml b/settings/installer/ospd/network/isolation.yml index 59d718ccb..f51892239 100644 --- a/settings/installer/ospd/network/isolation.yml +++ b/settings/installer/ospd/network/isolation.yml @@ -29,4 +29,5 @@ installer: DnsServers: ['192.168.1.1', '8.8.8.8'] defaults: + type: "three-nic-vlan" variant: vxlan diff --git a/settings/provisioner/virsh/topology/all-in-one.yml b/settings/provisioner/virsh/topology/all-in-one.yml index efdb39a9e..cfad9cb65 100644 --- a/settings/provisioner/virsh/topology/all-in-one.yml +++ b/settings/provisioner/virsh/topology/all-in-one.yml @@ -9,10 +9,11 @@ provisioner: os: type: linux variant: !lookup provisioner.image.os.variant - disk: - size: !lookup provisioner.image.disk.size - dev: /dev/vda - path: /var/lib/libvirt/images + disks: + disk1: &disk1 + path: /var/lib/libvirt/images + dev: /dev/vda + size: 20G 
network: &network_params interfaces: &interfaces data: &data_interface diff --git a/settings/provisioner/virsh/topology/multi-node.yml b/settings/provisioner/virsh/topology/multi-node.yml index e8db8609a..07750a293 100644 --- a/settings/provisioner/virsh/topology/multi-node.yml +++ b/settings/provisioner/virsh/topology/multi-node.yml @@ -9,10 +9,11 @@ provisioner: os: type: linux variant: !lookup provisioner.image.os.variant - disk: - size: !lookup provisioner.image.disk.size - dev: /dev/vda - path: /var/lib/libvirt/images + disks: + disk1: &disk1 + path: /var/lib/libvirt/images + dev: /dev/vda + size: 20G network: &network_params interfaces: &interfaces data: &data_interface From 8411b43092a86729930b16de7cc14095b6945a79 Mon Sep 17 00:00:00 2001 From: Tal Kammer Date: Mon, 1 Feb 2016 15:39:58 +0200 Subject: [PATCH 104/137] Moving ceph into it's own role Change-Id: I8b7d3bbd24d0e96cbd902082901d80d4cffb59f0 --- playbooks/installer/ospd/overcloud/run.yml | 9 ++------- roles/ospd/overcloud/storage/ceph/tasks/main.yml | 11 +++++++++++ 2 files changed, 13 insertions(+), 7 deletions(-) create mode 100644 roles/ospd/overcloud/storage/ceph/tasks/main.yml diff --git a/playbooks/installer/ospd/overcloud/run.yml b/playbooks/installer/ospd/overcloud/run.yml index c3d37749a..dcf02f8fe 100644 --- a/playbooks/installer/ospd/overcloud/run.yml +++ b/playbooks/installer/ospd/overcloud/run.yml @@ -6,14 +6,9 @@ gather_facts: no roles: - {role: ospd/overcloud/network-isolation/, when: installer.overcloud.network.type == "isolation"} + - {role: ospd/overcloud/storage/ceph/, when: groups['ceph'] is defined} tasks: - - name: prepare ceph storage template - template: - src: "templates/storage/ceph.yml.j2" - dest: "{{ installer.overcloud.template_base }}/puppet/hieradata/ceph.yaml" - mode: 0755 - when: groups['ceph'] is defined - + #TODO: move this to a role for cinder/LVM - name: set a higher LVM size storage template sudo: yes lineinfile: diff --git 
a/roles/ospd/overcloud/storage/ceph/tasks/main.yml b/roles/ospd/overcloud/storage/ceph/tasks/main.yml new file mode 100644 index 000000000..6ae459b94 --- /dev/null +++ b/roles/ospd/overcloud/storage/ceph/tasks/main.yml @@ -0,0 +1,11 @@ +- name: prepare ceph storage template + template: + src: "templates/storage/ceph.yml.j2" + dest: "{{ installer.overcloud.template_base }}/puppet/hieradata/ceph.yaml" + mode: 0755 + +- name: workaround for BUG where service ceph is not available + service: + name: "ceph" + state: "restarted" + delegate_to: ceph From 7d14574f4574060047375226cbf98dc873d9b81a Mon Sep 17 00:00:00 2001 From: Tal Kammer Date: Mon, 1 Feb 2016 16:02:30 +0200 Subject: [PATCH 105/137] Fix ceph when statement Change-Id: I17f942a8a704fb5f182438ca57b7ae5a382b799c --- playbooks/installer/ospd/overcloud/run.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/playbooks/installer/ospd/overcloud/run.yml b/playbooks/installer/ospd/overcloud/run.yml index dcf02f8fe..afa5b3208 100644 --- a/playbooks/installer/ospd/overcloud/run.yml +++ b/playbooks/installer/ospd/overcloud/run.yml @@ -6,7 +6,7 @@ gather_facts: no roles: - {role: ospd/overcloud/network-isolation/, when: installer.overcloud.network.type == "isolation"} - - {role: ospd/overcloud/storage/ceph/, when: groups['ceph'] is defined} + - {role: ospd/overcloud/storage/ceph/, when: provisioner.nodes.ceph is defined} tasks: #TODO: move this to a role for cinder/LVM - name: set a higher LVM size storage template From 93051a3723cd02e79d0dd77643a05520f957edc7 Mon Sep 17 00:00:00 2001 From: Tal Kammer Date: Mon, 1 Feb 2016 23:03:14 +0200 Subject: [PATCH 106/137] Creating ssh forwarding post overcloud deploy Change-Id: I9bb31c7e08364a5a704f22b1dc67ceb0337c984b --- playbooks/installer/ospd/overcloud/post.yml | 26 ++++++++++++++++++- .../overcloud/templates/ssh.config.ansible.j2 | 18 +++++++++++++ playbooks/installer/ospd/post.yml | 2 ++ .../overcloud/storage/ceph/tasks/main.yml | 6 ----- 4 files changed, 45 
insertions(+), 7 deletions(-) create mode 100644 playbooks/installer/ospd/overcloud/templates/ssh.config.ansible.j2 diff --git a/playbooks/installer/ospd/overcloud/post.yml b/playbooks/installer/ospd/overcloud/post.yml index f73e8a886..082f194d7 100644 --- a/playbooks/installer/ospd/overcloud/post.yml +++ b/playbooks/installer/ospd/overcloud/post.yml @@ -1,5 +1,29 @@ --- # Any step that should happen after the deployment of the overcloud playbook # This could be validation of installation, etc -- name: Post tasks +- name: Fetch key for SSH to the overcloud + hosts: undercloud + gather_facts: no + become: yes + become_user: "{{ installer.user.name }}" + tasks: + - name: fetch the private key file from the undercloud + fetch: + src: "~/.ssh/id_rsa" + dest: "{{ inventory_dir }}/id_rsa_overcloud" + flat: yes + +- name: Update the inventory file hosts: localhost + gather_facts: no + tasks: + - name: update file permissions + file: + path: "{{ inventory_dir }}/id_rsa_overcloud" + mode: 0600 + + - name: update our ansible ssh configuration file + template: + src: "templates/ssh.config.ansible.j2" + dest: "{{ inventory_dir }}/ansible.ssh.config" + mode: 0755 diff --git a/playbooks/installer/ospd/overcloud/templates/ssh.config.ansible.j2 b/playbooks/installer/ospd/overcloud/templates/ssh.config.ansible.j2 new file mode 100644 index 000000000..74495cbf8 --- /dev/null +++ b/playbooks/installer/ospd/overcloud/templates/ssh.config.ansible.j2 @@ -0,0 +1,18 @@ +{% for host in groups['all'] %} +{% if hostvars[host].get('ansible_connection', '') != 'local' and host != 'virthost' %} +Host {{ host }} + ProxyCommand ssh -i {{ provisioner.hosts.host1.ssh_key_file }} {{ provisioner.hosts.host1.ssh_user }}@{{ provisioner.hosts.host1.ssh_host }} nc %h %p + HostName {{ hostvars[host].ansible_default_ipv4.address }} +{% if host == 'undercloud' %} + User root + IdentityFile {{ inventory_dir }}/id_rsa +{% else %} + User heat-admin + IdentityFile {{ inventory_dir }}/id_rsa_overcloud +{% endif 
%} + StrictHostKeyChecking no + UserKnownHostsFile=/dev/null + ForwardAgent yes + +{% endif %} +{% endfor %} diff --git a/playbooks/installer/ospd/post.yml b/playbooks/installer/ospd/post.yml index 02e221d1a..1583cf642 100644 --- a/playbooks/installer/ospd/post.yml +++ b/playbooks/installer/ospd/post.yml @@ -3,6 +3,7 @@ # This could be create ssh forwarding to the nodes, validation of installation, etc - name: Post tasks hosts: undercloud + gather_facts: no become: yes become_user: "{{ installer.user.name }}" tasks: @@ -15,6 +16,7 @@ #TODO: remove this when the templating system is in place - name: External network creation hosts: undercloud + gather_facts: no become: yes become_user: "{{ installer.user.name }}" tasks: diff --git a/roles/ospd/overcloud/storage/ceph/tasks/main.yml b/roles/ospd/overcloud/storage/ceph/tasks/main.yml index 6ae459b94..39dd0e5a7 100644 --- a/roles/ospd/overcloud/storage/ceph/tasks/main.yml +++ b/roles/ospd/overcloud/storage/ceph/tasks/main.yml @@ -3,9 +3,3 @@ src: "templates/storage/ceph.yml.j2" dest: "{{ installer.overcloud.template_base }}/puppet/hieradata/ceph.yaml" mode: 0755 - -- name: workaround for BUG where service ceph is not available - service: - name: "ceph" - state: "restarted" - delegate_to: ceph From b20d5875dce98c825b55efcb3153bb000c23bd2b Mon Sep 17 00:00:00 2001 From: Tal Kammer Date: Mon, 1 Feb 2016 23:24:53 +0200 Subject: [PATCH 107/137] Temporary workaround for Ceph bug Change-Id: I53807f3c720797c0a34664453911d8d92931bc95 --- playbooks/installer/ospd/post.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/playbooks/installer/ospd/post.yml b/playbooks/installer/ospd/post.yml index 1583cf642..d08060b34 100644 --- a/playbooks/installer/ospd/post.yml +++ b/playbooks/installer/ospd/post.yml @@ -13,6 +13,11 @@ dest: "{{ inventory_dir }}/keystonerc" flat: yes + #TODO: remove this workaround once the bug is fixed + - name: Workaround for CEPH + shell: "ssh -l heat-admin {{ hostvars['%s' % item].ansible_et0.address }} 
'sudo systemctl restart ceph'" + with_items: groups['ceph'] + #TODO: remove this when the templating system is in place - name: External network creation hosts: undercloud From 82de6bda811f5e484e1383670eebf618b2e56e99 Mon Sep 17 00:00:00 2001 From: Tal Kammer Date: Tue, 2 Feb 2016 09:40:45 +0200 Subject: [PATCH 108/137] Fix typo Change-Id: I0885a4dd2417ad9955d742126c973bc046ba95d1 --- playbooks/installer/ospd/post.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/playbooks/installer/ospd/post.yml b/playbooks/installer/ospd/post.yml index d08060b34..701102bde 100644 --- a/playbooks/installer/ospd/post.yml +++ b/playbooks/installer/ospd/post.yml @@ -15,7 +15,7 @@ #TODO: remove this workaround once the bug is fixed - name: Workaround for CEPH - shell: "ssh -l heat-admin {{ hostvars['%s' % item].ansible_et0.address }} 'sudo systemctl restart ceph'" + shell: "ssh -l heat-admin {{ hostvars['%s' % item].ansible_eth0.address }} 'sudo systemctl restart ceph'" with_items: groups['ceph'] #TODO: remove this when the templating system is in place From d91f118345468ed07395ff5da52cc4f9ecb77227 Mon Sep 17 00:00:00 2001 From: Tal Kammer Date: Tue, 2 Feb 2016 11:27:40 +0200 Subject: [PATCH 109/137] Fix missing param Change-Id: I82237455dab567f26f3e4de39ee21e9f1ad0fc62 --- playbooks/installer/ospd/post.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/playbooks/installer/ospd/post.yml b/playbooks/installer/ospd/post.yml index 701102bde..310c3ef9c 100644 --- a/playbooks/installer/ospd/post.yml +++ b/playbooks/installer/ospd/post.yml @@ -15,7 +15,7 @@ #TODO: remove this workaround once the bug is fixed - name: Workaround for CEPH - shell: "ssh -l heat-admin {{ hostvars['%s' % item].ansible_eth0.address }} 'sudo systemctl restart ceph'" + shell: "ssh -l heat-admin {{ hostvars['%s' % item].ansible_eth0.ipv4.address }} 'sudo systemctl restart ceph'" with_items: groups['ceph'] #TODO: remove this when the templating system is in place From 
2adffd454eb164d1a24e9b4182e58d5f7f39fe66 Mon Sep 17 00:00:00 2001 From: Tal Kammer Date: Tue, 2 Feb 2016 13:00:48 +0200 Subject: [PATCH 110/137] Changes current ceph workaround through the undercloud rather than ansible slave Change-Id: I204cd92935eab39780e8830f0e4a8bccc94e8197 --- playbooks/installer/ospd/post.yml | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/playbooks/installer/ospd/post.yml b/playbooks/installer/ospd/post.yml index 310c3ef9c..0031c3487 100644 --- a/playbooks/installer/ospd/post.yml +++ b/playbooks/installer/ospd/post.yml @@ -13,10 +13,17 @@ dest: "{{ inventory_dir }}/keystonerc" flat: yes + #TODO: remove this workaround once the bug is fixed + - name: Get the ceph nodes IPs + shell: "source ~/stackrc; nova list | awk '/ceph/ {print $12}' | grep -oP '[0-9.]+'" + register: ip_list + when: groups['ceph'] is defined + #TODO: remove this workaround once the bug is fixed - name: Workaround for CEPH - shell: "ssh -l heat-admin {{ hostvars['%s' % item].ansible_eth0.ipv4.address }} 'sudo systemctl restart ceph'" - with_items: groups['ceph'] + shell: "ssh -l heat-admin {{ item }} 'sudo systemctl restart ceph'" + with_items: ip_list.stdout_lines + when: groups['ceph'] is defined #TODO: remove this when the templating system is in place - name: External network creation From 0b5483866c51f71c32ae0daa0cffcf5da1feadfa Mon Sep 17 00:00:00 2001 From: Tal Kammer Date: Tue, 2 Feb 2016 14:58:57 +0200 Subject: [PATCH 111/137] Bumping up the timeout on of transition of states Change-Id: I8cb73bbfe1867e962d261f41b26043f1b304e518 --- playbooks/installer/ospd/overcloud/pre.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/playbooks/installer/ospd/overcloud/pre.yml b/playbooks/installer/ospd/overcloud/pre.yml index 2e9d7e2fb..9b3a876b9 100644 --- a/playbooks/installer/ospd/overcloud/pre.yml +++ b/playbooks/installer/ospd/overcloud/pre.yml @@ -79,6 +79,10 @@ become_user: "{{ installer.user.name }}" shell: "source ~/stackrc; 
openstack baremetal import --json instackenv.json" + - name: update the transition timeout value to allow operation to finish successfully + shell: "crudini --set /etc/ironic/ironic.conf conductor max_time_interval 200" + sudo: yes + - name: assign the kernel and ramdisk before introspection begins become_user: "{{ installer.user.name }}" shell: "source ~/stackrc; openstack baremetal configure boot" From 1bc2cc55f9066a1cfc2530b1fe0e044595f24965 Mon Sep 17 00:00:00 2001 From: Tal Kammer Date: Tue, 2 Feb 2016 15:46:12 +0200 Subject: [PATCH 112/137] Cleanup and adjusting permission escalation Change-Id: Ic661b4b273f5d654ac56a8ff4241ec746c8a4b32 --- playbooks/installer/ospd/overcloud/pre.yml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/playbooks/installer/ospd/overcloud/pre.yml b/playbooks/installer/ospd/overcloud/pre.yml index 9b3a876b9..f686e4b65 100644 --- a/playbooks/installer/ospd/overcloud/pre.yml +++ b/playbooks/installer/ospd/overcloud/pre.yml @@ -76,15 +76,13 @@ shell: "python instackenv-validator.py -f ~/instackenv.json" - name: register our hosts to instack - become_user: "{{ installer.user.name }}" shell: "source ~/stackrc; openstack baremetal import --json instackenv.json" - name: update the transition timeout value to allow operation to finish successfully shell: "crudini --set /etc/ironic/ironic.conf conductor max_time_interval 200" - sudo: yes + become_user: root - name: assign the kernel and ramdisk before introspection begins - become_user: "{{ installer.user.name }}" shell: "source ~/stackrc; openstack baremetal configure boot" # In case of virthost we need to fix the pxe_ssh limitation of correctly assigning the MAC address to the iPXE script From 697aad587b22cb9a034b401e24b104b48af2cafc Mon Sep 17 00:00:00 2001 From: Tal Kammer Date: Tue, 2 Feb 2016 22:01:03 +0200 Subject: [PATCH 113/137] Add SSH config to ignore strict host check Change-Id: I7ef782c1529cc7b3a18860cd5cc8eb0263b1f1f9 --- playbooks/provisioner/virsh/main.yml | 
2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/playbooks/provisioner/virsh/main.yml b/playbooks/provisioner/virsh/main.yml index b6178ebed..b1ae8646b 100644 --- a/playbooks/provisioner/virsh/main.yml +++ b/playbooks/provisioner/virsh/main.yml @@ -298,4 +298,4 @@ dest: "{{ inventory_dir }}/ansible.cfg" section: ssh_connection option: ssh_args - value: "-o ForwardAgent=yes -o ServerAliveInterval=30 -o ControlMaster=auto -o ControlPersist=30m -F {{ inventory_dir }}/ansible.ssh.config" + value: "-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ForwardAgent=yes -o ServerAliveInterval=30 -o ControlMaster=auto -o ControlPersist=30m -F {{ inventory_dir }}/ansible.ssh.config" From fec440802458a334ea21c551f8f17fef7fb61e8d Mon Sep 17 00:00:00 2001 From: Tal Kammer Date: Tue, 2 Feb 2016 22:34:32 +0100 Subject: [PATCH 114/137] Revert "Add SSH config to ignore strict host check" This reverts commit 93a5913913bbeed8a029fd1e0beba0bb1b49daad. Change-Id: I52f65bfb57621e65c6930c469b816c8856238497 --- playbooks/provisioner/virsh/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/playbooks/provisioner/virsh/main.yml b/playbooks/provisioner/virsh/main.yml index b1ae8646b..b6178ebed 100644 --- a/playbooks/provisioner/virsh/main.yml +++ b/playbooks/provisioner/virsh/main.yml @@ -298,4 +298,4 @@ dest: "{{ inventory_dir }}/ansible.cfg" section: ssh_connection option: ssh_args - value: "-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ForwardAgent=yes -o ServerAliveInterval=30 -o ControlMaster=auto -o ControlPersist=30m -F {{ inventory_dir }}/ansible.ssh.config" + value: "-o ForwardAgent=yes -o ServerAliveInterval=30 -o ControlMaster=auto -o ControlPersist=30m -F {{ inventory_dir }}/ansible.ssh.config" From 19f951f549cb9d18e00bbc5db8e13a83265413f4 Mon Sep 17 00:00:00 2001 From: Tal Kammer Date: Wed, 3 Feb 2016 16:50:12 +0200 Subject: [PATCH 115/137] Add StrictHostKeyChecking=no to ceph workaround Change-Id: 
Ifec4b434b0783e66b74769b0d7b0ac91d59e5095 --- playbooks/installer/ospd/post.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/playbooks/installer/ospd/post.yml b/playbooks/installer/ospd/post.yml index 0031c3487..3231d5116 100644 --- a/playbooks/installer/ospd/post.yml +++ b/playbooks/installer/ospd/post.yml @@ -13,15 +13,15 @@ dest: "{{ inventory_dir }}/keystonerc" flat: yes - #TODO: remove this workaround once the bug is fixed + #TODO: remove this workaround once RHBZ1304367 is fixed - name: Get the ceph nodes IPs shell: "source ~/stackrc; nova list | awk '/ceph/ {print $12}' | grep -oP '[0-9.]+'" register: ip_list when: groups['ceph'] is defined - #TODO: remove this workaround once the bug is fixed + #TODO: remove this workaround once RHBZ1304367 is fixed - name: Workaround for CEPH - shell: "ssh -l heat-admin {{ item }} 'sudo systemctl restart ceph'" + shell: "ssh -o StrictHostKeyChecking=no -l heat-admin {{ item }} 'sudo systemctl restart ceph'" with_items: ip_list.stdout_lines when: groups['ceph'] is defined From a211996f49fbd5da84a11193ba4e6bb1e53bec6d Mon Sep 17 00:00:00 2001 From: Tal Kammer Date: Thu, 4 Feb 2016 10:11:58 +0200 Subject: [PATCH 116/137] Add SSL support Change-Id: Ie7a0caee32a99d4ffcb336e376b9f2cd52e35e8e --- playbooks/installer/ospd/overcloud/run.yml | 3 +- .../templates/overcloud_deploy.sh.j2 | 4 +++ playbooks/installer/ospd/post.yml | 18 ++++++++--- .../isolation}/files/ceph-storage.yaml | 0 .../isolation}/files/cinder-storage.yaml | 0 .../isolation}/files/compute.yaml | 0 .../isolation}/files/controller.yaml | 0 .../isolation}/files/swift-storage.yaml | 0 .../isolation}/tasks/main.yml | 0 .../isolation}/templates/isolation.yml.j2 | 0 roles/ospd/overcloud/ssl/tasks/main.yml | 32 +++++++++++++++++++ settings/installer/ospd.yml | 6 ++++ settings/installer/ospd/ssl/no.yml | 5 +++ settings/installer/ospd/ssl/yes.yml | 5 +++ 14 files changed, 68 insertions(+), 5 deletions(-) rename 
roles/ospd/overcloud/{network-isolation => network/isolation}/files/ceph-storage.yaml (100%) rename roles/ospd/overcloud/{network-isolation => network/isolation}/files/cinder-storage.yaml (100%) rename roles/ospd/overcloud/{network-isolation => network/isolation}/files/compute.yaml (100%) rename roles/ospd/overcloud/{network-isolation => network/isolation}/files/controller.yaml (100%) rename roles/ospd/overcloud/{network-isolation => network/isolation}/files/swift-storage.yaml (100%) rename roles/ospd/overcloud/{network-isolation => network/isolation}/tasks/main.yml (100%) rename roles/ospd/overcloud/{network-isolation => network/isolation}/templates/isolation.yml.j2 (100%) create mode 100644 roles/ospd/overcloud/ssl/tasks/main.yml create mode 100644 settings/installer/ospd/ssl/no.yml create mode 100644 settings/installer/ospd/ssl/yes.yml diff --git a/playbooks/installer/ospd/overcloud/run.yml b/playbooks/installer/ospd/overcloud/run.yml index afa5b3208..93a938562 100644 --- a/playbooks/installer/ospd/overcloud/run.yml +++ b/playbooks/installer/ospd/overcloud/run.yml @@ -5,7 +5,8 @@ become_user: "{{ installer.user.name }}" gather_facts: no roles: - - {role: ospd/overcloud/network-isolation/, when: installer.overcloud.network.type == "isolation"} + - {role: ospd/overcloud/network/isolation/, when: installer.overcloud.network.type == "isolation"} + - {role: ospd/overcloud/ssl/, when: installer.overcloud.use_ssl == "yes"} - {role: ospd/overcloud/storage/ceph/, when: provisioner.nodes.ceph is defined} tasks: #TODO: move this to a role for cinder/LVM diff --git a/playbooks/installer/ospd/overcloud/templates/overcloud_deploy.sh.j2 b/playbooks/installer/ospd/overcloud/templates/overcloud_deploy.sh.j2 index f73c3b38d..6435b72ef 100644 --- a/playbooks/installer/ospd/overcloud/templates/overcloud_deploy.sh.j2 +++ b/playbooks/installer/ospd/overcloud/templates/overcloud_deploy.sh.j2 @@ -22,5 +22,9 @@ openstack overcloud deploy --debug \ {% endif %} {% if 
installer.overcloud.network.type == "isolation" %} -e {{ installer.overcloud.template_base }}/{{ installer.overcloud.network.template.file }} \ +{% endif %} +{% if installer.overcloud.use_ssl == "yes" %} + -e {{ installer.overcloud.template_base }}/environments/enable-tls.yaml \ + -e {{ installer.overcloud.template_base }}/environments/inject-trust-anchor.yaml \ {% endif %} --log-file overcloud_deployment_{{ 100 | random }}.log diff --git a/playbooks/installer/ospd/post.yml b/playbooks/installer/ospd/post.yml index 3231d5116..85067e832 100644 --- a/playbooks/installer/ospd/post.yml +++ b/playbooks/installer/ospd/post.yml @@ -13,17 +13,27 @@ dest: "{{ inventory_dir }}/keystonerc" flat: yes - #TODO: remove this workaround once RHBZ1304367 is fixed +- name: Check if RHBZ1304367 is enabled + hosts: undercloud + gather_facts: no + sudo: no + tasks: + - group_by: key=workaround_rhbz1304367 + when: workarounds.rhbz1304367 is defined and groups['ceph'] is defined + +- name: "Workaround RHBZ1304367: overcloud deployment finished successfully and Ceph's OSDs are down" + hosts: workaround_rhbz1304367 + gather_facts: no + become: yes + become_user: "{{ installer.user.name }}" + tasks: - name: Get the ceph nodes IPs shell: "source ~/stackrc; nova list | awk '/ceph/ {print $12}' | grep -oP '[0-9.]+'" register: ip_list - when: groups['ceph'] is defined - #TODO: remove this workaround once RHBZ1304367 is fixed - name: Workaround for CEPH shell: "ssh -o StrictHostKeyChecking=no -l heat-admin {{ item }} 'sudo systemctl restart ceph'" with_items: ip_list.stdout_lines - when: groups['ceph'] is defined #TODO: remove this when the templating system is in place - name: External network creation diff --git a/roles/ospd/overcloud/network-isolation/files/ceph-storage.yaml b/roles/ospd/overcloud/network/isolation/files/ceph-storage.yaml similarity index 100% rename from roles/ospd/overcloud/network-isolation/files/ceph-storage.yaml rename to 
roles/ospd/overcloud/network/isolation/files/ceph-storage.yaml diff --git a/roles/ospd/overcloud/network-isolation/files/cinder-storage.yaml b/roles/ospd/overcloud/network/isolation/files/cinder-storage.yaml similarity index 100% rename from roles/ospd/overcloud/network-isolation/files/cinder-storage.yaml rename to roles/ospd/overcloud/network/isolation/files/cinder-storage.yaml diff --git a/roles/ospd/overcloud/network-isolation/files/compute.yaml b/roles/ospd/overcloud/network/isolation/files/compute.yaml similarity index 100% rename from roles/ospd/overcloud/network-isolation/files/compute.yaml rename to roles/ospd/overcloud/network/isolation/files/compute.yaml diff --git a/roles/ospd/overcloud/network-isolation/files/controller.yaml b/roles/ospd/overcloud/network/isolation/files/controller.yaml similarity index 100% rename from roles/ospd/overcloud/network-isolation/files/controller.yaml rename to roles/ospd/overcloud/network/isolation/files/controller.yaml diff --git a/roles/ospd/overcloud/network-isolation/files/swift-storage.yaml b/roles/ospd/overcloud/network/isolation/files/swift-storage.yaml similarity index 100% rename from roles/ospd/overcloud/network-isolation/files/swift-storage.yaml rename to roles/ospd/overcloud/network/isolation/files/swift-storage.yaml diff --git a/roles/ospd/overcloud/network-isolation/tasks/main.yml b/roles/ospd/overcloud/network/isolation/tasks/main.yml similarity index 100% rename from roles/ospd/overcloud/network-isolation/tasks/main.yml rename to roles/ospd/overcloud/network/isolation/tasks/main.yml diff --git a/roles/ospd/overcloud/network-isolation/templates/isolation.yml.j2 b/roles/ospd/overcloud/network/isolation/templates/isolation.yml.j2 similarity index 100% rename from roles/ospd/overcloud/network-isolation/templates/isolation.yml.j2 rename to roles/ospd/overcloud/network/isolation/templates/isolation.yml.j2 diff --git a/roles/ospd/overcloud/ssl/tasks/main.yml b/roles/ospd/overcloud/ssl/tasks/main.yml new file mode 
100644 index 000000000..be637c0da --- /dev/null +++ b/roles/ospd/overcloud/ssl/tasks/main.yml @@ -0,0 +1,32 @@ +- name: create the self signed SSL + shell: "openssl genrsa -out {{ installer.overcloud.template_base }}/overcloud-privkey.pem 2048" + +- name: create the self signed CA certificate + shell: "openssl req -new -x509 -key {{ installer.overcloud.template_base }}/overcloud-privkey.pem -out {{ installer.overcloud.template_base }}/overcloud-cacert.pem -days 365 -subj '/C=US/ST=NC/L=Raleigh/O=Red HAt/OU=QE/CN={{ installer.overcloud.network.template.content.parameter_defaults.ExternalAllocationPools[0].start }}'" + +- name: update the tls template + shell: "sed -i 's/CLOUDNAME/IP_ADDRESS/' {{ installer.overcloud.template_base }}/environments/enable-tls.yaml" + +- name: copy the self signed CA certification to our trusted store + shell: "sudo cp {{ installer.overcloud.template_base }}/overcloud-cacert.pem /etc/pki/ca-trust/source/anchors/" + +- name: update our CA store to reflect our addition + shell: "sudo update-ca-trust extract" + +- name: register the CA certificate + shell: "cat {{ installer.overcloud.template_base }}/overcloud-cacert.pem" + register: cacert + +- name: register the private key + shell: "cat {{ installer.overcloud.template_base }}/overcloud-privkey.pem" + register: private_key + +- name: insert the content of the CA certificate and private key respectfully + replace: + dest: "{{ item.dest }}" + regexp: "{{ item.regex }}" + replace: "{{ item.replace_with }}" + with_items: + - {dest: "{{ installer.overcloud.template_base }}/environments/enable-tls.yaml", regex: "(SSLCertificate:.*\\s+)The contents of your certificate.*$", replace_with: "\\1{{ cacert.stdout | indent(4) | string }}"} + - {dest: "{{ installer.overcloud.template_base }}/environments/enable-tls.yaml", regex: "(SSLKey:.*\\s+)The contents of the private key.*$", replace_with: "\\1{{ private_key.stdout | indent(4) | string }}"} + - {dest: "{{ installer.overcloud.template_base 
}}/environments/inject-trust-anchor.yaml", regex: "(SSLRootCertificate:.*\\s+)The contents of your root CA.*$", replace_with: "\\1{{ cacert.stdout | indent(4) | string }}"} diff --git a/settings/installer/ospd.yml b/settings/installer/ospd.yml index 524563dfe..e641b301a 100644 --- a/settings/installer/ospd.yml +++ b/settings/installer/ospd.yml @@ -27,6 +27,7 @@ defaults: images: import version: 7 network: no-isolation + ssl: no job: archive: @@ -38,3 +39,8 @@ job: - /home/stack/deploy-overcloudrc - /home/stack/network-environment.yaml - /usr/share/openstack-tripleo-heat-templates + +workaournds: + rhbz1304367: + desc: "Workaround RHBZ1304367: overcloud deployment finished successfully and Ceph's OSDs are down" + enabled: True diff --git a/settings/installer/ospd/ssl/no.yml b/settings/installer/ospd/ssl/no.yml new file mode 100644 index 000000000..4fc6cf770 --- /dev/null +++ b/settings/installer/ospd/ssl/no.yml @@ -0,0 +1,5 @@ +--- + +installer: + overcloud: + use_ssl: "no" diff --git a/settings/installer/ospd/ssl/yes.yml b/settings/installer/ospd/ssl/yes.yml new file mode 100644 index 000000000..1f95810c2 --- /dev/null +++ b/settings/installer/ospd/ssl/yes.yml @@ -0,0 +1,5 @@ +--- + +installer: + overcloud: + use_ssl: "yes" From 87bb8fc5a272bab80b1a6324b20562968ba91640 Mon Sep 17 00:00:00 2001 From: Tal Kammer Date: Fri, 5 Feb 2016 20:10:47 +0200 Subject: [PATCH 117/137] Adjusting SSL invocation Change-Id: I1661bb70f5aa117ae0e40504518794328a2e6721 --- playbooks/installer/ospd/overcloud/run.yml | 3 ++- .../installer/ospd/overcloud/templates/overcloud_deploy.sh.j2 | 2 +- settings/installer/ospd/network/isolation.yml | 3 ++- .../installer/ospd/network/isolation/type/bond-with-vlan.yml | 4 +++- .../installer/ospd/network/isolation/type/single-nic-vlan.yml | 4 +++- .../installer/ospd/network/isolation/type/three-nic-vlan.yml | 4 +++- settings/installer/ospd/network/no-isolation.yml | 3 ++- 7 files changed, 16 insertions(+), 7 deletions(-) diff --git 
a/playbooks/installer/ospd/overcloud/run.yml b/playbooks/installer/ospd/overcloud/run.yml index 93a938562..5bfdf73ff 100644 --- a/playbooks/installer/ospd/overcloud/run.yml +++ b/playbooks/installer/ospd/overcloud/run.yml @@ -5,7 +5,8 @@ become_user: "{{ installer.user.name }}" gather_facts: no roles: - - {role: ospd/overcloud/network/isolation/, when: installer.overcloud.network.type == "isolation"} + - {role: ospd/overcloud/network/isolation/, when: installer.overcloud.network.isolation.enable == "yes"} + - {role: ospd/overcloud/network/ipv6/, when: installer.overcloud.network.ipv6 == "yes"} - {role: ospd/overcloud/ssl/, when: installer.overcloud.use_ssl == "yes"} - {role: ospd/overcloud/storage/ceph/, when: provisioner.nodes.ceph is defined} tasks: diff --git a/playbooks/installer/ospd/overcloud/templates/overcloud_deploy.sh.j2 b/playbooks/installer/ospd/overcloud/templates/overcloud_deploy.sh.j2 index 6435b72ef..63dd38279 100644 --- a/playbooks/installer/ospd/overcloud/templates/overcloud_deploy.sh.j2 +++ b/playbooks/installer/ospd/overcloud/templates/overcloud_deploy.sh.j2 @@ -20,7 +20,7 @@ openstack overcloud deploy --debug \ --ceph-storage-flavor ceph \ -e {{ installer.overcloud.template_base }}/environments/storage-environment.yaml \ {% endif %} -{% if installer.overcloud.network.type == "isolation" %} +{% if installer.overcloud.network.isolation.enable == "yes" %} -e {{ installer.overcloud.template_base }}/{{ installer.overcloud.network.template.file }} \ {% endif %} {% if installer.overcloud.use_ssl == "yes" %} diff --git a/settings/installer/ospd/network/isolation.yml b/settings/installer/ospd/network/isolation.yml index f51892239..a9ac4e245 100644 --- a/settings/installer/ospd/network/isolation.yml +++ b/settings/installer/ospd/network/isolation.yml @@ -3,7 +3,8 @@ installer: overcloud: network: - type: "isolation" + isolation: + enable: "yes" template: file: "environments/network-isolation.yaml" content: diff --git 
a/settings/installer/ospd/network/isolation/type/bond-with-vlan.yml b/settings/installer/ospd/network/isolation/type/bond-with-vlan.yml index 7f762c1e7..600f5a00a 100644 --- a/settings/installer/ospd/network/isolation/type/bond-with-vlan.yml +++ b/settings/installer/ospd/network/isolation/type/bond-with-vlan.yml @@ -3,4 +3,6 @@ installer: overcloud: network: - type: "bond-with-vlan" + isolation: + enable: "yes" + type: "bond-with-vlan" diff --git a/settings/installer/ospd/network/isolation/type/single-nic-vlan.yml b/settings/installer/ospd/network/isolation/type/single-nic-vlan.yml index cc48d100e..a566d8318 100644 --- a/settings/installer/ospd/network/isolation/type/single-nic-vlan.yml +++ b/settings/installer/ospd/network/isolation/type/single-nic-vlan.yml @@ -3,4 +3,6 @@ installer: overcloud: network: - type: "single-nic-vlan" + isolation: + enable: "yes" + type: "single-nic-vlan" diff --git a/settings/installer/ospd/network/isolation/type/three-nic-vlan.yml b/settings/installer/ospd/network/isolation/type/three-nic-vlan.yml index 51e97b5c2..7244a4481 100644 --- a/settings/installer/ospd/network/isolation/type/three-nic-vlan.yml +++ b/settings/installer/ospd/network/isolation/type/three-nic-vlan.yml @@ -3,4 +3,6 @@ installer: overcloud: network: - type: "three-nic-vlan" + isolation: + enable: "yes" + type: "three-nic-vlan" diff --git a/settings/installer/ospd/network/no-isolation.yml b/settings/installer/ospd/network/no-isolation.yml index f1f1b4d3e..ed1d18aef 100644 --- a/settings/installer/ospd/network/no-isolation.yml +++ b/settings/installer/ospd/network/no-isolation.yml @@ -3,5 +3,6 @@ installer: overcloud: network: - type: "no-isolation" + isolation: + enable: "no" backend: vxlan From 070e46f37deeb39732309457c86c6f501a3d8c84 Mon Sep 17 00:00:00 2001 From: Tal Kammer Date: Fri, 5 Feb 2016 20:33:15 +0200 Subject: [PATCH 118/137] Remove leftover ipv6 Change-Id: I6e4ce81d232b2c7aeadd1904085a8ff1cf5433cf --- playbooks/installer/ospd/overcloud/run.yml | 1 - 1 
file changed, 1 deletion(-) diff --git a/playbooks/installer/ospd/overcloud/run.yml b/playbooks/installer/ospd/overcloud/run.yml index 5bfdf73ff..cdcb9c5c2 100644 --- a/playbooks/installer/ospd/overcloud/run.yml +++ b/playbooks/installer/ospd/overcloud/run.yml @@ -6,7 +6,6 @@ gather_facts: no roles: - {role: ospd/overcloud/network/isolation/, when: installer.overcloud.network.isolation.enable == "yes"} - - {role: ospd/overcloud/network/ipv6/, when: installer.overcloud.network.ipv6 == "yes"} - {role: ospd/overcloud/ssl/, when: installer.overcloud.use_ssl == "yes"} - {role: ospd/overcloud/storage/ceph/, when: provisioner.nodes.ceph is defined} tasks: From 439352da0bb9dd53628cc5784f90d940ebe3f4c7 Mon Sep 17 00:00:00 2001 From: Tal Kammer Date: Fri, 5 Feb 2016 20:40:34 +0200 Subject: [PATCH 119/137] Add a place holder for ipv6 Change-Id: I9735880a3c52b5987aae2a9573135d5be5018f14 --- playbooks/installer/ospd/overcloud/run.yml | 1 + .../overcloud/network/ipv6/tasks/main.yml | 0 settings/installer/ospd/network/ipv6/no.yml | 1 + settings/installer/ospd/network/ipv6/yes.yml | 49 +++++++++++++++++++ 4 files changed, 51 insertions(+) create mode 100644 roles/ospd/overcloud/network/ipv6/tasks/main.yml create mode 100644 settings/installer/ospd/network/ipv6/no.yml create mode 100644 settings/installer/ospd/network/ipv6/yes.yml diff --git a/playbooks/installer/ospd/overcloud/run.yml b/playbooks/installer/ospd/overcloud/run.yml index cdcb9c5c2..5bfdf73ff 100644 --- a/playbooks/installer/ospd/overcloud/run.yml +++ b/playbooks/installer/ospd/overcloud/run.yml @@ -6,6 +6,7 @@ gather_facts: no roles: - {role: ospd/overcloud/network/isolation/, when: installer.overcloud.network.isolation.enable == "yes"} + - {role: ospd/overcloud/network/ipv6/, when: installer.overcloud.network.ipv6 == "yes"} - {role: ospd/overcloud/ssl/, when: installer.overcloud.use_ssl == "yes"} - {role: ospd/overcloud/storage/ceph/, when: provisioner.nodes.ceph is defined} tasks: diff --git 
a/roles/ospd/overcloud/network/ipv6/tasks/main.yml b/roles/ospd/overcloud/network/ipv6/tasks/main.yml new file mode 100644 index 000000000..e69de29bb diff --git a/settings/installer/ospd/network/ipv6/no.yml b/settings/installer/ospd/network/ipv6/no.yml new file mode 100644 index 000000000..ed97d539c --- /dev/null +++ b/settings/installer/ospd/network/ipv6/no.yml @@ -0,0 +1 @@ +--- diff --git a/settings/installer/ospd/network/ipv6/yes.yml b/settings/installer/ospd/network/ipv6/yes.yml new file mode 100644 index 000000000..7becc84ef --- /dev/null +++ b/settings/installer/ospd/network/ipv6/yes.yml @@ -0,0 +1,49 @@ +--- + +installer: + overcloud: + protocol: + type: "ipv6" + template: + file: "" + network: + type: "isolation" + template: + file: "environments/network-isolation.yaml" + content: +resource_registry: + OS::TripleO::BlockStorage::Net::SoftwareConfig: /home/stack/nic-configs/cinder-storage.yaml + OS::TripleO::Compute::Net::SoftwareConfig: /home/stack/nic-configs/compute.yaml + OS::TripleO::Controller::Net::SoftwareConfig: /home/stack/nic-configs/controller.yaml + OS::TripleO::ObjectStorage::Net::SoftwareConfig: /home/stack/nic-configs/swift-storage.yaml + OS::TripleO::CephStorage::Net::SoftwareConfig: /home/stack/nic-configs/ceph-storage.yaml + +parameters: + CloudName: rxtx.ro + NeutronExternalNetworkBridge: "''" + controllerExtraConfig: + 'neutron::agents::dhcp::enable_isolated_metadata': 'True' + +parameter_defaults: + DnsServers: ["10.16.36.29","10.11.5.19"] # Customize for your env + ControlPlaneSubnetCidr: "24" + EC2MetadataIp: 192.168.0.1 # Default for virt-env, customize if need be + ControlPlaneDefaultRoute: 192.168.0.1 # Default for virt-env, customize if need be + + ExternalInterfaceDefaultRoute: 2620:52:0:13b8::fe + ExternalNetCidr: '2620:52:0:13b8::/64' + ExternalAllocationPools: [{'start': '2620:52:0:13b8:5054:ff:fe3e:1', 'end': '2620:52:0:13b8:5054:ff:fe3e:9'}] + + TenantNetCidr: 'fd00:fd00:fd00:5000::/64' + TenantAllocationPools: [{'start': 
'fd00:fd00:fd00:5000::10', 'end': 'fd00:fd00:fd00:5000:ffff:ffff:ffff:fffe'}] + + InternalApiNetCidr: 'fd00:fd00:fd00:2000::/64' + InternalApiAllocationPools: [{'start': 'fd00:fd00:fd00:2000::10', 'end': 'fd00:fd00:fd00:2000:ffff:ffff:ffff:fffe'}] + + StorageNetCidr: 'fd00:fd00:fd00:3000::/64' + StorageAllocationPools: [{'start': 'fd00:fd00:fd00:3000::10', 'end': 'fd00:fd00:fd00:3000:ffff:ffff:ffff:fffe'}] + + StorageMgmtNetCidr: 'fd00:fd00:fd00:4000::/64' + StorageMgmtAllocationPools: [{'start': 'fd00:fd00:fd00:4000::10', 'end': 'fd00:fd00:fd00:4000:ffff:ffff:ffff:fffe'}] + StorageNetworkVlanID: 183 + StorageMgmtNetworkVlanID: 103 From 39930e521fea6b72f42a334804d9861f15715040 Mon Sep 17 00:00:00 2001 From: Tal Kammer Date: Sat, 6 Feb 2016 15:16:27 +0200 Subject: [PATCH 120/137] Set default SSL as string Change-Id: I69c0470c3c1eb869460f67bceed29ca223d26fbf --- settings/installer/ospd.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/settings/installer/ospd.yml b/settings/installer/ospd.yml index e641b301a..3d0ba0c59 100644 --- a/settings/installer/ospd.yml +++ b/settings/installer/ospd.yml @@ -27,7 +27,7 @@ defaults: images: import version: 7 network: no-isolation - ssl: no + ssl: "no" job: archive: From 3ab91db36960c9796a576dfafd7599fb6e859f36 Mon Sep 17 00:00:00 2001 From: Tal Kammer Date: Sat, 6 Feb 2016 16:36:33 +0200 Subject: [PATCH 121/137] Fix typo in ceph workaround Change-Id: I80dfced5a144f212e8addd806d6f56d73adbf4d8 --- settings/installer/ospd.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/settings/installer/ospd.yml b/settings/installer/ospd.yml index 3d0ba0c59..d7e63ddd7 100644 --- a/settings/installer/ospd.yml +++ b/settings/installer/ospd.yml @@ -40,7 +40,7 @@ job: - /home/stack/network-environment.yaml - /usr/share/openstack-tripleo-heat-templates -workaournds: +workarounds: rhbz1304367: - desc: "Workaround RHBZ1304367: overcloud deployment finished successfully and Ceph's OSDs are down" + desc: 
"Overcloud deployment finished successfully and Ceph's OSDs are down" enabled: True From 3b32d4744e36544c33d9d424be3e3d188fdf2e47 Mon Sep 17 00:00:00 2001 From: Yair Fried Date: Sun, 24 Jan 2016 12:26:07 +0200 Subject: [PATCH 122/137] Initial push of new KCLI Change-Id: I9bcad18c94c4c97612e850dbb7ecd21ab6cebd3a --- tools/kcli/.gitignore | 6 - tools/kcli/README.rst | 41 -- tools/kcli/conf.py | 17 + tools/kcli/execute/__init__.py | 4 + tools/kcli/{kcli => execute}/core.py | 13 +- tools/kcli/{kcli => execute}/execute.py | 45 +-- tools/kcli/kcli.cfg | 19 + tools/kcli/kcli.py | 478 ++++++++++++++++++++++++ tools/kcli/kcli/__init__.py | 4 - tools/kcli/logger.py | 29 ++ tools/kcli/parse.py | 82 ++++ tools/kcli/settings.py | 18 + tools/kcli/setup.py | 20 - tools/kcli/yamls.py | 91 +++++ 14 files changed, 758 insertions(+), 109 deletions(-) delete mode 100644 tools/kcli/.gitignore delete mode 100644 tools/kcli/README.rst create mode 100644 tools/kcli/conf.py create mode 100644 tools/kcli/execute/__init__.py rename tools/kcli/{kcli => execute}/core.py (67%) rename tools/kcli/{kcli => execute}/execute.py (75%) create mode 100644 tools/kcli/kcli.cfg create mode 100755 tools/kcli/kcli.py delete mode 100644 tools/kcli/kcli/__init__.py create mode 100644 tools/kcli/logger.py create mode 100644 tools/kcli/parse.py create mode 100644 tools/kcli/settings.py delete mode 100644 tools/kcli/setup.py create mode 100644 tools/kcli/yamls.py diff --git a/tools/kcli/.gitignore b/tools/kcli/.gitignore deleted file mode 100644 index 835d80cba..000000000 --- a/tools/kcli/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -build/ -dist/ -venv/ -.tox/ -*.egg-info/ -*.pyc diff --git a/tools/kcli/README.rst b/tools/kcli/README.rst deleted file mode 100644 index 5558bee9e..000000000 --- a/tools/kcli/README.rst +++ /dev/null @@ -1,41 +0,0 @@ -======================== -kcli - Khaleesi CLI tool -======================== - -``kcli`` is intended to reduce Khaleesi users' dependency on external CLI tools. 
- -Setup -===== - -.. Note:: Khaleesi is based on ansible so for setup to work, ``kcli`` requires - ansible installed:: - - $ pip install ansible - -from khaleesi directory. :: - - $ cd tools/kcli - $ python setup.py install # do this in the ``kcli`` directory - -Running kcli -============ - -**Assumes** that ``kcli`` is installed, else follow Setup_. - -You can get general usage information with the ``--help`` option:: - - kcli --help - -This displays options you can pass to ``kcli``. - -KCLI execute -============ - -.. Note:: This is a wrapper for the ``ansible-playbook`` command. In - verbose mode, the equivalent anisble command will be printed. - -Executes pre-configured ansible-playbooks, with given settings YAML file -generated by ksgen. if no settings file is defined, will look for the -default name ``ksgen_settings.yml``:: - - kcli [-vvvv] [--settings SETTINGS] execute [-i INVENTORY] [--provision] [--install] [--test] [--collect-logs] [--cleanup] diff --git a/tools/kcli/conf.py b/tools/kcli/conf.py new file mode 100644 index 000000000..4556727fe --- /dev/null +++ b/tools/kcli/conf.py @@ -0,0 +1,17 @@ +import ConfigParser +import sys + +# KCLI_CONF_FILE = '/etc/khaleesi/kcli.cfg' +KCLI_CONF_FILE = 'kcli.cfg' + +config = ConfigParser.ConfigParser(allow_no_value=True) +try: + with open(KCLI_CONF_FILE) as conf: + config.readfp(conf) +except IOError: + print "ERROR: kcli conf file (%s) not found" % KCLI_CONF_FILE + sys.exit(1) + +for dir_path in config.options('DEFAULTS'): + globals()[dir_path.upper()] = config.get('DEFAULTS', dir_path) + diff --git a/tools/kcli/execute/__init__.py b/tools/kcli/execute/__init__.py new file mode 100644 index 000000000..253173824 --- /dev/null +++ b/tools/kcli/execute/__init__.py @@ -0,0 +1,4 @@ +import core +import execute + +__VERSION__ = '0.2.0' diff --git a/tools/kcli/kcli/core.py b/tools/kcli/execute/core.py similarity index 67% rename from tools/kcli/kcli/core.py rename to tools/kcli/execute/core.py index 
f9d5e42be..e5c0986ba 100644 --- a/tools/kcli/kcli/core.py +++ b/tools/kcli/execute/core.py @@ -2,16 +2,13 @@ import argparse import os import sys +import conf -KHALEESI_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), - "..", "..", "..")) -PATH_TO_PLAYBOOKS = os.path.join(KHALEESI_DIR, "playbooks") -assert "playbooks" == os.path.basename(PATH_TO_PLAYBOOKS), \ +assert "playbooks" == os.path.basename(conf.PLAYBOOKS_DIR), \ "Bad path to playbooks" -VERBOSITY = 0 -KSGEN_SETTINGS_YML = "ksgen_settings.yml" +VERBOSITY = 0 def file_exists(prs, filename): if not os.path.exists(filename): @@ -29,10 +26,10 @@ def main(): help="verbose mode (-vvv for more," " -vvvv to enable connection debugging)") parser.add_argument("--settings", - default=KSGEN_SETTINGS_YML, + default=conf.KCLI_SETTINGS_YML, type=lambda x: file_exists(parser, x), help="settings file to use. default: %s" - % KSGEN_SETTINGS_YML) + % conf.KCLI_SETTINGS_YML) subparsers = parser.add_subparsers(metavar="COMMAND") diff --git a/tools/kcli/kcli/execute.py b/tools/kcli/execute/execute.py similarity index 75% rename from tools/kcli/kcli/execute.py rename to tools/kcli/execute/execute.py index a26c1e480..0d5c0a03d 100644 --- a/tools/kcli/kcli/execute.py +++ b/tools/kcli/execute/execute.py @@ -5,8 +5,8 @@ import ansible.inventory import ansible.utils -from kcli import core - +import conf +import core HOSTS_FILE = "hosts" LOCAL_HOSTS = "local_hosts" @@ -44,7 +44,7 @@ def execute_ansible(playbook, args): hosts = args.inventory or (LOCAL_HOSTS if playbook == PROVISION else HOSTS_FILE) playbook = playbook.replace("-", "_") + ".yml" - path_to_playbook = path.join(core.PATH_TO_PLAYBOOKS, playbook) + path_to_playbook = path.join(conf.PLAYBOOKS_DIR, playbook) # From ansible-playbook: stats = callbacks.AggregateStats() @@ -60,6 +60,9 @@ def execute_ansible(playbook, args): verbose=ansible.utils.VERBOSITY ) + module_path = None if not hasattr(conf, 'MODULES_DIR') else \ + conf.MODULES_DIR + pb = 
ansible.playbook.PlayBook( # From ansible-playbook: playbook=path_to_playbook, @@ -68,6 +71,7 @@ def execute_ansible(playbook, args): callbacks=playbook_cb, runner_callbacks=runner_cb, stats=stats, + module_path=module_path ) failed_hosts = [] @@ -75,6 +79,8 @@ def execute_ansible(playbook, args): if args.verbose: ansible_cmd = ["ansible-playbook"] + if module_path: + ansible_cmd.append("-M " + module_path) ansible_cmd.append("-" + "v" * args.verbose) ansible_cmd.append("-i " + hosts) ansible_cmd.append("--extra-vars @" + args.settings) @@ -111,8 +117,8 @@ def execute_ansible(playbook, args): colorize('changed', t['changed'], 'yellow'), colorize('unreachable', t['unreachable'], 'red'), colorize('failed', t['failures'], 'red')), - screen_only=True - ) + screen_only=True + ) callbacks.display("%s : %s %s %s %s" % ( hostcolor(h, t, False), @@ -120,8 +126,8 @@ def execute_ansible(playbook, args): colorize('changed', t['changed'], None), colorize('unreachable', t['unreachable'], None), colorize('failed', t['failures'], None)), - log_only=True - ) + log_only=True + ) print "" if len(failed_hosts) > 0: @@ -141,27 +147,6 @@ def ansible_wrapper(args): try: execute_ansible(playbook, args) except Exception: + if args.verbose: + raise raise Exception("Playbook %s failed!" % playbook) - - -execute_parser = core.subparsers.add_parser('execute', - help="execute playbooks") -execute_parser.add_argument('-i', '--inventory', - default=None, - type=lambda x: core.file_exists(execute_parser, x), - help="Inventory file to use. " - "Default: {lcl}. " - "NOTE: to reuse old environment use {" - "host}". 
- format(lcl=LOCAL_HOSTS, host=HOSTS_FILE)) -execute_parser.add_argument("--provision", action="store_true", - help="provision fresh nodes from server") -execute_parser.add_argument("--install", action="store_true", - help="install Openstack on nodes") -execute_parser.add_argument("--test", action="store_true", - help="execute tests") -execute_parser.add_argument("--collect-logs", action="store_true", - help="Pull logs from nodes") -execute_parser.add_argument("--cleanup", action="store_true", - help="cleanup nodes") -execute_parser.set_defaults(func=ansible_wrapper) diff --git a/tools/kcli/kcli.cfg b/tools/kcli/kcli.cfg new file mode 100644 index 000000000..eae881d2f --- /dev/null +++ b/tools/kcli/kcli.cfg @@ -0,0 +1,19 @@ +[DEFAULTS] +KHALEESI_DIR = /home/aopincar/Git/Repos/khaleesi +SETTINGS_DIR = %(KHALEESI_DIR)s/settings +MODULES_DIR = %(KHALEESI_DIR)s/library +ROLES_DIR = %(KHALEESI_DIR)s/roles +PLAYBOOKS_DIR = %(KHALEESI_DIR)s/playbooks +KCLI_SETTINGS_YML = kcli_settings.yml + +[ROOT_OPTS] +provisioner +installer +tester +distro +product + +[AUTO_EXEC_OPTS] +provision +install +test diff --git a/tools/kcli/kcli.py b/tools/kcli/kcli.py new file mode 100755 index 000000000..12469c013 --- /dev/null +++ b/tools/kcli/kcli.py @@ -0,0 +1,478 @@ +#!/usr/bin/env python + +import os +import sys +import re +import yaml +import yamls +import conf +from configure import Configuration +from logger import get_logger +from logging import WARNING, INFO, DEBUG +from parse import create_parser +from execute.execute import PLAYBOOKS + +SETTING_FILE_EXT = ".yml" +kcli_conf = conf.config +logger = None + +# Representer for Configuration object +yaml.SafeDumper.add_representer( + Configuration, + lambda dumper, value: + yaml.representer.BaseRepresenter.represent_mapping + (dumper, u'tag:yaml.org,2002:map', value)) + + +def dict_lookup(dic, key, *keys): + if logger: + calling_method_name = sys._getframe().f_back.f_code.co_name + current_method_name = sys._getframe().f_code.co_name 
+ if current_method_name != calling_method_name: + full_key = list(keys) + full_key.insert(0, key) + logger.debug("looking up the value of \"%s\"" % ".".join(full_key)) + try: + if key not in dic: + if isinstance(key, str) and key.isdigit(): + key = int(key) + elif isinstance(key, int): + key = str(key) + if keys: + return dict_lookup(dic.get(key, {}), *keys) + value = dic[key] + if logger: + logger.debug("value has been found: \"%s\"" % value) + return value + except KeyError: + err_msg = "Key \"%s\" not found in %s" % (key, dic) + if logger: + logger.error(err_msg) + else: + print "ERROR:\t", err_msg + sys.exit(1) + + +def dict_insert(dic, val, key, *keys): + if not keys: + dic[key] = val + return + + if key not in dic: + dic[key] = {} + + dict_insert(dic[key], val, *keys) + + +def validate_settings_dir(settings_dir=None): + """ + Checks & returns the full path to the settings dir. + Path is set in the following priority: + 1. Method argument + 2. System environment variable + 3. Settings dir in the current working dir + :param settings_dir: path given as argument by a user + :return: path to settings dir (str) + :raise: ValueError: when path to the settings dir doesn't exist + """ + settings_dir = settings_dir or os.environ.get( + 'KHALEESI_SETTINGS') or os.path.join(os.getcwd(), "settings", "") + + if not os.path.exists(settings_dir): + raise ValueError( + "Path to settings dir doesn't exist: %s" % settings_dir) + + return settings_dir + + +class Lookup(yaml.YAMLObject): + yaml_tag = u'!lookup' + yaml_dumper = yaml.SafeDumper + + settings = None + + def __init__(self, key, old_style_lookup=False): + self.key = key + if old_style_lookup: + self.convert_old_style_lookup() + + def __repr__(self): + return "%s(%s)" % (self.__class__.__name__, self.key) + + def convert_old_style_lookup(self): + self.key = '{{!lookup %s}}' % self.key + + parser = re.compile('\[\s*\!lookup\s*[\w.]*\s*\]') + lookups = parser.findall(self.key) + + for lookup in lookups: + self.key = 
self.key.replace(lookup, '.{{%s}}' % lookup[1:-1]) + + def replace_lookup(self): + """ + Replace any !lookup with the corresponding value from settings table + """ + while True: + parser = re.compile('\{\{\s*\!lookup\s*[\w.]*\s*\}\}') + lookups = parser.findall(self.key) + + if not lookups: + break + + for a_lookup in lookups: + lookup_key = re.search('(\w+\.?)+ *?\}\}', a_lookup) + lookup_key = lookup_key.group(0).strip()[:-2].strip() + lookup_value = dict_lookup( + self.settings, *lookup_key.split(".")) + + if isinstance(lookup_value, Lookup): + return + + lookup_value = str(lookup_value) + + self.key = re.sub('\{\{\s*\!lookup\s*[\w.]*\s*\}\}', + lookup_value, self.key, count=1) + + @classmethod + def from_yaml(cls, loader, node): + return Lookup(loader.construct_scalar(node), old_style_lookup=True) + + @classmethod + def to_yaml(cls, dumper, node): + if node.settings: + node.replace_lookup() + + return dumper.represent_data("%s" % node.key) + + +class OptionNode(object): + def __init__(self, path, parent=None): + self.path = path + self.option = self.path.split("/")[-1] + self.parent = parent + self.parent_value = None + if parent: + self.option = "-".join([self.parent.option, self.option]) + self.values = self._get_values() + self.children = {i: dict() for i in self._get_sub_options()} + + if self.parent: + self.parent_value = self.path.split("/")[-2] + self.parent.children[self.parent_value][self.option] = self + + def _get_values(self): + """ + Returns a sorted list of values available for the current option + """ + values = [a_file.split(SETTING_FILE_EXT)[0] + for a_file in os.listdir(self.path) + if os.path.isfile(os.path.join(self.path, a_file)) + and a_file.endswith(SETTING_FILE_EXT)] + + values.sort() + return values + + def _get_sub_options(self): + """ + Returns a sorted list of sup-options available for the current option + """ + options = [options_dir for options_dir in os.listdir(self.path) + if os.path.isdir(os.path.join(self.path, options_dir)) + 
and options_dir in self.values] + + options.sort() + return options + + +class OptionsTree(object): + def __init__(self, settings_dir, option): + self.root = None + self.name = option + self.action = option[:-2] if option.endswith('er') else option + self.options_dict = {} + self.root_dir = os.path.join(settings_dir, self.name) + + self.build_tree() + self.init_options_dict(self.root) + + def build_tree(self): + """ + Builds the OptionsTree + """ + self.add_node(self.root_dir, None) + + def add_node(self, path, parent): + """ + Adds OptionNode object to the tree + :param path: Path to option dir + :param parent: Parent option (OptionNode) + """ + node = OptionNode(path, parent) + + if not self.root: + self.root = node + + for child in node.children: + sub_options_dir = os.path.join(node.path, child) + sub_options = [a_dir for a_dir in os.listdir(sub_options_dir) if + os.path.isdir(os.path.join(sub_options_dir, a_dir))] + + for sub_option in sub_options: + self.add_node(os.path.join(sub_options_dir, sub_option), node) + + def init_options_dict(self, node): + """ + Initialize "options_dict" dictionary to store all options and their + valid values + :param node: OptionNode object + """ + if node.option not in self.options_dict: + self.options_dict[node.option] = {} + + if node.parent_value: + self.options_dict[node.option][node.parent_value] = node.values + + if 'ALL' not in self.options_dict[node.option]: + self.options_dict[node.option]['ALL'] = set() + + self.options_dict[node.option]['ALL'].update(node.values) + + for pre_value in node.children: + for child in node.children[pre_value].values(): + self.init_options_dict(child) + + def get_options_ymls(self, options): + ymls = [] + if not options: + return ymls + + keys = options.keys() + keys.sort() + + def step_in(key, node): + keys.remove(key) + if node.option != key.replace("_", "-"): + logger.error("Please provide all ancestor of \"--%s\"" % + key.replace("_", "-")) + sys.exit(1) + 
ymls.append(os.path.join(node.path, options[key] + ".yml")) + child_keys = [child_key for child_key in keys + if child_key.startswith(key) + and len(child_key.split("_")) == + len(key.split("_")) + 1] + + for child_key in child_keys: + step_in(child_key, node.children[options[key]][ + child_key.replace("_", "-")]) + + step_in(keys[0], self.root) + logger.debug("%s tree settings files:\n%s" % (self.name, ymls)) + + return ymls + + def __str__(self): + return yaml.safe_dump(self.options_dict, default_flow_style=False) + + +def merge_settings(settings, file_path): + logger.debug("Loading setting file: %s" % file_path) + if not os.path.exists(file_path): + logger.error("Setting file doesn't found: %s" % file_path) + sys.exit(1) + + loaded_file = Configuration.from_file(file_path).configure() + settings = settings.merge(loaded_file) + + return settings + + +def generate_settings_file(settings_files, extra_vars): + settings = Configuration.from_dict({}) + + for settings_file in settings_files: + settings = merge_settings(settings, settings_file) + + for extra_var in extra_vars: + if extra_var.startswith('@'): + settings_file = normalize_file(extra_var[1:]) + settings = merge_settings(settings, settings_file) + + else: + if '=' not in extra_var: + logger.error("\"%s\" - extra-var argument must be a path " + "to a setting file or 'key=value' pair" % + extra_var) + sys.exit(1) + key, value = extra_var.split("=") + dict_insert(settings, value, *key.split(".")) + + # Dump & load again settings, because 'in_string_lookup' can't work with + # 'Configuration' object. + dumped_settings = yaml.safe_dump(settings, default_flow_style=False) + settings = yaml.safe_load(dumped_settings) + + return settings + + +def in_string_lookup(settings): + """ + Convert strings contain the '!lookup' tag in them and don't + already converted into Lookup objects. 
+ """ + if Lookup.settings is None: + Lookup.settings = settings + + my_iter = settings.iteritems() if isinstance(settings, dict) \ + else enumerate(settings) + + for idx_key, value in my_iter: + if isinstance(value, dict): + in_string_lookup(settings[idx_key]) + elif isinstance(value, list): + in_string_lookup(value) + elif isinstance(value, str): + parser = re.compile('\{\{\s*\!lookup\s*[\w.]*\s*\}\}') + lookups = parser.findall(value) + + if lookups: + settings[idx_key] = Lookup(value) + + +def normalize_file(file_path): + """ + Return a normalized absolutized version of a file + """ + if not os.path.isabs(file_path): + abspath = os.path.abspath(file_path) + logger.debug( + "Setting the absolute path of \"%s\" to: \"%s\"" + % (file_path, abspath) + ) + file_path = abspath + + if not os.path.exists(file_path): + logger.error("File not found: %s" % file_path) + sys.exit(1) + + return file_path + + +def lookup2lookup(settings): + first_dump = True + while True: + if not first_dump: + Lookup.settings = settings + settings = yaml.load(output) + + in_string_lookup(settings) + output = yaml.safe_dump(settings, default_flow_style=False) + + if first_dump: + first_dump = False + continue + + if not cmp(settings, Lookup.settings): + break + + return output + + +def main(): + options_trees = [] + settings_files = [] + settings_dir = validate_settings_dir(kcli_conf.get('DEFAULTS', + 'SETTINGS_DIR')) + + for option in kcli_conf.options('ROOT_OPTS'): + options_trees.append(OptionsTree(settings_dir, option)) + + parser = create_parser(options_trees) + args = parser.parse_args() + + verbose = int(args.verbose) + + if args.verbose == 0: + args.verbose = WARNING + elif args.verbose == 1: + args.verbose = INFO + else: + args.verbose = DEBUG + + global logger + logger = get_logger(args.verbose) + + # settings generation stage + if args.which.lower() != 'execute': + for input_file in args.input: + settings_files.append(normalize_file(input_file)) + + for options_tree in 
options_trees: + options = {key: value for key, value in vars(args).iteritems() + if value and key.startswith(options_tree.name)} + + settings_files += (options_tree.get_options_ymls(options)) + + logger.debug("All settings files to be loaded:\n%s" % settings_files) + + settings = generate_settings_file(settings_files, args.extra_vars) + + output = lookup2lookup(settings) + + if args.output_file: + with open(args.output_file, 'w') as output_file: + output_file.write(output) + else: + print output + + exec_playbook = (args.which == 'execute') or \ + (not args.dry_run and args.which in kcli_conf.options( + 'AUTO_EXEC_OPTS')) + + # playbook execution stage + if exec_playbook: + if args.which == 'execute': + execute_args = parser.parse_args() + elif args.which not in PLAYBOOKS: + logger.debug("No playbook named \"%s\", nothing to execute.\n" + "Please choose from: %s" % (args.which, PLAYBOOKS)) + return + else: + args_list = ["execute"] + if verbose: + args_list.append('-%s' % ('v' * verbose)) + if 'inventory' in args: + inventory = args.inventory + else: + inventory = 'local_hosts' if args.which == 'provision' \ + else 'hosts' + args_list.append('--inventory=%s' % inventory) + args_list.append('--' + args.which) + args_list.append('--collect-logs') + if args.output_file: + logger.debug("Using the newly created settings file: \"%s\"" + % args.output_file) + args_list.append('--settings=%s' % args.output_file) + else: + from time import time + + tmp_settings_file = 'kcli_settings_' + str(time()) + \ + SETTING_FILE_EXT + with open(tmp_settings_file, 'w') as output_file: + output_file.write(output) + logger.debug("Temporary settings file \"%s\" has been created " + "for execution purpose only." 
% tmp_settings_file) + args_list.append('--settings=%s' % tmp_settings_file) + + execute_args = parser.parse_args(args_list) + + logger.debug("execute parser args: %s" % args) + execute_args.func(execute_args) + + if not args.output_file and args.which != 'execute': + logger.debug("Temporary settings file \"%s\" has been deleted." + % tmp_settings_file) + os.remove(tmp_settings_file) + + +if __name__ == '__main__': + main() diff --git a/tools/kcli/kcli/__init__.py b/tools/kcli/kcli/__init__.py deleted file mode 100644 index 4875b5e41..000000000 --- a/tools/kcli/kcli/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from kcli import core -from kcli import execute - -__VERSION__ = '0.1.0' diff --git a/tools/kcli/logger.py b/tools/kcli/logger.py new file mode 100644 index 000000000..3fe595a64 --- /dev/null +++ b/tools/kcli/logger.py @@ -0,0 +1,29 @@ +import logging +from colorlog import ColoredFormatter + + +debug_formatter = ColoredFormatter( + "%(log_color)s%(levelname)-8s%(message)s", + log_colors=dict( + DEBUG='blue', + INFO='green', + WARNING='yellow', + ERROR='red', + CRITICAL='bold_red,bg_white', + ) +) + +def get_logger(log_level=logging.WARNING): + # Create stream handler with debug level + sh = logging.StreamHandler() + sh.setLevel(log_level) + + # Add the debug_formatter to sh + sh.setFormatter(debug_formatter) + + # Create logger and add handler to it + logger = logging.getLogger(__name__) + logger.setLevel(log_level) + logger.addHandler(sh) + + return logger diff --git a/tools/kcli/parse.py b/tools/kcli/parse.py new file mode 100644 index 000000000..0bf8c435b --- /dev/null +++ b/tools/kcli/parse.py @@ -0,0 +1,82 @@ +from argparse import ArgumentParser, RawTextHelpFormatter +from execute.execute import * +from execute.core import * +import conf + + +def create_parser(options_trees): + """ + Creates and return a parser dynamically + :param options_trees: An iterable with OptionsTree objects + :return: Namespace object + """ + parser = ArgumentParser(prog="kcli", + 
formatter_class=RawTextHelpFormatter) + + sub_parsers = parser.add_subparsers() + + execute_parser = sub_parsers.add_parser('execute') + execute_parser.add_argument('-i', '--inventory', default=None, + type=lambda x: core.file_exists( + execute_parser, x), + help="Inventory file to use. " + "Default: {lcl}. " + "NOTE: to reuse old environment use {" + "host}". + format(lcl=LOCAL_HOSTS, host=HOSTS_FILE)) + execute_parser.add_argument("-v", "--verbose", help="verbosity", + action='count', default=0) + execute_parser.add_argument("--provision", action="store_true", + help="provision fresh nodes from server") + execute_parser.add_argument("--install", action="store_true", + help="install Openstack on nodes") + execute_parser.add_argument("--test", action="store_true", + help="execute tests") + execute_parser.add_argument("--collect-logs", action="store_true", + help="Pull logs from nodes") + execute_parser.add_argument("--cleanup", action="store_true", + help="cleanup nodes") + execute_parser.add_argument("--settings", + type=lambda x: file_exists(parser, x), + help="settings file to use. 
default: %s" + % conf.KCLI_SETTINGS_YML) + execute_parser.set_defaults(func=ansible_wrapper) + execute_parser.set_defaults(which='execute') + + for options_tree in options_trees: + sub_parser = sub_parsers.add_parser( + options_tree.action, formatter_class=RawTextHelpFormatter) + options = options_tree.options_dict.keys() + options.sort() + + sub_parser.add_argument("-d", "--dry-run", action='store_true', + help="skip playbook execution stage") + sub_parser.add_argument("-e", "--extra-vars", default=list(), + action='append', help="Provide extra vars") + sub_parser.add_argument("-n", "--input", action='append', + help="a settings file that will be loaded " + "first, all other settings file will be" + " merged with it", default=list()) + sub_parser.add_argument("-o", "--output-file", + help="file to dump the settings into") + sub_parser.add_argument("-v", "--verbose", help="verbosity", + action='count', default=0) + + for option in options: + choices = None + help_msg = "" + for prev_value, cur_value in \ + options_tree.options_dict[option].iteritems(): + if prev_value == 'ALL': + choices = cur_value + continue + help_msg += "%s - %s\n" % (prev_value, cur_value) + + required = True if option == options[0] else False + sub_parser.add_argument("--" + option, type=str, help=help_msg, + choices=choices, required=required) + + # for sub-parser recognition purpose only + sub_parser.set_defaults(which=options_tree.action) + + return parser diff --git a/tools/kcli/settings.py b/tools/kcli/settings.py new file mode 100644 index 000000000..b41e3f488 --- /dev/null +++ b/tools/kcli/settings.py @@ -0,0 +1,18 @@ +import ConfigParser +import sys + +# KCLI_CONF_FILE = '/etc/khaleesi/kcli.cfg' +KCLI_CONF_FILE = 'kcli.cfg' + +# config = ConfigParser.RawConfigParser(allow_no_value=True) +config = ConfigParser.ConfigParser(allow_no_value=True) +try: + with open(KCLI_CONF_FILE) as conf: + config.readfp(conf) +except IOError: + print "ERROR: kcli conf file (%s) not found" % 
KCLI_CONF_FILE + sys.exit(1) + +for dir_path in config.options('DEFAULTS'): + globals()[dir_path.upper()] = config.get('DEFAULTS', dir_path) + diff --git a/tools/kcli/setup.py b/tools/kcli/setup.py deleted file mode 100644 index 1e882eb55..000000000 --- a/tools/kcli/setup.py +++ /dev/null @@ -1,20 +0,0 @@ -from setuptools import setup, find_packages -from os.path import join, dirname, abspath -import kcli - - -prj_dir = dirname(abspath(__file__)) -setup( - name='kcli', - version=kcli.__VERSION__, - packages=find_packages(), - long_description=open(join(prj_dir, 'README.rst')).read(), - entry_points={ - 'console_scripts': ['kcli = kcli.core:main'] - }, - install_requires=[ - 'ansible', - ], - author='Yair Fried', - author_email='yfried@redhat.com' -) diff --git a/tools/kcli/yamls.py b/tools/kcli/yamls.py new file mode 100644 index 000000000..ccb0ad3f2 --- /dev/null +++ b/tools/kcli/yamls.py @@ -0,0 +1,91 @@ +import logging +import yaml +import sys + +from string import ascii_lowercase, digits +from configure import Configuration + +logger = logging.getLogger('logger') + + +def random_generator(size=32, chars=ascii_lowercase + digits): + import random + + return ''.join(random.choice(chars) for _ in range(size)) + + +@Configuration.add_constructor('join') +def _join_constructor(loader, node): + seq = loader.construct_sequence(node) + return ''.join([str(i) for i in seq]) + + +@Configuration.add_constructor('random') +def _random_constructor(loader, node): + """ + usage: + !random + returns a random string of characters + """ + + num_chars = loader.construct_scalar(node) + return random_generator(int(num_chars)) + + +def _limit_chars(_string, length): + length = int(length) + if length < 0: + raise AttributeError( + 'length to crop should be int, not ' + str(length)) + + return _string[:length] + + +@Configuration.add_constructor('limit_chars') +def _limit_chars_constructor(loader, node): + """ + Usage: + !limit_chars [, ] + Method returns first param cropped to 
chars. + """ + params = loader.construct_sequence(node) + if len(params) != 2: + raise AttributeError('limit_chars requires two params: string length') + return _limit_chars(params[0], params[1]) + + +@Configuration.add_constructor('env') +def _env_constructor(loader, node): + """ + usage: + !env + !env [, [default]] + !env [, [default], [length]] + returns value for the environment var-name + default may be specified by passing a second parameter in a list + length is maximum length of output (croped to that length) + """ + import os + # scalar node or string has no defaults, raise KeyError + # if absent + if isinstance(node, yaml.nodes.ScalarNode): + try: + return os.environ[loader.construct_scalar(node)] + except KeyError: + import kcli + + logger.error("No environment variable named \"%s\" and default" + "isn't defined" % node.value) + sys.exit(1) + + seq = loader.construct_sequence(node) + var = seq[0] + if len(seq) >= 2: + ret = os.getenv(var, seq[1]) # second item is default val + + # third item is max. 
length + if len(seq) == 3: + ret = _limit_chars(ret, seq[2]) + return ret + + return os.environ[var] From 0bf62f3df417c858495952519651b1409c4f344f Mon Sep 17 00:00:00 2001 From: Yair Fried Date: Thu, 4 Feb 2016 16:26:41 +0200 Subject: [PATCH 123/137] [KCLI] Adds setup.py Adds version 1.0.0 (builds on old kcli) Adds requirements.txt Adds README Restructure tree for proper packaging Rename kcli.py->main.py to avoid import conflicts Rename kcli.cfg->kcli.cfg.example Ignores kcli.cfg Import kcli.yamls in __init__.py because it has meta-classes Change-Id: I5153f42b8adc5402ee890f7bd04c466c6ea12a34 --- tools/kcli/README.rst | 27 +++++++++++++++++++ tools/kcli/{kcli.cfg => etc/kcli.cfg.example} | 0 tools/kcli/execute/__init__.py | 4 --- tools/kcli/kcli/__init__.py | 3 +++ tools/kcli/{ => kcli}/conf.py | 0 tools/kcli/kcli/execute/__init__.py | 2 ++ tools/kcli/{ => kcli}/execute/core.py | 2 +- tools/kcli/{ => kcli}/execute/execute.py | 9 ++++--- tools/kcli/{ => kcli}/logger.py | 0 tools/kcli/{kcli.py => kcli/main.py} | 11 ++++---- tools/kcli/{ => kcli}/parse.py | 4 +-- tools/kcli/{ => kcli}/settings.py | 0 tools/kcli/{ => kcli}/yamls.py | 2 +- tools/kcli/requirements.txt | 6 +++++ tools/kcli/setup.py | 25 +++++++++++++++++ 15 files changed, 78 insertions(+), 17 deletions(-) create mode 100644 tools/kcli/README.rst rename tools/kcli/{kcli.cfg => etc/kcli.cfg.example} (100%) delete mode 100644 tools/kcli/execute/__init__.py create mode 100644 tools/kcli/kcli/__init__.py rename tools/kcli/{ => kcli}/conf.py (100%) create mode 100644 tools/kcli/kcli/execute/__init__.py rename tools/kcli/{ => kcli}/execute/core.py (97%) rename tools/kcli/{ => kcli}/execute/execute.py (98%) rename tools/kcli/{ => kcli}/logger.py (100%) rename tools/kcli/{kcli.py => kcli/main.py} (99%) rename tools/kcli/{ => kcli}/parse.py (99%) rename tools/kcli/{ => kcli}/settings.py (100%) rename tools/kcli/{ => kcli}/yamls.py (99%) create mode 100644 tools/kcli/requirements.txt create mode 100644 
tools/kcli/setup.py diff --git a/tools/kcli/README.rst b/tools/kcli/README.rst new file mode 100644 index 000000000..98567454e --- /dev/null +++ b/tools/kcli/README.rst @@ -0,0 +1,27 @@ +======================== +kcli - Khaleesi CLI tool +======================== + +``kcli`` is intended to reduce Khaleesi users' dependency on external CLI tools. + +Setup +===== + +Use pip to install from source:: + + $ pip install tools/kcli + +.. note:: For development work it's better to install in editable mode:: + + $ pip install -e tools/kcli + +Running kcli +============ + +.. note:: Assumes that ``kcli`` is installed, else follow Setup_. + +You can get general usage information with the ``--help`` option:: + + kcli --help + +This displays options you can pass to ``kcli``. diff --git a/tools/kcli/kcli.cfg b/tools/kcli/etc/kcli.cfg.example similarity index 100% rename from tools/kcli/kcli.cfg rename to tools/kcli/etc/kcli.cfg.example diff --git a/tools/kcli/execute/__init__.py b/tools/kcli/execute/__init__.py deleted file mode 100644 index 253173824..000000000 --- a/tools/kcli/execute/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -import core -import execute - -__VERSION__ = '0.2.0' diff --git a/tools/kcli/kcli/__init__.py b/tools/kcli/kcli/__init__.py new file mode 100644 index 000000000..7f069f3be --- /dev/null +++ b/tools/kcli/kcli/__init__.py @@ -0,0 +1,3 @@ +# Contains meta-classes so we need to import it without using. 
+from kcli import yamls +__VERSION__ = '1.0.0' diff --git a/tools/kcli/conf.py b/tools/kcli/kcli/conf.py similarity index 100% rename from tools/kcli/conf.py rename to tools/kcli/kcli/conf.py diff --git a/tools/kcli/kcli/execute/__init__.py b/tools/kcli/kcli/execute/__init__.py new file mode 100644 index 000000000..c8cc42b81 --- /dev/null +++ b/tools/kcli/kcli/execute/__init__.py @@ -0,0 +1,2 @@ +# from kcli.execute import core +# from kcli.execute import execute diff --git a/tools/kcli/execute/core.py b/tools/kcli/kcli/execute/core.py similarity index 97% rename from tools/kcli/execute/core.py rename to tools/kcli/kcli/execute/core.py index e5c0986ba..034a9d8fd 100644 --- a/tools/kcli/execute/core.py +++ b/tools/kcli/kcli/execute/core.py @@ -2,8 +2,8 @@ import argparse import os import sys -import conf +from kcli import conf assert "playbooks" == os.path.basename(conf.PLAYBOOKS_DIR), \ "Bad path to playbooks" diff --git a/tools/kcli/execute/execute.py b/tools/kcli/kcli/execute/execute.py similarity index 98% rename from tools/kcli/execute/execute.py rename to tools/kcli/kcli/execute/execute.py index 0d5c0a03d..76d3bbb24 100644 --- a/tools/kcli/execute/execute.py +++ b/tools/kcli/kcli/execute/execute.py @@ -1,12 +1,13 @@ from os import path -import ansible.playbook -from ansible import callbacks + import ansible.color import ansible.inventory +import ansible.playbook import ansible.utils +from ansible import callbacks -import conf -import core +from kcli import conf +from kcli.execute import core HOSTS_FILE = "hosts" LOCAL_HOSTS = "local_hosts" diff --git a/tools/kcli/logger.py b/tools/kcli/kcli/logger.py similarity index 100% rename from tools/kcli/logger.py rename to tools/kcli/kcli/logger.py diff --git a/tools/kcli/kcli.py b/tools/kcli/kcli/main.py similarity index 99% rename from tools/kcli/kcli.py rename to tools/kcli/kcli/main.py index 12469c013..2999633b8 100755 --- a/tools/kcli/kcli.py +++ b/tools/kcli/kcli/main.py @@ -1,16 +1,17 @@ #!/usr/bin/env python 
-import os import sys +from logging import WARNING, INFO, DEBUG +import os import re + import yaml -import yamls -import conf from configure import Configuration + +from kcli import conf +from kcli.execute.execute import PLAYBOOKS from logger import get_logger -from logging import WARNING, INFO, DEBUG from parse import create_parser -from execute.execute import PLAYBOOKS SETTING_FILE_EXT = ".yml" kcli_conf = conf.config diff --git a/tools/kcli/parse.py b/tools/kcli/kcli/parse.py similarity index 99% rename from tools/kcli/parse.py rename to tools/kcli/kcli/parse.py index 0bf8c435b..fb50be260 100644 --- a/tools/kcli/parse.py +++ b/tools/kcli/kcli/parse.py @@ -1,7 +1,7 @@ from argparse import ArgumentParser, RawTextHelpFormatter -from execute.execute import * + from execute.core import * -import conf +from execute.execute import * def create_parser(options_trees): diff --git a/tools/kcli/settings.py b/tools/kcli/kcli/settings.py similarity index 100% rename from tools/kcli/settings.py rename to tools/kcli/kcli/settings.py diff --git a/tools/kcli/yamls.py b/tools/kcli/kcli/yamls.py similarity index 99% rename from tools/kcli/yamls.py rename to tools/kcli/kcli/yamls.py index ccb0ad3f2..ca6d2f793 100644 --- a/tools/kcli/yamls.py +++ b/tools/kcli/kcli/yamls.py @@ -72,7 +72,7 @@ def _env_constructor(loader, node): try: return os.environ[loader.construct_scalar(node)] except KeyError: - import kcli + import main logger.error("No environment variable named \"%s\" and default" "isn't defined" % node.value) diff --git a/tools/kcli/requirements.txt b/tools/kcli/requirements.txt new file mode 100644 index 000000000..bc8cb711c --- /dev/null +++ b/tools/kcli/requirements.txt @@ -0,0 +1,6 @@ +ansible<2.0.0 +python-novaclient<3.0.0 +python-neutronclient>=4.0.0 +PyYAML>=3.11 +configure>=0.5 +colorlog>=2.6.1 diff --git a/tools/kcli/setup.py b/tools/kcli/setup.py new file mode 100644 index 000000000..8969f7e8c --- /dev/null +++ b/tools/kcli/setup.py @@ -0,0 +1,25 @@ +from pip import 
req +from setuptools import setup, find_packages +from os.path import join, dirname, abspath +import kcli + +# parse_requirements() returns generator of pip.req.InstallRequirement objects +install_reqs = req.parse_requirements('requirements.txt', session=False) +# reqs is a list of requirement +# e.g. ['django==1.5.1', 'mezzanine==1.4.6'] +reqs = [str(ir.req) for ir in install_reqs] + + +prj_dir = dirname(abspath(__file__)) +setup( + name='kcli', + version=kcli.__VERSION__, + packages=find_packages(), + long_description=open(join(prj_dir, 'README.rst')).read(), + entry_points={ + 'console_scripts': ['kcli = kcli.main:main'] + }, + install_requires=reqs, + author='Yair Fried', + author_email='yfried@redhat.com' +) From 2dbd398b73bec4e4bb1c71a157829a3fb6999e1d Mon Sep 17 00:00:00 2001 From: Yair Fried Date: Thu, 4 Feb 2016 17:53:59 +0200 Subject: [PATCH 124/137] [KCLI] Ignore kcli.cfg Users can keep their cfg file. Tracking cfg.example instead Change-Id: Ib57238ebe0f0550840364dbb13332e9fe59e848e --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 5c5b6ac9a..ca491cb9f 100644 --- a/.gitignore +++ b/.gitignore @@ -34,4 +34,5 @@ instack_hosts doc/_build/ fence_xvm.key vm-host-table +tools/kcli/etc/kcli.cfg *.swp From 16b4b7291f58902c578aa7ffb45e38c5f48d492c Mon Sep 17 00:00:00 2001 From: Yair Fried Date: Thu, 4 Feb 2016 17:16:17 +0200 Subject: [PATCH 125/137] Define search paths for kcli.cfg Order is (based on ansible): ENV CWD USER HOME SYSTEM Removes settings.py (duplicate of conf.py) Change-Id: If43114184e17a86af93efa3d7b42f6e55c601194 --- tools/kcli/README.rst | 17 +++++++++++++++++ tools/kcli/kcli/conf.py | 36 ++++++++++++++++++++++++++++++------ tools/kcli/kcli/settings.py | 18 ------------------ 3 files changed, 47 insertions(+), 24 deletions(-) delete mode 100644 tools/kcli/kcli/settings.py diff --git a/tools/kcli/README.rst b/tools/kcli/README.rst index 98567454e..ab4163a46 100644 --- a/tools/kcli/README.rst +++ 
b/tools/kcli/README.rst @@ -15,6 +15,22 @@ Use pip to install from source:: $ pip install -e tools/kcli +Conf +==== + +.. note:: Assumes that ``kcli`` is installed, else follow Setup_. + +``kcli`` will look for ``kcli.cfg`` in the following order: + +#. In working directory: ``./kcli.cfg`` +#. In user home directory: ``~/.kcli.cfg`` +#. In system settings: ``/etc/khaleesi/kcli.cfg`` + +.. note:: To specify a different directory or different filename, override the + lookup order with ``KCLI_CONFIG`` environment variable:: + + $ KCLI_CONFIG=/my/config/file.ini kcli --help + Running kcli ============ @@ -25,3 +41,4 @@ You can get general usage information with the ``--help`` option:: kcli --help This displays options you can pass to ``kcli``. + diff --git a/tools/kcli/kcli/conf.py b/tools/kcli/kcli/conf.py index 4556727fe..e1576bbb7 100644 --- a/tools/kcli/kcli/conf.py +++ b/tools/kcli/kcli/conf.py @@ -1,17 +1,41 @@ import ConfigParser +import os import sys -# KCLI_CONF_FILE = '/etc/khaleesi/kcli.cfg' +ENV_VAR_NAME = "KCLI_CONFIG" KCLI_CONF_FILE = 'kcli.cfg' +CWD_PATH = os.path.join(os.getcwd(), KCLI_CONF_FILE) +USER_PATH = os.path.expanduser('~/.' + KCLI_CONF_FILE) +SYSTEM_PATH =os.path.join('/etc/khaleesi', KCLI_CONF_FILE) -config = ConfigParser.ConfigParser(allow_no_value=True) -try: - with open(KCLI_CONF_FILE) as conf: - config.readfp(conf) -except IOError: + +def load_config_file(): + """Load config file order(ENV, CWD, USER HOME, SYSTEM). 
+ + :return ConfigParser: config object + """ + _config = ConfigParser.ConfigParser(allow_no_value=True) + env_path = os.getenv(ENV_VAR_NAME, None) + if env_path is not None: + env_path = os.path.expanduser(env_path) + if os.path.isdir(env_path): + env_path = os.path.join(env_path, KCLI_CONF_FILE) + for path in (env_path, CWD_PATH, USER_PATH, SYSTEM_PATH): + if path is not None and os.path.exists(path): + _config.read(path) + return _config + # TODO(yfried): replace 'exit' with a proper exception + # raise ConfigParser.Error("kcli conf file %s not found" % KCLI_CONF_FILE) print "ERROR: kcli conf file (%s) not found" % KCLI_CONF_FILE sys.exit(1) +config = load_config_file() + + + + + + for dir_path in config.options('DEFAULTS'): globals()[dir_path.upper()] = config.get('DEFAULTS', dir_path) diff --git a/tools/kcli/kcli/settings.py b/tools/kcli/kcli/settings.py deleted file mode 100644 index b41e3f488..000000000 --- a/tools/kcli/kcli/settings.py +++ /dev/null @@ -1,18 +0,0 @@ -import ConfigParser -import sys - -# KCLI_CONF_FILE = '/etc/khaleesi/kcli.cfg' -KCLI_CONF_FILE = 'kcli.cfg' - -# config = ConfigParser.RawConfigParser(allow_no_value=True) -config = ConfigParser.ConfigParser(allow_no_value=True) -try: - with open(KCLI_CONF_FILE) as conf: - config.readfp(conf) -except IOError: - print "ERROR: kcli conf file (%s) not found" % KCLI_CONF_FILE - sys.exit(1) - -for dir_path in config.options('DEFAULTS'): - globals()[dir_path.upper()] = config.get('DEFAULTS', dir_path) - From abc416b3321ad060b3a6021a6a877761d1064856 Mon Sep 17 00:00:00 2001 From: Yair Fried Date: Thu, 4 Feb 2016 18:42:20 +0200 Subject: [PATCH 126/137] [KCLI] Doc BZ#1103566 workaround Change-Id: I936becf0490d82d60da96561a70ce47eec1d1d65 --- tools/kcli/README.rst | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tools/kcli/README.rst b/tools/kcli/README.rst index ab4163a46..c8c0c379e 100644 --- a/tools/kcli/README.rst +++ b/tools/kcli/README.rst @@ -7,6 +7,11 @@ kcli - Khaleesi CLI tool Setup ===== 
+.. note:: On Fedora 23 `BZ#1103566 `_ + calls for:: + + $ dnf install redhat-rpm-config + Use pip to install from source:: $ pip install tools/kcli From 5394fe1ee1e6e6e0bb4899765381929ce6131622 Mon Sep 17 00:00:00 2001 From: Yair Fried Date: Sun, 7 Feb 2016 13:40:50 +0200 Subject: [PATCH 127/137] [KCLI] Doc $WORKSPACE workaround Change-Id: I5cfd76b1a6cfcf1046326c4adeda4cd11d494dd8 --- tools/kcli/README.rst | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tools/kcli/README.rst b/tools/kcli/README.rst index c8c0c379e..c873bfd9d 100644 --- a/tools/kcli/README.rst +++ b/tools/kcli/README.rst @@ -47,3 +47,11 @@ You can get general usage information with the ``--help`` option:: This displays options you can pass to ``kcli``. +.. note:: Some setting files are hard-coded to look for the ``$WORKSPACE`` + environment variable (see `Khaleesi - Cookbook`) that should point to the + directory where ``khaleesi`` and ``khaleesi-settings`` have been cloned. You + can define it manually to work around that:: + + $ export WORKSPACE=$(dirname `pwd`) + + From 0ddc554bcf29155c4ddff5ab1c01f2be176cd8cd Mon Sep 17 00:00:00 2001 From: Yair Fried Date: Tue, 5 Jan 2016 16:34:46 +0200 Subject: [PATCH 128/137] [2.0.0] Convert local_action to "delegate_to: localhost" For ansible 2.0 this would work better. 
Also some minor cosmetic changes to tasks Change-Id: I159f95240708114ab0a85908af4f3e3d266d5639 --- playbooks/adhoc/sriov/compute.yml | 9 ++++- playbooks/collect_logs.yml | 10 ++++- .../rdo-manager/yum_repos/repo-rhos.yml | 16 ++++---- .../rdo-manager/updates/update-undercloud.yml | 18 ++++----- playbooks/provisioner/centosci/main.yml | 9 +++-- playbooks/provisioner/foreman/main.yml | 37 ++++++++++--------- .../openstack_virtual_baremetal/main.yml | 2 +- roles/common-handlers/handlers/main.yml | 24 +++++++----- .../openstack/openstack-status/tasks/main.yml | 9 +++-- 9 files changed, 79 insertions(+), 55 deletions(-) diff --git a/playbooks/adhoc/sriov/compute.yml b/playbooks/adhoc/sriov/compute.yml index 3005cc763..b98014a00 100644 --- a/playbooks/adhoc/sriov/compute.yml +++ b/playbooks/adhoc/sriov/compute.yml @@ -40,6 +40,11 @@ - name: Enable neutron-sriov-nic-agent service: name=neutron-sriov-nic-agent state=started enabled=yes - - local_action: - module: wait_for_ssh reboot_first=true host={{ hostvars[inventory_hostname].ansible_ssh_host }} user={{ hostvars[inventory_hostname].ansible_ssh_user }} key={{ hostvars[inventory_hostname].ansible_ssh_private_key_file }} + - name: reboot and wait for ssh + delegate_to: localhost + wait_for_ssh: + reboot_first: true + host: "{{ hostvars[inventory_hostname].ansible_ssh_host }}" + user: "{{ hostvars[inventory_hostname].ansible_ssh_user }}" + key: "{{ hostvars[inventory_hostname].ansible_ssh_private_key_file }}" sudo: no diff --git a/playbooks/collect_logs.yml b/playbooks/collect_logs.yml index d8662815d..3589bdad3 100644 --- a/playbooks/collect_logs.yml +++ b/playbooks/collect_logs.yml @@ -156,13 +156,19 @@ ignore_errors: true - name: extract the logs - local_action: unarchive src={{ base_dir }}/khaleesi/collected_files/{{ inventory_hostname }}.tar dest={{ base_dir }}/khaleesi/collected_files/ + delegate_to: localhost + unarchive: + src: "{{ base_dir }}/khaleesi/collected_files/{{ inventory_hostname }}.tar" + dest: "{{ 
base_dir }}/khaleesi/collected_files/" sudo: no ignore_errors: true when: job.gzip_logs is defined and job.gzip_logs - name: delete the tar file after extraction - local_action: file path={{ base_dir }}/khaleesi/collected_files/{{ inventory_hostname }}.tar state=absent + delegate_to: localhost + file: + path: "{{ base_dir }}/khaleesi/collected_files/{{ inventory_hostname }}.tar" + state: absent sudo: no ignore_errors: true when: job.gzip_logs is defined and job.gzip_logs diff --git a/playbooks/installer/rdo-manager/yum_repos/repo-rhos.yml b/playbooks/installer/rdo-manager/yum_repos/repo-rhos.yml index d43e8d0a9..9c32bb853 100644 --- a/playbooks/installer/rdo-manager/yum_repos/repo-rhos.yml +++ b/playbooks/installer/rdo-manager/yum_repos/repo-rhos.yml @@ -124,12 +124,12 @@ - name: reboot host sudo: no - local_action: - wait_for_ssh - reboot_first=true - host="{{ ansible_ssh_host }}" - user="root" - key="{{ ansible_ssh_private_key_file }}" - timeout=900 - sudo=false + delegate_to: localhost + wait_for_ssh: + reboot_first: true + host: "{{ ansible_ssh_host }}" + user: root + key: "{{ ansible_ssh_private_key_file }}" + timeout: 900 + sudo: false when: "'{{ repo_host }}' == 'virthost' and new_kernel.rc == 0" diff --git a/playbooks/post-deploy/rdo-manager/updates/update-undercloud.yml b/playbooks/post-deploy/rdo-manager/updates/update-undercloud.yml index 1d3799d95..1060acb5c 100644 --- a/playbooks/post-deploy/rdo-manager/updates/update-undercloud.yml +++ b/playbooks/post-deploy/rdo-manager/updates/update-undercloud.yml @@ -35,15 +35,15 @@ when: not yum_update_result.changed|bool - name: reboot host - local_action: - wait_for_ssh - reboot_first=true - host="{{ ansible_ssh_host }}" - user="stack" - ssh_opts="-F {{ base_dir }}/khaleesi/ssh.config.ansible" - key="{{ ansible_ssh_private_key_file }}" - timeout=900 - sudo=true + delegate_to: localhost + wait_for_ssh: + reboot_first: true + host: "{{ ansible_ssh_host }}" + user: stack + ssh_opts: "-F {{ base_dir 
}}/khaleesi/ssh.config.ansible" + key: "{{ ansible_ssh_private_key_file }}" + timeout: 900 + sudo: true - name: create vlan10 if doesn't exist ignore_errors: yes diff --git a/playbooks/provisioner/centosci/main.yml b/playbooks/provisioner/centosci/main.yml index c9a81045c..e7e72606a 100644 --- a/playbooks/provisioner/centosci/main.yml +++ b/playbooks/provisioner/centosci/main.yml @@ -39,6 +39,9 @@ gather_facts: no max_fail_percentage: 0 tasks: - - local_action: - module: wait_for_ssh host={{ hostvars[inventory_hostname].ansible_ssh_host }} user=root key={{ hostvars[inventory_hostname].ansible_ssh_private_key_file }} - sudo: no + delegate_to: localhost + wait_for_ssh: + host: "{{ hostvars[inventory_hostname].ansible_ssh_host }}" + user: root + key: "{{ hostvars[inventory_hostname].ansible_ssh_private_key_file }}" + sudo: no diff --git a/playbooks/provisioner/foreman/main.yml b/playbooks/provisioner/foreman/main.yml index 37160ae76..ca5a1558a 100644 --- a/playbooks/provisioner/foreman/main.yml +++ b/playbooks/provisioner/foreman/main.yml @@ -5,16 +5,14 @@ tasks: - name: Add candidate hosts to host list add_host: - name="{{ item.value.name }}" - groups="{{ item.value.groups - if item.value.groups is string - else item.value.groups| join(',') }}" - rebuild="{{ item.value.rebuild|lower}}" - node_label="{{ item.key }}" - ansible_fqdn="{{ item.value.fqdn }}" - ansible_ssh_user="{{ item.value.remote_user }}" - ansible_ssh_host="{{ item.value.fqdn }}" - ansible_ssh_private_key_file="{{ provisioner.key_file }}" + name: "{{ item.value.name }}" + groups: "{{ item.value.groups if item.value.groups is string else item.value.groups| join(',') }}" + rebuild: "{{ item.value.rebuild|lower}}" + node_label: "{{ item.key }}" + ansible_fqdn: "{{ item.value.fqdn }}" + ansible_ssh_user: "{{ item.value.remote_user }}" + ansible_ssh_host: "{{ item.value.fqdn }}" + ansible_ssh_private_key_file: "{{ provisioner.key_file }}" with_dict: provisioner.nodes - name: Rebuild nodes - Foreman @@ 
-34,11 +32,11 @@ register: created_nodes - name: Wait for hosts to get reachable (after rebuild) - local_action: - wait_for_ssh - user="root" - host={{ hostvars[inventory_hostname].ansible_ssh_host }} - key={{ hostvars[inventory_hostname].ansible_ssh_private_key_file }} + delegate_to: localhost + wait_for_ssh: + user: "root" + host: "{{ hostvars[inventory_hostname].ansible_ssh_host }}" + key: "{{ hostvars[inventory_hostname].ansible_ssh_private_key_file }}" - name: Check and Enable virtualization support hosts: openstack_nodes:virthost @@ -91,8 +89,13 @@ provisioner.nodes[node_label].network.interfaces register: update_ifcfgs - - local_action: - module: wait_for_ssh reboot_first=true host={{ hostvars[inventory_hostname].ansible_ssh_host }} user={{ hostvars[inventory_hostname].ansible_ssh_user }} key={{ hostvars[inventory_hostname].ansible_ssh_private_key_file }} + - name: reboot and wait for ssh + delegate_to: localhost + wait_for_ssh: + reboot_first: true + host: "{{ hostvars[inventory_hostname].ansible_ssh_host }}" + user: "{{ hostvars[inventory_hostname].ansible_ssh_user }}" + key: "{{ hostvars[inventory_hostname].ansible_ssh_private_key_file }}" when: update_ifcfgs|changed sudo: no diff --git a/playbooks/provisioner/openstack_virtual_baremetal/main.yml b/playbooks/provisioner/openstack_virtual_baremetal/main.yml index ce7967c09..9f474e6dc 100644 --- a/playbooks/provisioner/openstack_virtual_baremetal/main.yml +++ b/playbooks/provisioner/openstack_virtual_baremetal/main.yml @@ -406,7 +406,7 @@ fetch: src=~/ssh.config.ansible dest={{ base_dir }}/khaleesi/ssh.config.ansible flat=yes - name: change mod for ssh.config.ansible - local_action: shell chmod 755 {{ base_dir }}/khaleesi/ssh.config.ansible + shell: chmod 755 {{ base_dir }}/khaleesi/ssh.config.ansible - name: copy id_rsa key back to the slave fetch: src=~/.ssh/id_rsa dest={{ base_dir }}/khaleesi/id_rsa_undercloud_instance flat=yes diff --git a/roles/common-handlers/handlers/main.yml 
b/roles/common-handlers/handlers/main.yml index 4ac075f51..08016c483 100644 --- a/roles/common-handlers/handlers/main.yml +++ b/roles/common-handlers/handlers/main.yml @@ -1,19 +1,23 @@ --- - name: reboot sudo: no - local_action: - wait_for_ssh reboot_first=true host={{ hostvars[inventory_hostname].ansible_ssh_host }} user={{ hostvars[inventory_hostname].ansible_ssh_user }} key={{ hostvars[inventory_hostname].ansible_ssh_private_key_file }} + delegate_to: localhost + wait_for_ssh: + reboot_first: true + host: "{{ hostvars[inventory_hostname].ansible_ssh_host }}" + user: "{{ hostvars[inventory_hostname].ansible_ssh_user }}" + key: "{{ hostvars[inventory_hostname].ansible_ssh_private_key_file }}" - name: reboot_rdo_manager sudo: no - local_action: - wait_for_ssh - reboot_first=true - ssh_opts="-F ../../../ssh.config.ansible" - host="{{ ansible_ssh_host }}" - user="root" - key="{{ ansible_ssh_private_key_file }}" - sudo=false + delegate_to: localhost + wait_for_ssh: + reboot_first: true + ssh_opts: "-F ../../../ssh.config.ansible" + host: "{{ ansible_ssh_host }}" + user: "root" + key: "{{ ansible_ssh_private_key_file }}" + sudo: false notify: - Check instance uptime diff --git a/roles/openstack/openstack-status/tasks/main.yml b/roles/openstack/openstack-status/tasks/main.yml index 36769253e..e1f1d55cb 100644 --- a/roles/openstack/openstack-status/tasks/main.yml +++ b/roles/openstack/openstack-status/tasks/main.yml @@ -1,9 +1,12 @@ --- - name: Wait for openstack port 35357 to open sudo: no - local_action: - wait_for host={{ hostvars[inventory_hostname].ansible_ssh_host }} - port=35357 delay=10 timeout=120 + delegate_to: localhost + wait_for_ssh: + host: "{{ hostvars[inventory_hostname].ansible_ssh_host }}" + port: 35357 + delay: 10 + timeout: 120 register: wait_for_openstack - name: Fail if any of them fail From c3da62613bb1f8445ef53ee544826b9d057bec62 Mon Sep 17 00:00:00 2001 From: Yair Fried Date: Sun, 10 Jan 2016 09:28:12 +0200 Subject: [PATCH 129/137] [2.0.0] 
Remove misplaced quotes. In "replace" Ansible 2.0 will not ignore some quotes that were ignored in 1 Change-Id: I5bec3f7257c16a68399c813a701651cd7933a329 --- playbooks/installer/packstack/run.yml | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/playbooks/installer/packstack/run.yml b/playbooks/installer/packstack/run.yml index 3d83afcda..5cff819a0 100644 --- a/playbooks/installer/packstack/run.yml +++ b/playbooks/installer/packstack/run.yml @@ -20,28 +20,28 @@ - name: Edit packstack answer-file from the config lineinfile: - dest="/root/{{ installer.packstack.answer_file }}" - regexp='{{ item.key }}=.*' - line='{{ item.key }}={{ item.value }}' + dest: "/root/{{ installer.packstack.answer_file }}" + regexp: '{{ item.key }}=.*' + line: '{{ item.key }}={{ item.value }}' with_dict: installer.packstack.config - name: Update password values in answer file with default password replace: - dest="/root/{{ installer.packstack.answer_file }}" - regexp="(.*_PASSWORD|.*_PW)=.*" - replace="\1=redhat" + dest: "/root/{{ installer.packstack.answer_file }}" + regexp: "(.*_PASSWORD|.*_PW)=.*" + replace: '\1=redhat' - name: Update network hosts replace: - dest="/root/{{ installer.packstack.answer_file }}" - regexp=^CONFIG_NETWORK_HOSTS=.*$ - replace=CONFIG_NETWORK_HOSTS="{% for host in groups.network %}{{ hostvars[host]['ansible_default_ipv4']['address'] }}{% if not loop.last %},{% endif %}{% endfor %}" + dest: "/root/{{ installer.packstack.answer_file }}" + regexp: ^CONFIG_NETWORK_HOSTS=.*$ + replace: CONFIG_NETWORK_HOSTS={% for host in groups.network %}{{ hostvars[host]['ansible_default_ipv4']['address'] }}{% if not loop.last %},{% endif %}{% endfor %} - name: Update compute hosts replace: - dest="/root/{{ installer.packstack.answer_file }}" - regexp=^CONFIG_COMPUTE_HOSTS=.*$ - replace=CONFIG_COMPUTE_HOSTS="{% for host in groups.compute %}{{ hostvars[host]['ansible_default_ipv4']['address'] }}{% if not loop.last %},{% endif %}{% endfor %}" 
+ dest: "/root/{{ installer.packstack.answer_file }}" + regexp: ^CONFIG_COMPUTE_HOSTS=.*$ + replace: CONFIG_COMPUTE_HOSTS={% for host in groups.compute %}{{ hostvars[host]['ansible_default_ipv4']['address'] }}{% if not loop.last %},{% endif %}{% endfor %} - name: Running packstack shell: "packstack --answer-file=/root/{{ installer.packstack.answer_file }} && touch /root/packstack-already-done" From 8fc6ace837e53f1dd9da5ce82f00c73b76e69469 Mon Sep 17 00:00:00 2001 From: Yair Fried Date: Tue, 12 Jan 2016 15:00:35 +0200 Subject: [PATCH 130/137] WIP [2.0.0] BREAK!!! Update inventory attributes In 2.0 ansible_ssh_(user|host|port) -> ansible_(user|host|port) Need to update -settings as well Change-Id: I0c5bc30bb012fd72bcf847a0eb7d8f27ebb0070e --- doc/khaleesi.rst | 2 +- playbooks/adhoc/sriov/compute.yml | 4 ++-- playbooks/full-job-patch-dvr.yml | 6 +++--- playbooks/full-job-patch.yml | 6 +++--- .../rdo-manager/templates/ssh_config.j2 | 14 +++++++------- .../rdo-manager/undercloud/pre-virthost.yml | 2 +- .../rdo-manager/yum_repos/repo-rhos.yml | 12 ++++++------ .../rdo-manager/updates/update-overcloud.yml | 8 ++++---- .../rdo-manager/updates/update-undercloud.yml | 2 +- playbooks/provisioner/beaker/main.yml | 4 ++-- playbooks/provisioner/centosci/main.yml | 6 +++--- playbooks/provisioner/foreman/main.yml | 14 +++++++------- playbooks/provisioner/manual/main.yml | 4 ++-- playbooks/provisioner/openstack/main.yml | 12 ++++++------ .../openstack_virtual_baremetal/main.yml | 4 ++-- playbooks/provisioner/templates/hosts.j2 | 2 +- playbooks/provisioner/templates/inventory.j2 | 4 ++-- playbooks/provisioner/virsh/main.yml | 16 ++++++++++------ .../virsh/templates/ssh.config.ansible.j2 | 2 +- playbooks/tester/coverage/activate.yml | 6 +++--- playbooks/tester/coverage/generate-report.yml | 2 +- .../coverage/templates/sitecustomize.py.j2 | 2 +- playbooks/tester/integration/horizon/pre.yml | 2 +- playbooks/tester/jenkins/builders/test.yml | 2 +- 
playbooks/tester/templates/hosts_slave.conf.j2 | 4 ++-- roles/common-handlers/handlers/main.yml | 6 +++--- roles/libvirt/ssh_config/templates/ssh_config.j2 | 4 ++-- .../ssh_config/templates/ssh_config_host.j2 | 2 +- roles/openstack/openstack-status/tasks/main.yml | 2 +- roles/patch_rpm/templates/patched_rpms.j2 | 2 +- 30 files changed, 81 insertions(+), 77 deletions(-) diff --git a/doc/khaleesi.rst b/doc/khaleesi.rst index 0bc23e07f..5a11af7a8 100644 --- a/doc/khaleesi.rst +++ b/doc/khaleesi.rst @@ -595,7 +595,7 @@ You must create a new `local_host` file. Here again adjust the IP address of you cat < local_hosts [undercloud] - undercloud groups=undercloud ansible_ssh_host= ansible_ssh_user=stack ansible_ssh_private_key_file=~/.ssh/id_rsa + undercloud groups=undercloud ansible_host= ansible_user=stack ansible_ssh_private_key_file=~/.ssh/id_rsa [local] localhost ansible_connection=local EOF diff --git a/playbooks/adhoc/sriov/compute.yml b/playbooks/adhoc/sriov/compute.yml index b98014a00..38973583f 100644 --- a/playbooks/adhoc/sriov/compute.yml +++ b/playbooks/adhoc/sriov/compute.yml @@ -44,7 +44,7 @@ delegate_to: localhost wait_for_ssh: reboot_first: true - host: "{{ hostvars[inventory_hostname].ansible_ssh_host }}" - user: "{{ hostvars[inventory_hostname].ansible_ssh_user }}" + host: "{{ hostvars[inventory_hostname].ansible_host }}" + user: "{{ hostvars[inventory_hostname].ansible_user }}" key: "{{ hostvars[inventory_hostname].ansible_ssh_private_key_file }}" sudo: no diff --git a/playbooks/full-job-patch-dvr.yml b/playbooks/full-job-patch-dvr.yml index 1fe6890fd..c17331fc9 100644 --- a/playbooks/full-job-patch-dvr.yml +++ b/playbooks/full-job-patch-dvr.yml @@ -22,10 +22,10 @@ yum: name=createrepo state=present - name: create repo folder - file: path=/home/{{ ansible_ssh_user }}/dist-git/ state=directory + file: path=/home/{{ ansible_user }}/dist-git/ state=directory - name: copy the generated rpms - copy: src={{ item }} dest=/home/{{ ansible_ssh_user 
}}/dist-git/{{ patch.dist_git.name }}/ + copy: src={{ item }} dest=/home/{{ ansible_user }}/dist-git/{{ patch.dist_git.name }}/ with_fileglob: - "{{ lookup('env', 'PWD') }}/generated_rpms/*.rpm" @@ -36,7 +36,7 @@ - name: Create local repo for patched rpm sudo: yes - shell: "createrepo /home/{{ ansible_ssh_user }}/dist-git/{{ patch.dist_git.name }}" + shell: "createrepo /home/{{ ansible_user }}/dist-git/{{ patch.dist_git.name }}" when: "{{ hostvars['localhost'].rpm_build_rc }} == 0" - include: install.yml diff --git a/playbooks/full-job-patch.yml b/playbooks/full-job-patch.yml index 36afda670..7e729275a 100644 --- a/playbooks/full-job-patch.yml +++ b/playbooks/full-job-patch.yml @@ -22,10 +22,10 @@ yum: name=createrepo state=present - name: create repo folder - file: path=/home/{{ ansible_ssh_user }}/dist-git/ state=directory + file: path=/home/{{ ansible_user }}/dist-git/ state=directory - name: copy the generated rpms - copy: src={{ item }} dest=/home/{{ ansible_ssh_user }}/dist-git/{{ patch.dist_git.name }}/ + copy: src={{ item }} dest=/home/{{ ansible_user }}/dist-git/{{ patch.dist_git.name }}/ with_fileglob: - "{{ lookup('env', 'PWD') }}/generated_rpms/*.rpm" @@ -36,7 +36,7 @@ - name: Create local repo for patched rpm sudo: yes - shell: "createrepo /home/{{ ansible_ssh_user }}/dist-git/{{ patch.dist_git.name }}" + shell: "createrepo /home/{{ ansible_user }}/dist-git/{{ patch.dist_git.name }}" when: "{{ hostvars['localhost'].rpm_build_rc }} == 0" - include: install.yml diff --git a/playbooks/installer/rdo-manager/templates/ssh_config.j2 b/playbooks/installer/rdo-manager/templates/ssh_config.j2 index eeeae2599..560320d20 100644 --- a/playbooks/installer/rdo-manager/templates/ssh_config.j2 +++ b/playbooks/installer/rdo-manager/templates/ssh_config.j2 @@ -1,13 +1,13 @@ {% if groups["virthost"] is defined %} Host undercloud-root - ProxyCommand ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ConnectTimeout=60 -i {{ 
hostvars[groups['virthost'][0]].ansible_ssh_private_key_file }} stack@{{ hostvars[groups['virthost'][0]].ansible_ssh_host }} -W {{ hostvars['localhost'].undercloud_ip }}:22 + ProxyCommand ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ConnectTimeout=60 -i {{ hostvars[groups['virthost'][0]].ansible_ssh_private_key_file }} stack@{{ hostvars[groups['virthost'][0]].ansible_host }} -W {{ hostvars['localhost'].undercloud_ip }}:22 IdentityFile id_rsa_virt_host User root StrictHostKeyChecking no UserKnownHostsFile=/dev/null Host undercloud - ProxyCommand ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ConnectTimeout=60 -i {{ hostvars[groups['virthost'][0]].ansible_ssh_private_key_file }} stack@{{ hostvars[groups['virthost'][0]].ansible_ssh_host }} -W {{ hostvars['localhost'].undercloud_ip }}:22 + ProxyCommand ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ConnectTimeout=60 -i {{ hostvars[groups['virthost'][0]].ansible_ssh_private_key_file }} stack@{{ hostvars[groups['virthost'][0]].ansible_host }} -W {{ hostvars['localhost'].undercloud_ip }}:22 IdentityFile id_rsa_virt_host User stack StrictHostKeyChecking no @@ -29,8 +29,8 @@ Host undercloud-from-virthost-as-stack {%endif %} {% if groups["virthost"] is not defined and hw_env is defined and hw_env.env_type != "ovb_host_cloud" %} -Host {{ hostvars[groups['undercloud'][0]].ansible_ssh_host }} - Hostname {{ hostvars[groups['undercloud'][0]].ansible_ssh_host }} +Host {{ hostvars[groups['undercloud'][0]].ansible_host }} + Hostname {{ hostvars[groups['undercloud'][0]].ansible_host }} IdentityFile ~/.ssh/id_rsa IdentitiesOnly yes User root @@ -39,14 +39,14 @@ Host {{ hostvars[groups['undercloud'][0]].ansible_ssh_host }} {% if groups["virthost"] is not defined and hw_env is defined and hw_env.env_type == "ovb_host_cloud" %} Host undercloud-root - ProxyCommand ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ConnectTimeout=60 -i {{ 
hostvars[groups['provisioned'][0]].ansible_ssh_private_key_file }} stack@{{ hostvars[groups['provisioned'][0]].ansible_ssh_host }} -W {{ floating_ip }}:22 + ProxyCommand ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ConnectTimeout=60 -i {{ hostvars[groups['provisioned'][0]].ansible_ssh_private_key_file }} stack@{{ hostvars[groups['provisioned'][0]].ansible_host }} -W {{ floating_ip }}:22 IdentityFile {{ base_dir }}/khaleesi/id_rsa_undercloud_instance User root StrictHostKeyChecking no UserKnownHostsFile=/dev/null Host undercloud - ProxyCommand ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ConnectTimeout=60 -i {{ hostvars[groups['provisioned'][0]].ansible_ssh_private_key_file }} stack@{{ hostvars[groups['provisioned'][0]].ansible_ssh_host }} -W {{ floating_ip }}:22 + ProxyCommand ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ConnectTimeout=60 -i {{ hostvars[groups['provisioned'][0]].ansible_ssh_private_key_file }} stack@{{ hostvars[groups['provisioned'][0]].ansible_host }} -W {{ floating_ip }}:22 IdentityFile {{ base_dir }}/khaleesi/id_rsa_undercloud_instance User stack StrictHostKeyChecking no @@ -63,7 +63,7 @@ Host undercloud-from-baremetal-host {% if groups["overcloud"] is defined %} {% for host in groups["overcloud"] %} Host {{ host }} - ProxyCommand ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ConnectTimeout=60 -i ~/.ssh/id_rsa -F ssh.config.ansible {{ hostvars[groups['undercloud'][0]].ansible_ssh_host }} -W {{ hostvars[host].ansible_fqdn }}:22 + ProxyCommand ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ConnectTimeout=60 -i ~/.ssh/id_rsa -F ssh.config.ansible {{ hostvars[groups['undercloud'][0]].ansible_host }} -W {{ hostvars[host].ansible_fqdn }}:22 IdentityFile id_rsa_undercloud IdentitiesOnly yes User heat-admin diff --git a/playbooks/installer/rdo-manager/undercloud/pre-virthost.yml b/playbooks/installer/rdo-manager/undercloud/pre-virthost.yml index 
cef8d2123..21314c656 100644 --- a/playbooks/installer/rdo-manager/undercloud/pre-virthost.yml +++ b/playbooks/installer/rdo-manager/undercloud/pre-virthost.yml @@ -2,7 +2,7 @@ - name: Update packages on the host hosts: undercloud vars: - - ansible_ssh_user: root + - ansible_user: root tasks: - name: repolist command: yum -d 7 repolist diff --git a/playbooks/installer/rdo-manager/yum_repos/repo-rhos.yml b/playbooks/installer/rdo-manager/yum_repos/repo-rhos.yml index 9c32bb853..35c062fe9 100644 --- a/playbooks/installer/rdo-manager/yum_repos/repo-rhos.yml +++ b/playbooks/installer/rdo-manager/yum_repos/repo-rhos.yml @@ -1,10 +1,10 @@ --- -- include: "{{ base_dir }}/khaleesi/playbooks/group_by.yml ansible_ssh_user=root" +- include: "{{ base_dir }}/khaleesi/playbooks/group_by.yml ansible_user=root" - name: Setup openstack repos hosts: "{{ repo_host }}:&RedHat" vars: - - ansible_ssh_user: root + - ansible_user: root environment: http_proxy: "{{ installer.http_proxy_url }}" gather_facts: yes @@ -93,21 +93,21 @@ # - name: Get build details # hosts: "{{ repo_host }}:&RedHat" # vars: -# - ansible_ssh_user: root +# - ansible_user: root # roles: # - build_mark/build - name: Linux common prep (Collect performance data, etc.) 
hosts: "{{ repo_host }}" vars: - - ansible_ssh_user: root + - ansible_user: root roles: - { role: linux-common } - name: Update packages on the host hosts: "{{ repo_host }}" vars: - - ansible_ssh_user: root + - ansible_user: root tasks: - name: repolist command: yum -d 7 repolist @@ -127,7 +127,7 @@ delegate_to: localhost wait_for_ssh: reboot_first: true - host: "{{ ansible_ssh_host }}" + host: "{{ ansible_host }}" user: root key: "{{ ansible_ssh_private_key_file }}" timeout: 900 diff --git a/playbooks/post-deploy/rdo-manager/updates/update-overcloud.yml b/playbooks/post-deploy/rdo-manager/updates/update-overcloud.yml index 27f5f7238..c3d093ec4 100644 --- a/playbooks/post-deploy/rdo-manager/updates/update-overcloud.yml +++ b/playbooks/post-deploy/rdo-manager/updates/update-overcloud.yml @@ -27,7 +27,7 @@ hosts: update:!undercloud tasks: - name: dump package list - shell: rpm -qa &> {{ ansible_ssh_host }}-rpm.log + shell: rpm -qa &> {{ ansible_host }}-rpm.log - name: copy 55-heat-config file to node BZ 1278181 sudo: yes @@ -121,15 +121,15 @@ hosts: update:!undercloud tasks: - name: dump package list - shell: rpm -qa &> {{ ansible_ssh_host }}-rpm-updated.log + shell: rpm -qa &> {{ ansible_host }}-rpm-updated.log - name: get rpm list stat register: rpm_list_result - stat: path=~/{{ ansible_ssh_host }}-rpm.log + stat: path=~/{{ ansible_host }}-rpm.log - name: get rpm updated stat register: rpm_list_updated_result - stat: path=~/{{ ansible_ssh_host }}-rpm-updated.log + stat: path=~/{{ ansible_host }}-rpm-updated.log - name: fail when rpm list checksum are equal fail: msg="Failed, no package has been updated..." 
diff --git a/playbooks/post-deploy/rdo-manager/updates/update-undercloud.yml b/playbooks/post-deploy/rdo-manager/updates/update-undercloud.yml index 1060acb5c..4ce22bee4 100644 --- a/playbooks/post-deploy/rdo-manager/updates/update-undercloud.yml +++ b/playbooks/post-deploy/rdo-manager/updates/update-undercloud.yml @@ -38,7 +38,7 @@ delegate_to: localhost wait_for_ssh: reboot_first: true - host: "{{ ansible_ssh_host }}" + host: "{{ ansible_host }}" user: stack ssh_opts: "-F {{ base_dir }}/khaleesi/ssh.config.ansible" key: "{{ ansible_ssh_private_key_file }}" diff --git a/playbooks/provisioner/beaker/main.yml b/playbooks/provisioner/beaker/main.yml index 58e81a155..f5578d936 100644 --- a/playbooks/provisioner/beaker/main.yml +++ b/playbooks/provisioner/beaker/main.yml @@ -11,9 +11,9 @@ name="host0" groups="provisioned" ansible_fqdn="{{ lookup('env', 'BEAKER_MACHINE') }}" - ansible_ssh_user="{{ provisioner.remote_user }}" + ansible_user="{{ provisioner.remote_user }}" ansible_ssh_private_key_file="{{ provisioner.key_file }}" - ansible_ssh_host="{{ lookup('env', 'BEAKER_MACHINE') }}" + ansible_host="{{ lookup('env', 'BEAKER_MACHINE') }}" - name: Use beaker to provision the machine hosts: localhost diff --git a/playbooks/provisioner/centosci/main.yml b/playbooks/provisioner/centosci/main.yml index e7e72606a..e9971edce 100644 --- a/playbooks/provisioner/centosci/main.yml +++ b/playbooks/provisioner/centosci/main.yml @@ -29,9 +29,9 @@ if item.item.value.groups is string else item.item.value.groups| join(',') }}" ansible_fqdn="{{ item.hosts.0.hostname }}" - ansible_ssh_user="{{ provisioner.remote_user }}" + ansible_user="{{ provisioner.remote_user }}" ansible_ssh_private_key_file="{{ provisioner.key_file }}" - ansible_ssh_host="{{ item.hosts.0.hostname }}" + ansible_host="{{ item.hosts.0.hostname }}" with_items: provisioned_nodes.results - name: wait for hosts to get reachable @@ -41,7 +41,7 @@ tasks: delegate_to: localhost wait_for_ssh: - host: "{{ 
hostvars[inventory_hostname].ansible_ssh_host }}" + host: "{{ hostvars[inventory_hostname].ansible_host }}" user: root key: "{{ hostvars[inventory_hostname].ansible_ssh_private_key_file }}" sudo: no diff --git a/playbooks/provisioner/foreman/main.yml b/playbooks/provisioner/foreman/main.yml index ca5a1558a..6662d0425 100644 --- a/playbooks/provisioner/foreman/main.yml +++ b/playbooks/provisioner/foreman/main.yml @@ -10,8 +10,8 @@ rebuild: "{{ item.value.rebuild|lower}}" node_label: "{{ item.key }}" ansible_fqdn: "{{ item.value.fqdn }}" - ansible_ssh_user: "{{ item.value.remote_user }}" - ansible_ssh_host: "{{ item.value.fqdn }}" + ansible_user: "{{ item.value.remote_user }}" + ansible_host: "{{ item.value.fqdn }}" ansible_ssh_private_key_file: "{{ provisioner.key_file }}" with_dict: provisioner.nodes @@ -24,7 +24,7 @@ auth_url: "{{ provisioner.foreman.auth_url }}" username: "{{ provisioner.foreman.username }}" password: "{{ provisioner.foreman.password }}" - host_id: "{{ hostvars[inventory_hostname].ansible_ssh_host }}" + host_id: "{{ hostvars[inventory_hostname].ansible_host }}" rebuild: "{{ rebuild }}" wait_for_host: "{{ provisioner.foreman.wait_for_host|lower }}" retries: 4 @@ -35,14 +35,14 @@ delegate_to: localhost wait_for_ssh: user: "root" - host: "{{ hostvars[inventory_hostname].ansible_ssh_host }}" + host: "{{ hostvars[inventory_hostname].ansible_host }}" key: "{{ hostvars[inventory_hostname].ansible_ssh_private_key_file }}" - name: Check and Enable virtualization support hosts: openstack_nodes:virthost gather_facts: no vars: - - ansible_ssh_user: root + - ansible_user: root tasks: - name: Check if CPU supports INTEL based KVM shell: egrep -c 'vmx' /proc/cpuinfo @@ -93,8 +93,8 @@ delegate_to: localhost wait_for_ssh: reboot_first: true - host: "{{ hostvars[inventory_hostname].ansible_ssh_host }}" - user: "{{ hostvars[inventory_hostname].ansible_ssh_user }}" + host: "{{ hostvars[inventory_hostname].ansible_host }}" + user: "{{ 
hostvars[inventory_hostname].ansible_user }}" key: "{{ hostvars[inventory_hostname].ansible_ssh_private_key_file }}" when: update_ifcfgs|changed sudo: no diff --git a/playbooks/provisioner/manual/main.yml b/playbooks/provisioner/manual/main.yml index 71c789716..796ef33e1 100644 --- a/playbooks/provisioner/manual/main.yml +++ b/playbooks/provisioner/manual/main.yml @@ -17,7 +17,7 @@ if item.value.groups is string else item.value.groups| join(',') }}" ansible_fqdn="{{ item.value.hostname }}" - ansible_ssh_user="{{ item.value.remote_user }}" + ansible_user="{{ item.value.remote_user }}" ansible_ssh_private_key_file="{{ provisioner.key_file }}" - ansible_ssh_host="{{ item.value.hostname }}" + ansible_host="{{ item.value.hostname }}" with_dict: provisioner.nodes diff --git a/playbooks/provisioner/openstack/main.yml b/playbooks/provisioner/openstack/main.yml index 06adb4089..9b7d02874 100644 --- a/playbooks/provisioner/openstack/main.yml +++ b/playbooks/provisioner/openstack/main.yml @@ -121,9 +121,9 @@ name: "{{ item.item.value.name }}" groups: "{{ item.item.value.groups if item.item.value.groups is string else item.item.value.groups| join(',') }}" ansible_fqdn: "{{ item.item.value.hostname }}" - ansible_ssh_user: "{{ item.item.value.remote_user }}" + ansible_user: "{{ item.item.value.remote_user }}" ansible_ssh_private_key_file: "{{ provisioner.key_file }}" - ansible_ssh_host: "{%- if item.public_ip %}{{ item.public_ip }}{%- else %}{{ item.info.addresses[provisioner.network.network_list.management.name][0].addr }}{% endif %}" + ansible_host: "{%- if item.public_ip %}{{ item.public_ip }}{%- else %}{{ item.info.addresses[provisioner.network.network_list.management.name][0].addr }}{% endif %}" eth1_interface_ip: "{{ item.info.addresses[provisioner.network.network_list.data.name][0].addr }}" with_items: created_nodes.results @@ -145,7 +145,7 @@ - name: Add Neutron Floating IPs to host list add_host: name: "{{ item.item.value.name }}" - ansible_ssh_host: "{{ item.public_ip 
}}" + ansible_host: "{{ item.public_ip }}" with_items: floatingip.results when: floatingip @@ -157,7 +157,7 @@ tasks: - name: Wait for Reachable Nodes wait_for: - host: "{{ hostvars[inventory_hostname].ansible_ssh_host }}" + host: "{{ ansible_host }}" port: 22 search_regex: OpenSSH timeout: 600 @@ -205,6 +205,6 @@ wait_for_ssh: reboot_first: "true" # delegate_to changes the context for ansible_vars - host: "{{ hostvars[inventory_hostname].ansible_ssh_host }}" - user: "{{ hostvars[inventory_hostname].ansible_ssh_user }}" + host: "{{ hostvars[inventory_hostname].ansible_host }}" + user: "{{ hostvars[inventory_hostname].ansible_user }}" key: "{{ hostvars[inventory_hostname].ansible_ssh_private_key_file }}" diff --git a/playbooks/provisioner/openstack_virtual_baremetal/main.yml b/playbooks/provisioner/openstack_virtual_baremetal/main.yml index 9f474e6dc..6154424e4 100644 --- a/playbooks/provisioner/openstack_virtual_baremetal/main.yml +++ b/playbooks/provisioner/openstack_virtual_baremetal/main.yml @@ -14,9 +14,9 @@ name="host0" groups="provisioned" ansible_fqdn="{{ lookup('env', 'TEST_MACHINE') }}" - ansible_ssh_user="{{ provisioner.remote_user }}" + ansible_user="{{ provisioner.remote_user }}" ansible_ssh_private_key_file="{{ provisioner.key_file }}" - ansible_ssh_host="{{ lookup('env', 'TEST_MACHINE') }}" + ansible_host="{{ lookup('env', 'TEST_MACHINE') }}" - name: set up host cloud environment hosts: host0 diff --git a/playbooks/provisioner/templates/hosts.j2 b/playbooks/provisioner/templates/hosts.j2 index ba9dca649..67ec626f6 100644 --- a/playbooks/provisioner/templates/hosts.j2 +++ b/playbooks/provisioner/templates/hosts.j2 @@ -1,5 +1,5 @@ {% for host in groups['all'] %} {% if hostvars[host].get('ansible_connection', '') != 'local' %} -{{ hostvars[host]['ansible_ssh_host'] }} {{ host }} {{ host }}{{ provisioner.network.domain }} +{{ hostvars[host]['ansible_host'] }} {{ host }} {{ host }}{{ provisioner.network.domain }} {% endif %} {% endfor %} diff --git 
a/playbooks/provisioner/templates/inventory.j2 b/playbooks/provisioner/templates/inventory.j2 index 014bd2daa..c7bee727c 100644 --- a/playbooks/provisioner/templates/inventory.j2 +++ b/playbooks/provisioner/templates/inventory.j2 @@ -2,9 +2,9 @@ {% if hostvars[host].get('ansible_connection', '') == 'local' %} {{ host }} ansible_connection=local {% elif hostvars[host]['ansible_ssh_private_key_file'] is defined %} -{{ host }} ansible_ssh_host={{ hostvars[host]['ansible_ssh_host'] }} ansible_ssh_user={{ hostvars[host]['ansible_ssh_user'] }} ansible_ssh_private_key_file={{ hostvars[host]['ansible_ssh_private_key_file'] }} +{{ host }} ansible_host={{ hostvars[host]['ansible_host'] }} ansible_user={{ hostvars[host]['ansible_user'] }} ansible_ssh_private_key_file={{ hostvars[host]['ansible_ssh_private_key_file'] }} {% else %} -{{ host }} ansible_ssh_host={{ hostvars[host]['ansible_ssh_host'] }} ansible_ssh_user={{ hostvars[host]['ansible_ssh_user'] }} ansible_ssh_password={{ hostvars[host]['ansible_ssh_password'] }} +{{ host }} ansible_host={{ hostvars[host]['ansible_host'] }} ansible_user={{ hostvars[host]['ansible_user'] }} ansible_ssh_password={{ hostvars[host]['ansible_ssh_password'] }} {% endif %} {% endfor %} diff --git a/playbooks/provisioner/virsh/main.yml b/playbooks/provisioner/virsh/main.yml index b6178ebed..89846adeb 100644 --- a/playbooks/provisioner/virsh/main.yml +++ b/playbooks/provisioner/virsh/main.yml @@ -3,13 +3,13 @@ hosts: localhost gather_facts: no tasks: - - name: add hosts to host list + - name: Add hosts to host list add_host: name="{{ item.value.name }}" groups="{{ item.value.groups| join(',') }}" node_label="{{ item.key }}" - ansible_ssh_user="{{ item.value.ssh_user }}" - ansible_ssh_host="{{ item.value.ssh_host }}" + ansible_user="{{ item.value.ssh_user }}" + ansible_host="{{ item.value.ssh_host }}" ansible_ssh_private_key_file="{{ item.value.ssh_key_file }}" with_dict: provisioner.hosts @@ -33,6 +33,8 @@ - name: Check if virtualization is 
supported hosts: virthost gather_facts: no + vars: + - ansible_user: root sudo: yes tasks: - name: check if CPU supports INTEL based KVM @@ -58,6 +60,8 @@ - name: Enable KVM for intel hosts: virthost gather_facts: no + vars: + - ansible_user: root sudo: yes tasks: - name: enable nested KVM support for Intel @@ -238,9 +242,9 @@ add_host: name="{{ item.item.item[0] }}" groups="{{ provisioner.nodes['%s' % item.item.item[0].rstrip('1234567890')].groups | join(',') }}" - ansible_ssh_user="root" + ansible_user="root" ansible_ssh_password="redhat" - ansible_ssh_host="{{ item.stdout }}" + ansible_host="{{ item.stdout }}" when: item.item is defined and item.item.item[1] == "management" with_items: vm_ip_list @@ -290,7 +294,7 @@ - name: update the ssh host name of each machine add_host: name="{{ item }}" - ansible_ssh_host="{{ item }}" + ansible_host="{{ item }}" with_items: groups['openstack_nodes'] - name: update ansible with the new SSH settings diff --git a/playbooks/provisioner/virsh/templates/ssh.config.ansible.j2 b/playbooks/provisioner/virsh/templates/ssh.config.ansible.j2 index b33ff06b5..590692968 100644 --- a/playbooks/provisioner/virsh/templates/ssh.config.ansible.j2 +++ b/playbooks/provisioner/virsh/templates/ssh.config.ansible.j2 @@ -2,7 +2,7 @@ {% if hostvars[host].get('ansible_connection', '') != 'local' and host != 'virthost' %} Host {{ host }} ProxyCommand ssh -i {{ provisioner.hosts.host1.ssh_key_file }} {{ provisioner.hosts.host1.ssh_user }}@{{ provisioner.hosts.host1.ssh_host }} nc %h %p - HostName {{ hostvars[host].ansible_ssh_host }} + HostName {{ hostvars[host].ansible_host }} User root IdentityFile {{ inventory_dir }}/id_rsa StrictHostKeyChecking no diff --git a/playbooks/tester/coverage/activate.yml b/playbooks/tester/coverage/activate.yml index a13915041..65deedff9 100644 --- a/playbooks/tester/coverage/activate.yml +++ b/playbooks/tester/coverage/activate.yml @@ -10,9 +10,9 @@ - file: path=/tmp/coverage-data state=touch mode="u=rwx,g=rwx,o=rwx" - 
template: - owner: "{{ hostvars[inventory_hostname].ansible_ssh_user }}" - group: "{{ hostvars[inventory_hostname].ansible_ssh_user }}" - dest: "/home/{{ hostvars[inventory_hostname].ansible_ssh_user }}/.coveragerc" + owner: "{{ hostvars[inventory_hostname].ansible_user }}" + group: "{{ hostvars[inventory_hostname].ansible_user }}" + dest: "/home/{{ hostvars[inventory_hostname].ansible_user }}/.coveragerc" src: ./templates/my.coveragerc.j2 - template: diff --git a/playbooks/tester/coverage/generate-report.yml b/playbooks/tester/coverage/generate-report.yml index b7c3ced21..f25e8f143 100644 --- a/playbooks/tester/coverage/generate-report.yml +++ b/playbooks/tester/coverage/generate-report.yml @@ -11,7 +11,7 @@ - tar - name: generate coverage report - shell: "coverage html --rcfile=/home/{{ hostvars[inventory_hostname].ansible_ssh_user }}/.coveragerc" + shell: "coverage html --rcfile=/home/{{ hostvars[inventory_hostname].ansible_user }}/.coveragerc" - name: pack coverage report shell: tar czf /tmp/coverage_html_report.tar.gzip /tmp/coverage_html_report diff --git a/playbooks/tester/coverage/templates/sitecustomize.py.j2 b/playbooks/tester/coverage/templates/sitecustomize.py.j2 index 942742416..e387192ff 100644 --- a/playbooks/tester/coverage/templates/sitecustomize.py.j2 +++ b/playbooks/tester/coverage/templates/sitecustomize.py.j2 @@ -1,6 +1,6 @@ import os import coverage -os.environ['COVERAGE_PROCESS_START']= "/home/{{ hostvars[inventory_hostname].ansible_ssh_user}}/.coveragerc" +os.environ['COVERAGE_PROCESS_START']= "/home/{{ hostvars[inventory_hostname].ansible_user}}/.coveragerc" os.environ['COVERAGE_FILE'] = "/tmp/coverage-data" coverage.process_startup() diff --git a/playbooks/tester/integration/horizon/pre.yml b/playbooks/tester/integration/horizon/pre.yml index 37cc34a52..9b5cae8bf 100644 --- a/playbooks/tester/integration/horizon/pre.yml +++ b/playbooks/tester/integration/horizon/pre.yml @@ -9,7 +9,7 @@ vars: horizon_hosts_conf: 
/etc/httpd/conf.d/15-horizon_vhost.conf tasks: - - lineinfile: dest={{ horizon_hosts_conf }} insertafter="ServerAlias" line=" ServerAlias {{ ansible_ssh_host }}" state=present + - lineinfile: dest={{ horizon_hosts_conf }} insertafter="ServerAlias" line=" ServerAlias {{ ansible_host }}" state=present - service: name=httpd state=restarted - name: Get the list of avaialble services diff --git a/playbooks/tester/jenkins/builders/test.yml b/playbooks/tester/jenkins/builders/test.yml index c43494527..6af78bae2 100644 --- a/playbooks/tester/jenkins/builders/test.yml +++ b/playbooks/tester/jenkins/builders/test.yml @@ -2,7 +2,7 @@ - name: test created slave hosts: openstack_nodes vars: - - ansible_ssh_user: "rhos-ci" + - ansible_user: "rhos-ci" tasks: - set_fact: return_errors: [] diff --git a/playbooks/tester/templates/hosts_slave.conf.j2 b/playbooks/tester/templates/hosts_slave.conf.j2 index 36f93d3de..02720397b 100644 --- a/playbooks/tester/templates/hosts_slave.conf.j2 +++ b/playbooks/tester/templates/hosts_slave.conf.j2 @@ -1,9 +1,9 @@ {% for host in groups.openstack_nodes %} -{{ hostvars[host].ansible_ssh_host }} ansible_ssh_user=fedora +{{ hostvars[host].ansible_host }} ansible_user=fedora {% endfor %} [slave] {% for host in groups.openstack_nodes %} -{{ hostvars[host].ansible_ssh_host }} ansible_ssh_user=fedora +{{ hostvars[host].ansible_host }} ansible_user=fedora {% endfor %} diff --git a/roles/common-handlers/handlers/main.yml b/roles/common-handlers/handlers/main.yml index 08016c483..8d8c74262 100644 --- a/roles/common-handlers/handlers/main.yml +++ b/roles/common-handlers/handlers/main.yml @@ -4,8 +4,8 @@ delegate_to: localhost wait_for_ssh: reboot_first: true - host: "{{ hostvars[inventory_hostname].ansible_ssh_host }}" - user: "{{ hostvars[inventory_hostname].ansible_ssh_user }}" + host: "{{ hostvars[inventory_hostname].ansible_host }}" + user: "{{ hostvars[inventory_hostname].ansible_user }}" key: "{{ 
hostvars[inventory_hostname].ansible_ssh_private_key_file }}" - name: reboot_rdo_manager @@ -14,7 +14,7 @@ wait_for_ssh: reboot_first: true ssh_opts: "-F ../../../ssh.config.ansible" - host: "{{ ansible_ssh_host }}" + host: "{{ ansible_host }}" user: "root" key: "{{ ansible_ssh_private_key_file }}" sudo: false diff --git a/roles/libvirt/ssh_config/templates/ssh_config.j2 b/roles/libvirt/ssh_config/templates/ssh_config.j2 index 7edd56d14..ab50734e2 100644 --- a/roles/libvirt/ssh_config/templates/ssh_config.j2 +++ b/roles/libvirt/ssh_config/templates/ssh_config.j2 @@ -1,6 +1,6 @@ -Host libvirt_host +Host libvirt_host User root - HostName {{ hostvars['libvirt_host'].ansible_ssh_host }} + HostName {{ hostvars['libvirt_host'].ansible_host }} ProxyCommand none IdentityFile {{ key_file }} BatchMode yes diff --git a/roles/libvirt/ssh_config/templates/ssh_config_host.j2 b/roles/libvirt/ssh_config/templates/ssh_config_host.j2 index f6d6f9df7..083de9e40 100644 --- a/roles/libvirt/ssh_config/templates/ssh_config_host.j2 +++ b/roles/libvirt/ssh_config/templates/ssh_config_host.j2 @@ -1,7 +1,7 @@ Host {{ item.value.name }} ServerAliveInterval 60 TCPKeepAlive yes - ProxyCommand ssh -o ConnectTimeout=30 -A {{ hostvars["libvirt_host"].ansible_ssh_user}}@{{ hostvars["libvirt_host"].ansible_ssh_host }} nc --wait 30 %h.{{ provisioner.network.nic.net_1.domain }} %p + ProxyCommand ssh -o ConnectTimeout=30 -A {{ hostvars["libvirt_host"].ansible_user}}@{{ hostvars["libvirt_host"].ansible_host }} nc --wait 30 %h.{{ provisioner.network.nic.net_1.domain }} %p ControlMaster auto ControlPath ~/.ssh/mux-%r@%h:%p ControlPersist 8h diff --git a/roles/openstack/openstack-status/tasks/main.yml b/roles/openstack/openstack-status/tasks/main.yml index e1f1d55cb..f1e205bfc 100644 --- a/roles/openstack/openstack-status/tasks/main.yml +++ b/roles/openstack/openstack-status/tasks/main.yml @@ -3,7 +3,7 @@ sudo: no delegate_to: localhost wait_for_ssh: - host: "{{ 
hostvars[inventory_hostname].ansible_ssh_host }}" + host: "{{ hostvars[inventory_hostname].ansible_host }}" port: 35357 delay: 10 timeout: 120 diff --git a/roles/patch_rpm/templates/patched_rpms.j2 b/roles/patch_rpm/templates/patched_rpms.j2 index c6bcdd8b1..228a6f09a 100644 --- a/roles/patch_rpm/templates/patched_rpms.j2 +++ b/roles/patch_rpm/templates/patched_rpms.j2 @@ -1,6 +1,6 @@ [patched-rpms] name=patched component rpms -baseurl=file:///home/{{ ansible_ssh_user }}/dist-git/{{ patch.dist_git.name }} +baseurl=file:///home/{{ ansible_user }}/dist-git/{{ patch.dist_git.name }} enabled=1 gpgcheck=0 priority=1 From 97c47ff9af0a195ad4bbcf6b1820f94b3e5da69f Mon Sep 17 00:00:00 2001 From: Yair Fried Date: Thu, 14 Jan 2016 12:48:31 +0200 Subject: [PATCH 131/137] WIP [2.0.0] BREAKS!!! Replace deprecated openstack modules with new ones nova_compute -> os_server quantum_network -> os_network Note: "provider" network attribute are still unsupported in shade and therefore in Ansible. Need to keep using deprecated module for provider networks. quantum_subnet -> os_subnet quantum_router -> os_router Removes explicit interface and gateway creation/deletion as new module can handle them also. Note: os_floating_ip is still buggy and breaks old flow. 
will handle later Change-Id: I6b95b00f4b585b0bbd4bddfe8f7b3d703b2ae53f --- playbooks/provisioner/openstack/cleanup.yml | 74 ++++-------- playbooks/provisioner/openstack/main.yml | 124 +++++++++----------- 2 files changed, 82 insertions(+), 116 deletions(-) diff --git a/playbooks/provisioner/openstack/cleanup.yml b/playbooks/provisioner/openstack/cleanup.yml index e927444fb..4fa2ecdfd 100644 --- a/playbooks/provisioner/openstack/cleanup.yml +++ b/playbooks/provisioner/openstack/cleanup.yml @@ -36,61 +36,39 @@ gather_facts: no tasks: - name: Delete created nodes - nova_compute: - auth_url: "{{ provisioner.auth_url }}" - state: absent - login_username: "{{ provisioner.username }}" - login_password: "{{ provisioner.password }}" - login_tenant_name: "{{ provisioner.tenant_name }}" - name: "{{ item.value.name }}" + os_server: + state: absent + auth: + auth_url: "{{ provisioner.auth_url }}" + username: "{{ provisioner.username }}" + password: "{{ provisioner.password }}" + project_name: "{{ provisioner.tenant_name }}" + name: "{{ item.value.name }}" # wait for deletion until we can delete flaoting ips explicitly. 
- wait: "yes" + wait: yes with_dict: provisioner.nodes - name: Cleanup Networks hosts: net_prov gather_facts: no tasks: - - name: Detach network interfaces from the router - quantum_router_interface: - auth_url: "{{ provisioner.auth_url }}" - login_username: "{{ provisioner.username }}" - login_password: "{{ provisioner.password }}" - login_tenant_name: "{{ provisioner.tenant_name }}" - state: absent - router_name: "{{ provisioner.network.router.name }}" - subnet_name: "{{ item }}" - with_items: - - "{{ provisioner['network']['network_list']['management']['subnet_name'] }}" - - "{{ provisioner['network']['network_list']['external']['subnet_name'] }}" - - - name: Unset gateway for router - quantum_router_gateway: - auth_url: "{{ provisioner.auth_url }}" - login_username: "{{ provisioner.username }}" - login_password: "{{ provisioner.password }}" - login_tenant_name: "{{ provisioner.tenant_name }}" - router_name: "{{ provisioner.network.router.name }}" - state: absent - - name: Delete created router - quantum_router: - auth_url: "{{ provisioner.auth_url }}" - state: absent - login_username: "{{ provisioner.username }}" - login_password: "{{ provisioner.password }}" - login_tenant_name: "{{ provisioner.tenant_name }}" - name: "{{ provisioner.network.router.name }}" + os_router: + state: absent + auth: + auth_url: "{{ provisioner.auth_url }}" + username: "{{ provisioner.username }}" + password: "{{ provisioner.password }}" + project_name: "{{ provisioner.tenant_name }}" + name: "{{ provisioner.network.router.name }}" - name: Delete created networks - quantum_network: - auth_url: "{{ provisioner.auth_url }}" - state: absent - login_username: "{{ provisioner.username }}" - login_password: "{{ provisioner.password }}" - login_tenant_name: "{{ provisioner.tenant_name }}" - name: "{{ item }}" - with_items: - - "{{ provisioner.network.network_list.management.name }}" - - "{{ provisioner.network.network_list.data.name }}" - - "{{ provisioner.network.network_list.external.name 
}}" + os_network: + state: absent + auth: + auth_url: "{{ provisioner.auth_url }}" + username: "{{ provisioner.username }}" + password: "{{ provisioner.password }}" + project_name: "{{ provisioner.tenant_name }}" + name: "{{ item }}" + with_items: provisioner.network.network_list.values()|map(attribute='name')|list diff --git a/playbooks/provisioner/openstack/main.yml b/playbooks/provisioner/openstack/main.yml index 9b7d02874..98bb64f89 100644 --- a/playbooks/provisioner/openstack/main.yml +++ b/playbooks/provisioner/openstack/main.yml @@ -20,69 +20,57 @@ gather_facts: no tasks: - name: Create networks - quantum_network: - auth_url: "{{ provisioner.auth_url }}" - login_username: "{{ provisioner.username }}" - login_password: "{{ provisioner.password }}" - login_tenant_name: "{{ provisioner.tenant_name }}" - name: "{{ item.value.name }}" + os_network: + state: present + auth: + auth_url: "{{ provisioner.auth_url }}" + username: "{{ provisioner.username }}" + password: "{{ provisioner.password }}" + project_name: "{{ provisioner.tenant_name }}" + name: "{{ item }}" register: "networks" - with_dict: "{{ provisioner.network.network_list }}" + with_items: provisioner.network.network_list.values()|map(attribute='name')|list - name: Create subnets hosts: net_prov gather_facts: no tasks: - name: Create subnet for each network - quantum_subnet: - auth_url: "{{ provisioner.auth_url }}" - login_username: "{{ provisioner.username }}" - login_password: "{{ provisioner.password }}" - login_tenant_name: "{{ provisioner.tenant_name }}" - name: "{{ item.value.subnet_name }}" - cidr: "{{ item.value.cidr }}" - network_name: "{{ item.value.name }}" - enable_dhcp: "{{ item.value.enable_dhcp | default('True') }}" -# dns_nameservers: "{{ item.value.dns_nameservers | join(',') | default('null') }}" - dns_nameservers: "{{ item.value.dns_nameservers.first_dns | default(omit) }}" - allocation_pool_start: "{{ item.value.allocation_pool_start | default(omit) }}" - allocation_pool_end: "{{ 
item.value.allocation_pool_end | default(omit) }}" + os_subnet: + auth: + auth_url: "{{ provisioner.auth_url }}" + username: "{{ provisioner.username }}" + password: "{{ provisioner.password }}" + project_name: "{{ provisioner.tenant_name }}" + name: "{{ item.subnet_name }}" + cidr: "{{ item.cidr }}" + network_name: "{{ item.name }}" + enable_dhcp: "{{ item.enable_dhcp | default('True') }}" +# dns_nameservers: "{{ item.dns_nameservers | join(',') | default('null') }}" + dns_nameservers: "{{ item.dns_nameservers.values() | default(omit) }}" + allocation_pool_start: "{{ item.allocation_pool_start | default(omit) }}" + allocation_pool_end: "{{ item.allocation_pool_end | default(omit) }}" register: "subnets" - with_dict: "{{ provisioner.network.network_list }}" + with_items: provisioner.network.network_list.values() + - name: Create and configure router hosts: net_prov tasks: - name: Create router - quantum_router: - auth_url: "{{ provisioner.auth_url }}" - login_username: "{{ provisioner.username }}" - login_password: "{{ provisioner.password }}" - login_tenant_name: "{{ provisioner.tenant_name }}" + os_router: + auth: + auth_url: "{{ provisioner.auth_url }}" + username: "{{ provisioner.username }}" + password: "{{ provisioner.password }}" + project_name: "{{ provisioner.tenant_name }}" name: "{{ provisioner.network.router.name }}" + network: "{{ provisioner.network.public_net_name }}" + interfaces: + - "{{ provisioner['network']['network_list']['external']['subnet_name'] }}" + - "{{ provisioner['network']['network_list']['management']['subnet_name'] }}" register: router - - name: Attach external interface to the router - quantum_router_interface: - auth_url: "{{ provisioner.auth_url }}" - login_username: "{{ provisioner.username }}" - login_password: "{{ provisioner.password }}" - login_tenant_name: "{{ provisioner.tenant_name }}" - router_name: "{{ provisioner.network.router.name }}" - subnet_name: "{{ item }}" - with_items: - - "{{ 
provisioner['network']['network_list']['external']['subnet_name'] }}" - - "{{ provisioner['network']['network_list']['management']['subnet_name'] }}" - - - name: Set gateway for router - quantum_router_gateway: - auth_url: "{{ provisioner.auth_url }}" - login_username: "{{ provisioner.username }}" - login_password: "{{ provisioner.password }}" - login_tenant_name: "{{ provisioner.tenant_name }}" - network_name: "{{ provisioner.network.public_net_name }}" - router_name: "{{ provisioner.network.router.name }}" - - name: Create nodes - OpenStack hosts: localhost gather_facts: no @@ -94,25 +82,25 @@ results: "{{ provisioner.network.network_list.values() }}" - name: Create nodes - nova_compute: - auth_url: "{{ provisioner.auth_url }}" - state: present - login_username: "{{ provisioner.username }}" - login_password: "{{ provisioner.password }}" - login_tenant_name: "{{ provisioner.tenant_name }}" - name: "{{ item.value.name }}" - image_id: "{{ item.value.image_id }}" - key_name: "{{ provisioner.key_name }}" - flavor_id: "{{ item.value.flavor_id }}" - nics: - - net-id: "{{ networks.results.0.id }}" - - net-id: "{{ networks.results.1.id }}" - - net-id: "{{ networks.results.2.id }}" - config_drive: True - auto_floating_ip: "{{ provisioner.network.use_floating_ip | default(omit) }}" - wait_for: 800 - # our library/nova_compute will retry booting new servers - # in case of errors, until it reaches 'wait_for' seconds timelimit + os_server: + state: present + auth: + auth_url: "{{ provisioner.auth_url }}" + username: "{{ provisioner.username }}" + password: "{{ provisioner.password }}" + project_name: "{{ provisioner.tenant_name }}" + name: "{{ item.value.name }}" + image: "{{ item.value.image_id }}" + key_name: "{{ provisioner.key_name }}" + flavor: "{{ item.value.flavor_id }}" + nics: + - net-id: "{{ networks.results.0.id }}" + - net-id: "{{ networks.results.1.id }}" + - net-id: "{{ networks.results.2.id }}" + config_drive: True + auto_floating_ip: "{{ 
provisioner.network.use_floating_ip | default(false) }}" + timeout: 180 + wait: yes with_dict: provisioner.nodes register: created_nodes @@ -123,8 +111,8 @@ ansible_fqdn: "{{ item.item.value.hostname }}" ansible_user: "{{ item.item.value.remote_user }}" ansible_ssh_private_key_file: "{{ provisioner.key_file }}" - ansible_host: "{%- if item.public_ip %}{{ item.public_ip }}{%- else %}{{ item.info.addresses[provisioner.network.network_list.management.name][0].addr }}{% endif %}" - eth1_interface_ip: "{{ item.info.addresses[provisioner.network.network_list.data.name][0].addr }}" + ansible_host: "{%- if item.interface_ip is defined %}{{ item.interface_ip }}{%- else %}{{ item.openstack.addresses[provisioner.network.network_list.management.name][0].addr }}{% endif %}" + eth1_interface_ip: "{{ item.openstack.addresses[provisioner.network.network_list.data.name][0].addr }}" with_items: created_nodes.results - name: Add Floating IPs From 7005092d543c54bb60a672fd3761caaa3a7b7bf5 Mon Sep 17 00:00:00 2001 From: Yair Fried Date: Mon, 15 Feb 2016 13:08:21 +0200 Subject: [PATCH 132/137] tmp Change-Id: Idb9466f2d8b7b4e499e918ab4f5457bea526bafd --- playbooks/provisioner/openstack/main.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/playbooks/provisioner/openstack/main.yml b/playbooks/provisioner/openstack/main.yml index 98bb64f89..76eee1f77 100644 --- a/playbooks/provisioner/openstack/main.yml +++ b/playbooks/provisioner/openstack/main.yml @@ -19,6 +19,7 @@ hosts: net_prov gather_facts: no tasks: + - debug: var="{{ provisioner.network.network_list }}" - name: Create networks os_network: state: present @@ -29,7 +30,7 @@ project_name: "{{ provisioner.tenant_name }}" name: "{{ item }}" register: "networks" - with_items: provisioner.network.network_list.values()|map(attribute='name')|list + with_items: "{{ provisioner.network.network_list.values()|map(attribute='name')|list }}" - name: Create subnets hosts: net_prov From 6da5923f8194cdf2548d99439e50c350dfe7165f 
Mon Sep 17 00:00:00 2001 From: Yair Fried Date: Tue, 16 Feb 2016 16:38:34 +0200 Subject: [PATCH 133/137] [2.1.0] Update Callbacks For ansible 2.1 --- plugins/callbacks/human_log.py | 4 +++- plugins/callbacks/timing.py | 4 +++- plugins/hacking/log_stdstream.py | 5 ++++- 3 files changed, 10 insertions(+), 3 deletions(-) diff --git a/plugins/callbacks/human_log.py b/plugins/callbacks/human_log.py index daad0f84b..05e199137 100644 --- a/plugins/callbacks/human_log.py +++ b/plugins/callbacks/human_log.py @@ -22,12 +22,14 @@ except ImportError: import json +from ansible.plugins.callback import Callbackbase + # Fields to reformat output for FIELDS = ['cmd', 'command', 'start', 'end', 'delta', 'msg', 'stdout', 'stderr', 'results'] -class CallbackModule(object): +class CallbackModule(Callbackbase): def human_log(self, data): if type(data) == dict: for field in FIELDS: diff --git a/plugins/callbacks/timing.py b/plugins/callbacks/timing.py index 70563759e..4b1de1381 100644 --- a/plugins/callbacks/timing.py +++ b/plugins/callbacks/timing.py @@ -1,7 +1,9 @@ from datetime import datetime +from ansible.plugins.callback import Callbackbase -class CallbackModule(object): + +class CallbackModule(Callbackbase): __color = '\033[01;30m' __endcolor = '\033[00m' diff --git a/plugins/hacking/log_stdstream.py b/plugins/hacking/log_stdstream.py index 77bc678f6..902e13b61 100644 --- a/plugins/hacking/log_stdstream.py +++ b/plugins/hacking/log_stdstream.py @@ -6,6 +6,9 @@ import codecs import locale +from ansible.plugins.callback import Callbackbase + + TIME_FORMAT = "%b %d %Y %H:%M:%S" MARK_FORMAT = "%(now)s ======== MARK ========\n" MSG_FORMAT = "%(now)s - %(category)s - %(data)s\n\n" @@ -55,7 +58,7 @@ def log(host, category, data): fd.write(RESULTS_END) -class CallbackModule(object): +class CallbackModule(Callbackbase): """ logs playbook results, per host, in /tmp/ansible/stdstream_logs """ From 79b6cbb100a872f532960c187d4d41c98aa84458 Mon Sep 17 00:00:00 2001 From: Yair Fried Date: Tue, 
16 Feb 2016 16:47:25 +0200 Subject: [PATCH 134/137] Fix import in callbacks --- plugins/callbacks/human_log.py | 4 ++-- plugins/callbacks/timing.py | 4 ++-- plugins/hacking/log_stdstream.py | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/plugins/callbacks/human_log.py b/plugins/callbacks/human_log.py index 05e199137..18d968e31 100644 --- a/plugins/callbacks/human_log.py +++ b/plugins/callbacks/human_log.py @@ -22,14 +22,14 @@ except ImportError: import json -from ansible.plugins.callback import Callbackbase +from ansible.plugins.callback import CallbackBase # Fields to reformat output for FIELDS = ['cmd', 'command', 'start', 'end', 'delta', 'msg', 'stdout', 'stderr', 'results'] -class CallbackModule(Callbackbase): +class CallbackModule(CallbackBase): def human_log(self, data): if type(data) == dict: for field in FIELDS: diff --git a/plugins/callbacks/timing.py b/plugins/callbacks/timing.py index 4b1de1381..c53ed7e0d 100644 --- a/plugins/callbacks/timing.py +++ b/plugins/callbacks/timing.py @@ -1,9 +1,9 @@ from datetime import datetime -from ansible.plugins.callback import Callbackbase +from ansible.plugins.callback import CallbackBase -class CallbackModule(Callbackbase): +class CallbackModule(CallbackBase): __color = '\033[01;30m' __endcolor = '\033[00m' diff --git a/plugins/hacking/log_stdstream.py b/plugins/hacking/log_stdstream.py index 902e13b61..7b88d2f6b 100644 --- a/plugins/hacking/log_stdstream.py +++ b/plugins/hacking/log_stdstream.py @@ -6,7 +6,7 @@ import codecs import locale -from ansible.plugins.callback import Callbackbase +from ansible.plugins.callback import CallbackBase TIME_FORMAT = "%b %d %Y %H:%M:%S" @@ -58,7 +58,7 @@ def log(host, category, data): fd.write(RESULTS_END) -class CallbackModule(Callbackbase): +class CallbackModule(CallbackBase): """ logs playbook results, per host, in /tmp/ansible/stdstream_logs """ From 30f141716aa7f86402bf5738fbad6f089e96a8f5 Mon Sep 17 00:00:00 2001 From: Yair Fried Date: Tue, 16 Feb 2016 
17:08:13 +0200 Subject: [PATCH 135/137] Backward compt for callbacks Callbacks support ansible 1 and 2 --- plugins/callbacks/human_log.py | 9 +++++++-- plugins/callbacks/timing.py | 6 +++++- plugins/hacking/log_stdstream.py | 9 ++++++--- 3 files changed, 18 insertions(+), 6 deletions(-) diff --git a/plugins/callbacks/human_log.py b/plugins/callbacks/human_log.py index 18d968e31..54e090d95 100644 --- a/plugins/callbacks/human_log.py +++ b/plugins/callbacks/human_log.py @@ -22,14 +22,19 @@ except ImportError: import json -from ansible.plugins.callback import CallbackBase +try: + from ansible.plugins.callback import CallbackBase + ANSIBLE2 = True +except ImportError: + ANSIBLE2 = False + # Fields to reformat output for FIELDS = ['cmd', 'command', 'start', 'end', 'delta', 'msg', 'stdout', 'stderr', 'results'] -class CallbackModule(CallbackBase): +class CallbackModule(CallbackBase if ANSIBLE2 else object): def human_log(self, data): if type(data) == dict: for field in FIELDS: diff --git a/plugins/callbacks/timing.py b/plugins/callbacks/timing.py index c53ed7e0d..3ab35a472 100644 --- a/plugins/callbacks/timing.py +++ b/plugins/callbacks/timing.py @@ -1,6 +1,10 @@ from datetime import datetime -from ansible.plugins.callback import CallbackBase +try: + from ansible.plugins.callback import CallbackBase + ANSIBLE2 = True +except ImportError: + ANSIBLE2 = False class CallbackModule(CallbackBase): diff --git a/plugins/hacking/log_stdstream.py b/plugins/hacking/log_stdstream.py index 7b88d2f6b..b01757915 100644 --- a/plugins/hacking/log_stdstream.py +++ b/plugins/hacking/log_stdstream.py @@ -6,8 +6,11 @@ import codecs import locale -from ansible.plugins.callback import CallbackBase - +try: + from ansible.plugins.callback import CallbackBase + ANSIBLE2 = True +except ImportError: + ANSIBLE2 = False TIME_FORMAT = "%b %d %Y %H:%M:%S" MARK_FORMAT = "%(now)s ======== MARK ========\n" @@ -58,7 +61,7 @@ def log(host, category, data): fd.write(RESULTS_END) -class 
CallbackModule(CallbackBase): +class CallbackModule(CallbackBase if ANSIBLE2 else object): """ logs playbook results, per host, in /tmp/ansible/stdstream_logs """ From 15fc74e012dcb834c7fc836eb91e5f68b990df12 Mon Sep 17 00:00:00 2001 From: Yair Fried Date: Tue, 16 Feb 2016 18:41:29 +0200 Subject: [PATCH 136/137] Fix rhos-release --- library/rhos-release.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/rhos-release.py b/library/rhos-release.py index dfa57b80a..862dc71ec 100644 --- a/library/rhos-release.py +++ b/library/rhos-release.py @@ -184,7 +184,7 @@ def released(line): repodir=repodir, files=list(filenames), releases=installed_releases, - stdout=stdout.splitlines() + stdout=stdout ) From 7018cadc3fbbb081df3ccd64a0d870ec89215198 Mon Sep 17 00:00:00 2001 From: Yair Fried Date: Wed, 17 Feb 2016 08:34:24 +0200 Subject: [PATCH 137/137] [2.0.0] Update more inventory attr Missed after merge. Remove debug --- playbooks/full-job-patch-opendaylight.yml | 6 ++--- .../rdo-manager/cleanup_virthost.yml | 2 +- .../environment-setup/baremetal/run.yml | 6 ++--- .../environment-setup/virthost/gate.yml | 2 +- .../virthost/instack-virt-setup/gate.yml | 2 +- .../virthost/instack-virt-setup/run.yml | 4 ++-- .../environment-setup/virthost/main.yml | 2 +- .../overcloud/ansible-inventory.yml | 12 +++++----- playbooks/installer/rdo-manager/user/main.yml | 2 +- .../rdo-manager/yum_repos/repo-rdo.yml | 16 +++++++------- .../opendaylight/install_odl_source.yml | 22 +++++++++---------- playbooks/provisioner/openstack/main.yml | 1 - playbooks/provisioner/virsh/cleanup.yml | 4 ++-- roles/delorean/tasks/install.yml | 2 +- 14 files changed, 41 insertions(+), 42 deletions(-) diff --git a/playbooks/full-job-patch-opendaylight.yml b/playbooks/full-job-patch-opendaylight.yml index 58a31e22e..dceb05f6d 100644 --- a/playbooks/full-job-patch-opendaylight.yml +++ b/playbooks/full-job-patch-opendaylight.yml @@ -22,10 +22,10 @@ yum: name=createrepo state=present - name: 
create repo folder - file: path=/home/{{ ansible_ssh_user }}/dist-git/ state=directory + file: path=/home/{{ ansible_user }}/dist-git/ state=directory - name: copy the generated rpms - copy: src={{ item }} dest=/home/{{ ansible_ssh_user }}/dist-git/{{ patch.dist_git.name }}/ + copy: src={{ item }} dest=/home/{{ ansible_user }}/dist-git/{{ patch.dist_git.name }}/ with_fileglob: - "{{ lookup('env', 'PWD') }}/generated_rpms/*.rpm" @@ -36,7 +36,7 @@ - name: Create local repo for patched rpm sudo: yes - shell: "createrepo /home/{{ ansible_ssh_user }}/dist-git/{{ patch.dist_git.name }}" + shell: "createrepo /home/{{ ansible_user }}/dist-git/{{ patch.dist_git.name }}" when: hostvars["localhost"].rpm_build_rc == 0 - include: install.yml diff --git a/playbooks/installer/rdo-manager/cleanup_virthost.yml b/playbooks/installer/rdo-manager/cleanup_virthost.yml index 7b4e8efe1..4e1076567 100644 --- a/playbooks/installer/rdo-manager/cleanup_virthost.yml +++ b/playbooks/installer/rdo-manager/cleanup_virthost.yml @@ -3,7 +3,7 @@ - name: clean up rdo-manager virthost hosts: virthost vars: - - ansible_ssh_user: root + - ansible_user: root roles: - { role: cleanup_nodes/rdo-manager, when: (installer.type == "rdo-manager" and provisioner.type == "manual") diff --git a/playbooks/installer/rdo-manager/environment-setup/baremetal/run.yml b/playbooks/installer/rdo-manager/environment-setup/baremetal/run.yml index c3cd7a289..0bd1aa437 100644 --- a/playbooks/installer/rdo-manager/environment-setup/baremetal/run.yml +++ b/playbooks/installer/rdo-manager/environment-setup/baremetal/run.yml @@ -2,7 +2,7 @@ - name: Ensure baremetal host has no yum repos installed hosts: undercloud vars: - - ansible_ssh_user: root + - ansible_user: root tasks: - name: clean release rpms yum: name={{ item }} state=absent @@ -22,7 +22,7 @@ - name: Update packages on the host hosts: undercloud vars: - - ansible_ssh_user: root + - ansible_user: root tasks: - name: repolist command: yum -d 7 repolist @@ -33,7 +33,7 @@ 
- name: Enable ip forwarding hosts: undercloud vars: - - ansible_ssh_user: root + - ansible_user: root tasks: - name: enabling ip forwarding sysctl: name="net.ipv4.ip_forward" value=1 sysctl_set=yes reload=yes diff --git a/playbooks/installer/rdo-manager/environment-setup/virthost/gate.yml b/playbooks/installer/rdo-manager/environment-setup/virthost/gate.yml index 240be499e..11f9370fb 100644 --- a/playbooks/installer/rdo-manager/environment-setup/virthost/gate.yml +++ b/playbooks/installer/rdo-manager/environment-setup/virthost/gate.yml @@ -2,7 +2,7 @@ - name: clean up rdo-manager virthost hosts: virthost vars: - - ansible_ssh_user: root + - ansible_user: root roles: - { role: cleanup_nodes/rdo-manager, when: (installer.type == "rdo-manager" and provisioner.type == "manual") diff --git a/playbooks/installer/rdo-manager/environment-setup/virthost/instack-virt-setup/gate.yml b/playbooks/installer/rdo-manager/environment-setup/virthost/instack-virt-setup/gate.yml index 4f98c55f0..74868de71 100644 --- a/playbooks/installer/rdo-manager/environment-setup/virthost/instack-virt-setup/gate.yml +++ b/playbooks/installer/rdo-manager/environment-setup/virthost/instack-virt-setup/gate.yml @@ -1,7 +1,7 @@ - name: Copy the gating package hosts: virthost vars: - - ansible_ssh_user: root + - ansible_user: root tasks: - name: make temp directory command: mktemp -d diff --git a/playbooks/installer/rdo-manager/environment-setup/virthost/instack-virt-setup/run.yml b/playbooks/installer/rdo-manager/environment-setup/virthost/instack-virt-setup/run.yml index 818e8a640..8e5ffa9b7 100644 --- a/playbooks/installer/rdo-manager/environment-setup/virthost/instack-virt-setup/run.yml +++ b/playbooks/installer/rdo-manager/environment-setup/virthost/instack-virt-setup/run.yml @@ -156,9 +156,9 @@ add_host: name=undercloud groups=undercloud - ansible_ssh_host=undercloud + ansible_host=undercloud ansible_fqdn=undercloud - ansible_ssh_user="{{ provisioner.remote_user }}" + ansible_user="{{ 
provisioner.remote_user }}" ansible_ssh_private_key_file="{{ provisioner.key_file }}" gating_repo="{{ gating_repo is defined and gating_repo }}" diff --git a/playbooks/installer/rdo-manager/environment-setup/virthost/main.yml b/playbooks/installer/rdo-manager/environment-setup/virthost/main.yml index 21cd51e6d..5348018bf 100644 --- a/playbooks/installer/rdo-manager/environment-setup/virthost/main.yml +++ b/playbooks/installer/rdo-manager/environment-setup/virthost/main.yml @@ -2,7 +2,7 @@ - name: clean up rdo-manager virthost hosts: virthost vars: - - ansible_ssh_user: root + - ansible_user: root roles: - { role: cleanup_nodes/rdo-manager, when: (installer.type == "rdo-manager" and provisioner.type == "manual") diff --git a/playbooks/installer/rdo-manager/overcloud/ansible-inventory.yml b/playbooks/installer/rdo-manager/overcloud/ansible-inventory.yml index 7570813c8..cfdb2668c 100644 --- a/playbooks/installer/rdo-manager/overcloud/ansible-inventory.yml +++ b/playbooks/installer/rdo-manager/overcloud/ansible-inventory.yml @@ -24,9 +24,9 @@ add_host: name={{ item.key }} groups=overcloud,controller - ansible_ssh_host={{ item.key }} + ansible_host={{ item.key }} ansible_fqdn={{ item.value }} - ansible_ssh_user="heat-admin" + ansible_user="heat-admin" ansible_ssh_private_key_file="{{ lookup('env', 'PWD') }}/id_rsa_undercloud" when: item.key.startswith('overcloud-controller') @@ -36,9 +36,9 @@ add_host: name={{ item.key }} groups=overcloud,compute - ansible_ssh_host={{ item.key }} + ansible_host={{ item.key }} ansible_fqdn={{ item.value }} - ansible_ssh_user="heat-admin" + ansible_user="heat-admin" ansible_ssh_private_key_file="{{ lookup('env', 'PWD') }}/id_rsa_undercloud" when: item.key.startswith('overcloud-compute') @@ -48,9 +48,9 @@ add_host: name={{ item.key }} groups=overcloud,ceph - ansible_ssh_host={{ item.key }} + ansible_host={{ item.key }} ansible_fqdn={{ item.value }} - ansible_ssh_user="heat-admin" + ansible_user="heat-admin" 
ansible_ssh_private_key_file="{{ lookup('env', 'PWD') }}/id_rsa_undercloud" when: item.key.startswith('overcloud-ceph') diff --git a/playbooks/installer/rdo-manager/user/main.yml b/playbooks/installer/rdo-manager/user/main.yml index c6933f7d2..4d48e021f 100644 --- a/playbooks/installer/rdo-manager/user/main.yml +++ b/playbooks/installer/rdo-manager/user/main.yml @@ -2,7 +2,7 @@ - name: Create the stack user hosts: "{{ host }}" vars: - - ansible_ssh_user: root + - ansible_user: root tasks: - name: create user user: name="{{ provisioner.remote_user }}" state=present password=stack diff --git a/playbooks/installer/rdo-manager/yum_repos/repo-rdo.yml b/playbooks/installer/rdo-manager/yum_repos/repo-rdo.yml index 484949d29..90755836e 100644 --- a/playbooks/installer/rdo-manager/yum_repos/repo-rdo.yml +++ b/playbooks/installer/rdo-manager/yum_repos/repo-rdo.yml @@ -1,10 +1,10 @@ --- -- include: "{{ base_dir }}/khaleesi/playbooks/group_by.yml ansible_ssh_user=root" +- include: "{{ base_dir }}/khaleesi/playbooks/group_by.yml ansible_user=root" - name: RHEL RDO prep hosts: "{{ repo_host }}:&RedHat" vars: - - ansible_ssh_user: root + - ansible_user: root roles: # enable this role when rdo and rhos officially diverge #- { role: linux/rhel/rdo } @@ -13,7 +13,7 @@ - name: CentOS RDO prep hosts: "{{ repo_host }}:&CentOS" vars: - - ansible_ssh_user: root + - ansible_user: root roles: - { role: linux/centos } - { role: product/rdo/rhel } @@ -21,14 +21,14 @@ - name: Linux common prep (Collect performance data, etc.) 
hosts: "{{ repo_host }}" vars: - - ansible_ssh_user: root + - ansible_user: root roles: - { role: linux-common } - name: Enable EPEL hosts: "{{ repo_host }}" vars: - - ansible_ssh_user: root + - ansible_user: root tasks: - name: Install epel release command: "yum localinstall -y {{ distro.epel_release }}" @@ -36,7 +36,7 @@ - name: Add the RDO release repos hosts: "{{ repo_host }}" vars: - - ansible_ssh_user: root + - ansible_user: root tasks: - name: Install rdo-release rpm yum: @@ -47,7 +47,7 @@ - name: Update packages on the host hosts: "{{ repo_host }}" vars: - - ansible_ssh_user: root + - ansible_user: root tasks: - name: repolist command: yum -d 7 repolist @@ -67,7 +67,7 @@ local_action: wait_for_ssh reboot_first=true - host="{{ ansible_ssh_host }}" + host="{{ ansible_host }}" user="root" ssh_opts="-F {{ base_dir }}/khaleesi/ssh.config.ansible" key="{{ ansible_ssh_private_key_file }}" diff --git a/playbooks/post-deploy/packstack/opendaylight/install_odl_source.yml b/playbooks/post-deploy/packstack/opendaylight/install_odl_source.yml index 56420f2ad..9136f90a9 100644 --- a/playbooks/post-deploy/packstack/opendaylight/install_odl_source.yml +++ b/playbooks/post-deploy/packstack/opendaylight/install_odl_source.yml @@ -40,12 +40,12 @@ - name: Clone opendayligt dist-git git: repo='{{ odl.dist_git.url }}' version='{{ odl.dist_git.branch }}' - dest='/home/{{ ansible_ssh_user}}/opendaylight' + dest='/home/{{ ansible_user}}/opendaylight' accept_hostkey=true - name: Clone maven-chain-builder git: repo=https://github.com/bregman-arie/maven-chain-builder.git - dest='/home/{{ ansible_ssh_user }}/maven-chain-builder' + dest='/home/{{ ansible_user }}/maven-chain-builder' accept_hostkey=true - name: Install PME @@ -57,23 +57,23 @@ tasks: - name: Prepare chain file args: - chdir: /home/{{ ansible_ssh_user}}/maven-chain-builder + chdir: /home/{{ ansible_user}}/maven-chain-builder shell: > - sudo sed -i "s/\$TAG_TO_BUILD/rhos-{{ product.full_version }}-patches/g" /home/{{ 
ansible_ssh_user }}/opendaylight/make-vars; - /home/{{ ansible_ssh_user }}/opendaylight/make-vars; - cp /home/{{ ansible_ssh_user}}/opendaylight/opendaylight-chain/opendaylight-chain.ini .; - cd /home/{{ ansible_ssh_user }}/opendaylight && git checkout -- make-vars && git checkout -- opendaylight-chain/opendaylight-chain.ini && cd -; - redhat_version=`cat /home/{{ ansible_ssh_user }}/opendaylight/*/*.ini | grep "redhat_version = " | cut -d= -f2 | xargs`; + sudo sed -i "s/\$TAG_TO_BUILD/rhos-{{ product.full_version }}-patches/g" /home/{{ ansible_user }}/opendaylight/make-vars; + /home/{{ ansible_user }}/opendaylight/make-vars; + cp /home/{{ ansible_user}}/opendaylight/opendaylight-chain/opendaylight-chain.ini .; + cd /home/{{ ansible_user }}/opendaylight && git checkout -- make-vars && git checkout -- opendaylight-chain/opendaylight-chain.ini && cd -; + redhat_version=`cat /home/{{ ansible_user }}/opendaylight/*/*.ini | grep "redhat_version = " | cut -d= -f2 | xargs`; sed -i "s/\%(redhat_version)s/$redhat_version/g" *.ini; - bomver=`cat /home/{{ ansible_ssh_user }}/opendaylight/*/*.ini | grep "bomversion = " | cut -d= -f2 | xargs`; + bomver=`cat /home/{{ ansible_user }}/opendaylight/*/*.ini | grep "bomversion = " | cut -d= -f2 | xargs`; sed -i "s/\%\(bomversion\)s/$bomver/g" *.ini; sed -i "s/skipTests/skipTests=true/g" *.ini; sed -i "s/properties = /\n/g" *.ini - name: Run apache-chain-builder and build the opendaylight disturbution args: - chdir: /home/{{ ansible_ssh_user}}/maven-chain-builder - shell: "python maven-chain-builder.py opendaylight-chain.ini {{ ansible_ssh_user }}" + chdir: /home/{{ ansible_user}}/maven-chain-builder + shell: "python maven-chain-builder.py opendaylight-chain.ini {{ ansible_user }}" - name: Prepare opendaylight distribution for run hosts: odl_controller diff --git a/playbooks/provisioner/openstack/main.yml b/playbooks/provisioner/openstack/main.yml index 76eee1f77..97ad5b3a2 100644 --- a/playbooks/provisioner/openstack/main.yml +++ 
b/playbooks/provisioner/openstack/main.yml @@ -19,7 +19,6 @@ hosts: net_prov gather_facts: no tasks: - - debug: var="{{ provisioner.network.network_list }}" - name: Create networks os_network: state: present diff --git a/playbooks/provisioner/virsh/cleanup.yml b/playbooks/provisioner/virsh/cleanup.yml index 67b8a4182..986d3bca2 100644 --- a/playbooks/provisioner/virsh/cleanup.yml +++ b/playbooks/provisioner/virsh/cleanup.yml @@ -8,8 +8,8 @@ name="{{ item.value.name }}" groups="{{ item.value.groups| join(',') }}" node_label="{{ item.key }}" - ansible_ssh_user="{{ item.value.ssh_user }}" - ansible_ssh_host="{{ item.value.ssh_host }}" + ansible_user="{{ item.value.ssh_user }}" + ansible_host="{{ item.value.ssh_host }}" ansible_ssh_private_key_file="{{ item.value.ssh_key_file }}" with_dict: provisioner.hosts diff --git a/roles/delorean/tasks/install.yml b/roles/delorean/tasks/install.yml index f35bae064..74cfa76a7 100644 --- a/roles/delorean/tasks/install.yml +++ b/roles/delorean/tasks/install.yml @@ -8,7 +8,7 @@ - name: Add user to mock group sudo: yes - user: name={{ ansible_ssh_user }} groups=mock + user: name={{ ansible_user }} groups=mock - name: Create virtualenv for Delorean command: virtualenv {{ ansible_env.HOME }}/delorean-venv creates='{{ ansible_env.HOME }}/delorean-venv'