Merge "Add virtlet confirmance test run"
diff --git a/tcp_tests/managers/sl_manager.py b/tcp_tests/managers/sl_manager.py
index 4eb6228..a1ee3a7 100644
--- a/tcp_tests/managers/sl_manager.py
+++ b/tcp_tests/managers/sl_manager.py
@@ -93,15 +93,14 @@
target_node_name = [node_name for node_name
in self.__underlay.node_names()
if node_to_run in node_name]
- if skip_tests:
- cmd = ("cd {0}; "
- "export VOLUME_STATUS='available'; "
- "pytest -k 'not {1}' {2}".format(
- tests_path, skip_tests, test_to_run))
- else:
- cmd = ("cd {0}; "
- "export VOLUME_STATUS='available'; "
- "pytest -k {1}".format(tests_path, test_to_run))
+ cmd = ("cd {0}; "
+ ". venv-stacklight-pytest/bin/activate;"
+ "export VOLUME_STATUS='available';"
+ "pytest -k {1} {2}".format(
+ tests_path,
+ "'not " + skip_tests + "'" if skip_tests else '',
+ test_to_run))
+
with self.__underlay.remote(node_name=target_node_name[0]) \
as node_remote:
LOG.debug("Run {0} on the node {1}".format(
@@ -115,21 +114,19 @@
target_node_name = [node_name for node_name
in self.__underlay.node_names()
if node_to_run in node_name]
- if skip_tests:
- cmd = ("cd {0}; "
- "export VOLUME_STATUS='available'; "
- "pytest --json=report.json -k 'not {1}' {2}".format(
- tests_path, skip_tests, test_to_run))
- else:
- cmd = ("cd {0}; "
- "export VOLUME_STATUS='available'; "
- "pytest --json=report.json -k {1}".format(
- tests_path, test_to_run))
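+ # pytest-json is now installed inside the activated virtualenv as part of
+ # the same remote command, replacing the separate execute() call below.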
+ cmd = ("cd {0}; "
+ ". venv-stacklight-pytest/bin/activate;"
+ "export VOLUME_STATUS='available';"
+ "pip install pytest-json;"
+ "pytest --json=report.json -k {1} {2}".format(
+ tests_path,
+ "'not " + skip_tests + "'" if skip_tests else '',
+ test_to_run))
+
with self.__underlay.remote(node_name=target_node_name[0]) \
as node_remote:
LOG.debug("Run {0} on the node {1}".format(
cmd, target_node_name[0]))
- node_remote.execute('pip install pytest-json')
node_remote.execute(cmd)
res = node_remote.execute('cd {0}; cat report.json'.format(
tests_path))
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml
index c2e88f6..63e5402 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/openstack.yaml
@@ -268,19 +268,19 @@
retry: {count: 1, delay: 30}
skip_fail: false
-- description: Allow all icmp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
+#- description: Allow all icmp
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+# '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 30}
+# skip_fail: false
-- description: sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
+#- description: sync time
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
+# 'service ntp stop; ntpd -gq; service ntp start'
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 30}
+# skip_fail: false
- description: Temp workaround of PROD-13167
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*' cmd.run
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
index 84b820a..2c5b112 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
@@ -1,4 +1,5 @@
default_context:
+ mcp_version: testing
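+ # NOTE: rewritten at deploy time by the "mcp_version" sed added to
+ # shared-salt.yaml, which sets it to REPOSITORY_SUITE.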
ceph_enabled: 'False'
cicd_enabled: 'False'
cluster_domain: cookied-bm-mcp-dvr-vxlan.local
@@ -77,8 +78,8 @@
openstack_gateway_node01_hostname: gtw01
openstack_gateway_node01_tenant_address: 10.167.6.6
openstack_gateway_node02_address: 10.167.4.225
- #openstack_gateway_node02_hostname: gtw02
- #openstack_gateway_node02_tenant_address: 10.167.6.7
+ openstack_gateway_node02_hostname: gtw02
+ openstack_gateway_node02_tenant_address: 10.167.6.7
openstack_message_queue_address: 10.167.4.40
openstack_message_queue_hostname: msg
openstack_message_queue_node01_address: 10.167.4.41
@@ -101,7 +102,8 @@
openstack_proxy_node01_hostname: prx01
openstack_proxy_node02_address: 10.167.4.82
openstack_proxy_node02_hostname: prx02
- openstack_version: ocata
+ openstack_version: pike
+ cinder_version: ${_param:openstack_version}
oss_enabled: 'False'
platform: openstack_enabled
public_host: ${_param:openstack_proxy_address}
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-lab03-environment.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-lab03-environment.yaml
index b5e831d..b645bf2 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-lab03-environment.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt-context-lab03-environment.yaml
@@ -17,9 +17,9 @@
- infra_kvm
- linux_system_codename_xenial
interfaces:
- enp2s0f0:
+ enp9s0f0:
role: single_mgm_dhcp
- enp2s0f1:
+ enp9s0f1:
role: bond0_ab_ovs_vlan_ctl
kvm02.cookied-bm-mcp-dvr-vxlan.local:
@@ -28,9 +28,9 @@
- infra_kvm
- linux_system_codename_xenial
interfaces:
- enp2s0f0:
+ enp9s0f0:
role: single_mgm_dhcp
- enp2s0f1:
+ enp9s0f1:
role: bond0_ab_ovs_vlan_ctl
kvm03.cookied-bm-mcp-dvr-vxlan.local:
@@ -39,26 +39,26 @@
- infra_kvm
- linux_system_codename_xenial
interfaces:
- eno1:
+ enp9s0f0:
role: single_mgm_dhcp
- eno2:
+ enp9s0f1:
role: bond0_ab_ovs_vlan_ctl
cmp001.cookied-bm-mcp-dvr-vxlan.local:
reclass_storage_name: openstack_compute_node01
roles:
- openstack_compute
-# - features_lvm_backend
+ - features_lvm_backend
- linux_system_codename_xenial
interfaces:
- enp3s0f0:
+ enp9s0f0:
role: single_dhcp
- enp3s0f1:
+ enp9s0f1:
role: bond0_ab_ovs_vxlan_ctl_mesh
single_address: ${_param:openstack_compute_node01_control_address}
tenant_address: ${_param:openstack_compute_node01_tenant_address}
- enp5s0f0:
- role: bond0_ab_ovs_vxlan_ctl_mesh
+# enp5s0f0:
+# role: bond0_ab_ovs_vxlan_ctl_mesh
# Which of enp5s0f1, enp5s0f2, enp5s0f3 is for floating?
# ens6:
# role: bond1_ab_ovs_floating
@@ -70,14 +70,14 @@
- features_lvm_backend
- linux_system_codename_xenial
interfaces:
- eno1:
+ enp9s0f0:
role: single_dhcp
- enp5s0f0:
+ enp9s0f1:
role: bond0_ab_ovs_vxlan_ctl_mesh
single_address: ${_param:openstack_compute_node02_control_address}
tenant_address: ${_param:openstack_compute_node02_tenant_address}
- enp5s0f2:
- role: bond0_ab_ovs_vxlan_ctl_mesh
+# enp5s0f2:
+# role: bond0_ab_ovs_vxlan_ctl_mesh
# Which of eno2, enp5s0f1, enp5s0f3 is for floating?
# ens6:
# role: bond1_ab_ovs_floating
@@ -88,18 +88,18 @@
- openstack_gateway
- linux_system_codename_xenial
interfaces:
- enp3s0f0:
+ enp2s0f0:
role: single_dhcp
- enp3s0f1:
+ enp2s0f1:
role: bond0_ab_dvr_vxlan_ctl_mesh_floating
- #gtw02.cookied-bm-mcp-dvr-vxlan.local:
- # reclass_storage_name: openstack_gateway_node02
- # roles:
- # - openstack_gateway
- # - linux_system_codename_xenial
- # interfaces:
- # eno1:
- # role: single_dhcp
- # eno2:
- # role: bond0_ab_dvr_vxlan_ctl_mesh_floating
+ gtw02.cookied-bm-mcp-dvr-vxlan.local:
+ reclass_storage_name: openstack_gateway_node02
+ roles:
+ - openstack_gateway
+ - linux_system_codename_xenial
+ interfaces:
+ enp2s0f0:
+ role: single_dhcp
+ enp2s0f1:
+ role: bond0_ab_dvr_vxlan_ctl_mesh_floating
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml
index 64d1f38..4af4303 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/salt.yaml
@@ -51,7 +51,7 @@
retry: {count: 1, delay: 10}
skip_fail: false
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "xtrabackup" "maas" "backupninja"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "xtrabackup" "maas" "backupninja" "jenkins"') }}
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml
index 4b6676f..3256b9e 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml
@@ -10,7 +10,7 @@
{% set HOSTNAME_CMP002 = os_env('HOSTNAME_CMP002', 'cmp002.' + DOMAIN_NAME) %}
{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
-#{% set HOSTNAME_GTW02 = os_env('HOSTNAME_GTW02', 'gtw02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW02 = os_env('HOSTNAME_GTW02', 'gtw02.' + DOMAIN_NAME) %}
{% set ETH1_IP_ADDRESS_CFG01 = os_env('ETH1_IP_ADDRESS_CFG01', '172.16.49.2') %}
{% set ETH0_IP_ADDRESS_KVM01 = os_env('ETH0_IP_ADDRESS_KVM01', '172.16.49.11') %}
{% set ETH0_IP_ADDRESS_KVM02 = os_env('ETH0_IP_ADDRESS_KVM02', '172.16.49.12') %}
@@ -18,7 +18,7 @@
{% set ETH0_IP_ADDRESS_CMP001 = os_env('ETH0_IP_ADDRESS_CMP001', '172.16.49.3') %}
{% set ETH0_IP_ADDRESS_CMP002 = os_env('ETH0_IP_ADDRESS_CMP002', '172.16.49.31') %}
{% set ETH0_IP_ADDRESS_GTW01 = os_env('ETH0_IP_ADDRESS_GTW01', '172.16.49.5') %}
-#{% set ETH0_IP_ADDRESS_GTW02 = os_env('ETH0_IP_ADDRESS_GTW02', '172.16.49.4') %}
+{% set ETH0_IP_ADDRESS_GTW02 = os_env('ETH0_IP_ADDRESS_GTW02', '172.16.49.4') %}
{% import 'cookied-bm-mcp-dvr-vxlan/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
{% import 'cookied-bm-mcp-dvr-vxlan/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
@@ -49,7 +49,7 @@
default_{{ HOSTNAME_CMP001 }}: {{ ETH0_IP_ADDRESS_CMP001 }}
default_{{ HOSTNAME_CMP002 }}: {{ ETH0_IP_ADDRESS_CMP002 }}
default_{{ HOSTNAME_GTW01 }}: {{ ETH0_IP_ADDRESS_GTW01 }}
- #default_{{ HOSTNAME_GTW02 }}: {{ ETH0_IP_ADDRESS_GTW02 }}
+ default_{{ HOSTNAME_GTW02 }}: {{ ETH0_IP_ADDRESS_GTW02 }}
ip_ranges:
dhcp: [+2, -4]
private-pool01:
@@ -173,7 +173,7 @@
root_volume_name: system # see 'volumes' below
cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp2s0f0 # see 'interfaces' below.
+ cloud_init_iface_up: enp9s0f0 # see 'interfaces' below.
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 200
@@ -191,14 +191,14 @@
cloudinit_user_data: *cloudinit_user_data_1604
interfaces:
- - label: enp2s0f0
+ - label: enp9s0f0
l2_network_device: admin
mac_address: !os_env ETH0_MAC_ADDRESS_KVM01
- - label: enp2s0f1
+ - label: enp9s0f1
mac_address: !os_env ETH1_MAC_ADDRESS_KVM01
network_config:
- enp2s0f0:
+ enp9s0f0:
networks:
- admin
bond0:
@@ -206,7 +206,7 @@
- control
aggregation: active-backup
parents:
- - enp2s0f1
+ - enp9s0f1
- name: {{ HOSTNAME_KVM02 }}
role: salt_minion
@@ -220,7 +220,7 @@
root_volume_name: system # see 'volumes' below
cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp2s0f0 # see 'interfaces' below.
+ cloud_init_iface_up: enp9s0f0 # see 'interfaces' below.
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 200
@@ -238,14 +238,14 @@
cloudinit_user_data: *cloudinit_user_data_1604
interfaces:
- - label: enp2s0f0
+ - label: enp9s0f0
l2_network_device: admin
mac_address: !os_env ETH0_MAC_ADDRESS_KVM02
- - label: enp2s0f1
+ - label: enp9s0f1
mac_address: !os_env ETH1_MAC_ADDRESS_KVM02
network_config:
- enp2s0f0:
+ enp9s0f0:
networks:
- admin
bond0:
@@ -253,7 +253,7 @@
- control
aggregation: active-backup
parents:
- - enp2s0f1
+ - enp9s0f1
- name: {{ HOSTNAME_KVM03 }}
role: salt_minion
@@ -267,7 +267,7 @@
root_volume_name: system # see 'volumes' below
cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: eno1 # see 'interfaces' below.
+ cloud_init_iface_up: enp9s0f0 # see 'interfaces' below.
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 200
@@ -285,14 +285,14 @@
cloudinit_user_data: *cloudinit_user_data_1604
interfaces:
- - label: eno1
+ - label: enp9s0f0
l2_network_device: admin
mac_address: !os_env ETH0_MAC_ADDRESS_KVM03
- - label: eno2
+ - label: enp9s0f1
mac_address: !os_env ETH1_MAC_ADDRESS_KVM03
network_config:
- eno1:
+ enp9s0f0:
networks:
- admin
bond0:
@@ -300,7 +300,7 @@
- control
aggregation: active-backup
parents:
- - eno2
+ - enp9s0f1
- name: {{ HOSTNAME_CMP001 }}
@@ -315,7 +315,7 @@
root_volume_name: system # see 'volumes' below
cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp3s0f0 # see 'interfaces' below.
+ cloud_init_iface_up: enp9s0f0 # see 'interfaces' below.
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 200
@@ -333,20 +333,20 @@
cloudinit_user_data: *cloudinit_user_data_1604
interfaces:
- - label: enp3s0f0
+ - label: enp9s0f0
l2_network_device: admin
mac_address: !os_env ETH0_MAC_ADDRESS_CMP001
- - label: enp3s0f1
+ - label: enp9s0f1
mac_address: !os_env ETH1_MAC_ADDRESS_CMP001
- - label: enp5s0f0
- mac_address: !os_env ETH2_MAC_ADDRESS_CMP001
- - label: enp5s0f1
- mac_address: !os_env ETH3_MAC_ADDRESS_CMP001
- - label: enp5s0f2
- mac_address: !os_env ETH4_MAC_ADDRESS_CMP001
+# - label: enp5s0f0
+# mac_address: !os_env ETH2_MAC_ADDRESS_CMP001
+# - label: enp5s0f1
+# mac_address: !os_env ETH3_MAC_ADDRESS_CMP001
+# - label: enp5s0f2
+# mac_address: !os_env ETH4_MAC_ADDRESS_CMP001
network_config:
- enp3s0f0:
+ enp9s0f0:
networks:
- admin
bond0:
@@ -354,8 +354,8 @@
- control
aggregation: active-backup
parents:
- - enp3s0f1
- - enp5s0f0
+ - enp9s0f0
+ - enp9s0f1
@@ -371,7 +371,7 @@
root_volume_name: system # see 'volumes' below
cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: eno1 # see 'interfaces' below.
+ cloud_init_iface_up: enp9s0f0 # see 'interfaces' below.
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 200
@@ -389,20 +389,19 @@
cloudinit_user_data: *cloudinit_user_data_1604
interfaces:
- - label: eno1
+ - label: enp9s0f0
l2_network_device: admin
mac_address: !os_env ETH0_MAC_ADDRESS_CMP002
- - label: eth0
+ - label: enp9s0f1
mac_address: !os_env ETH1_MAC_ADDRESS_CMP002
- - label: eth3
- mac_address: !os_env ETH2_MAC_ADDRESS_CMP002
- - label: eth2
- mac_address: !os_env ETH3_MAC_ADDRESS_CMP002
- - label: eth4
- mac_address: !os_env ETH4_MAC_ADDRESS_CMP002
-
+# - label: eth3
+# mac_address: !os_env ETH2_MAC_ADDRESS_CMP002
+# - label: eth2
+# mac_address: !os_env ETH3_MAC_ADDRESS_CMP002
+# - label: eth4
+# mac_address: !os_env ETH4_MAC_ADDRESS_CMP002
network_config:
- eno1:
+ enp9s0f0:
networks:
- admin
bond0:
@@ -410,8 +409,8 @@
- control
aggregation: active-backup
parents:
- - eth0
- - eth3
+ - enp9s0f0
+ - enp9s0f1
- name: {{ HOSTNAME_GTW01 }}
@@ -426,7 +425,7 @@
root_volume_name: system # see 'volumes' below
cloud_init_volume_name: iso # see 'volumes' below
- cloud_init_iface_up: enp3s0f0 # see 'interfaces' below.
+ cloud_init_iface_up: enp2s0f0 # see 'interfaces' below.
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 200
@@ -444,14 +443,14 @@
cloudinit_user_data: *cloudinit_user_data_1604
interfaces:
- - label: enp3s0f0
+ - label: enp2s0f0
l2_network_device: admin
mac_address: !os_env ETH0_MAC_ADDRESS_GTW01
- - label: enp3s0f1
+ - label: enp2s0f1
mac_address: !os_env ETH1_MAC_ADDRESS_GTW01
network_config:
- enp3s0f0:
+ enp2s0f0:
networks:
- admin
bond0:
@@ -459,51 +458,51 @@
- control
aggregation: active-backup
parents:
- - enp3s0f1
+ - enp2s0f1
-# - name: {{ HOSTNAME_GTW02 }}
-# role: salt_minion
-# params:
-# ipmi_user: !os_env IPMI_USER
-# ipmi_password: !os_env IPMI_PASSWORD
-# ipmi_previlegies: OPERATOR
-# ipmi_host: !os_env IPMI_HOST_GTW02 # hostname or IP address
-# ipmi_lan_interface: lanplus
-# ipmi_port: 623
-#
-# root_volume_name: system # see 'volumes' below
-# cloud_init_volume_name: iso # see 'volumes' below
-# cloud_init_iface_up: eno1 # see 'interfaces' below.
-# volumes:
-# - name: system
-# capacity: !os_env NODE_VOLUME_SIZE, 200
-#
-# # The same as for agent URL, here is an URL to the image that should be
-# # used for deploy the node. It should also be accessible from deploying
-# # node when nodes are provisioned by agent. Usually PXE/provision network address is used.
-# source_image: !os_env IRONIC_SOURCE_IMAGE_URL
-# source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
-#
-# - name: iso # Volume with name 'iso' will be used
-# # for store image with cloud-init metadata.
-#
-# cloudinit_meta_data: *cloudinit_meta_data
-# cloudinit_user_data: *cloudinit_user_data_1604
-#
-# interfaces:
-# - label: eno1
-# l2_network_device: admin
-# mac_address: !os_env ETH0_MAC_ADDRESS_GTW02
-# - label: eno2
-# mac_address: !os_env ETH1_MAC_ADDRESS_GTW02
-#
-# network_config:
-# eno1:
-# networks:
-# - admin
-# bond0:
-# networks:
-# - control
-# aggregation: active-backup
-# parents:
-# - eno2
+ - name: {{ HOSTNAME_GTW02 }}
+ role: salt_minion
+ params:
+ ipmi_user: !os_env IPMI_USER
+ ipmi_password: !os_env IPMI_PASSWORD
+ ipmi_previlegies: OPERATOR
+ ipmi_host: !os_env IPMI_HOST_GTW02 # hostname or IP address
+ ipmi_lan_interface: lanplus
+ ipmi_port: 623
+
+ root_volume_name: system # see 'volumes' below
+ cloud_init_volume_name: iso # see 'volumes' below
+ cloud_init_iface_up: enp2s0f0 # see 'interfaces' below.
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 200
+
+ # As with the agent URL, this is a URL to the image that should be
+ # used to deploy the node. It should also be accessible from the deploying
+ # node when nodes are provisioned by the agent. Usually the PXE/provision network address is used.
+ source_image: !os_env IRONIC_SOURCE_IMAGE_URL
+ source_image_checksum: !os_env IRONIC_SOURCE_IMAGE_CHECKSUM
+
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces:
+ - label: enp2s0f0
+ l2_network_device: admin
+ mac_address: !os_env ETH0_MAC_ADDRESS_GTW02
+ - label: enp2s0f1
+ mac_address: !os_env ETH1_MAC_ADDRESS_GTW02
+
+ network_config:
+ enp2s0f0:
+ networks:
+ - admin
+ bond0:
+ networks:
+ - control
+ aggregation: active-backup
+ parents:
+ - enp2s0f1
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail-dpdk.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail-dpdk.yaml
index 769ded7..80f389e 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail-dpdk.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail-dpdk.yaml
@@ -1,4 +1,5 @@
default_context:
+ mcp_version: testing
cicd_enabled: 'False'
cluster_domain: cookied-bm-mcp-ocata-contrail.local
cluster_name: deployment_name
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail.yaml
index f791c23..304bdab 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt-context-cookiecutter-contrail.yaml
@@ -1,4 +1,5 @@
default_context:
+ mcp_version: testing
cicd_enabled: 'False'
cluster_domain: cookied-bm-mcp-ocata-contrail.local
cluster_name: deployment_name
diff --git a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml
index b4517f7..340b3c5 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ocata-contrail/salt.yaml
@@ -61,7 +61,7 @@
{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "xtrabackup" "maas" "backupninja"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "xtrabackup" "maas" "backupninja" "jenkins"') }}
- description: "Workaround for haproxy without listen"
cmd: |
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-cookiecutter-cicd_oss.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-cookiecutter-cicd_oss.yaml
index 3368698..a5006d4 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-cookiecutter-cicd_oss.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt-context-cookiecutter-cicd_oss.yaml
@@ -1,4 +1,5 @@
default_context:
+ mcp_version: testing
cicd_control_node01_address: 10.167.4.91
cicd_control_node01_hostname: cid01
cicd_control_node02_address: 10.167.4.92
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt.yaml b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt.yaml
index b3b9f12..4f2439d 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dop-sl2/salt.yaml
@@ -19,7 +19,7 @@
{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "xtrabackup" "maas" "backupninja"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "xtrabackup" "maas" "backupninja" "jenkins"') }}
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
index 46deafe..315f8cc 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt-context-cookiecutter-openstack_ovs_dvr_vxlan.yaml
@@ -1,4 +1,5 @@
default_context:
+ mcp_version: testing
cicd_control_node01_address: 10.167.4.91
cicd_control_node01_hostname: cid01
cicd_control_node02_address: 10.167.4.92
diff --git a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt.yaml b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt.yaml
index 55ce489..45514bb 100644
--- a/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt.yaml
+++ b/tcp_tests/templates/cookied-mcp-ocata-dvr-vxlan/salt.yaml
@@ -19,7 +19,7 @@
{{ SHARED.MACRO_GENERATE_AND_ENABLE_ENVIRONMENT_MODEL() }}
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "xtrabackup" "maas" "backupninja"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns" "glusterfs" "xtrabackup" "maas" "backupninja" "jenkins"') }}
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
diff --git a/tcp_tests/templates/shared-salt.yaml b/tcp_tests/templates/shared-salt.yaml
index 52ffd96..8606fac 100644
--- a/tcp_tests/templates/shared-salt.yaml
+++ b/tcp_tests/templates/shared-salt.yaml
@@ -9,6 +9,7 @@
{% set SALT_MODELS_SYSTEM_COMMIT = os_env('SALT_MODELS_SYSTEM_COMMIT','') %}
{% set SALT_MODELS_SYSTEM_REF_CHANGE = os_env('SALT_MODELS_SYSTEM_REF_CHANGE','') %}
{% set COOKIECUTTER_REF_CHANGE = os_env('COOKIECUTTER_REF_CHANGE','') %}
+{% set COOKIECUTTER_TEMPLATE_COMMIT = os_env('COOKIECUTTER_TEMPLATE_COMMIT','') %}
{% set ENVIRONMENT_TEMPLATE_REF_CHANGE = os_env('ENVIRONMENT_TEMPLATE_REF_CHANGE','') %}
{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
@@ -223,6 +224,12 @@
pip install cookiecutter
export GIT_SSL_NO_VERIFY=true; git clone https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates /tmp/cookiecutter-templates
+ {%- if COOKIECUTTER_TEMPLATE_COMMIT != '' %}
+ pushd /tmp/cookiecutter-templates
+ git checkout {{ COOKIECUTTER_TEMPLATE_COMMIT }}
+ popd
+ {%- endif %}
+
{%- if COOKIECUTTER_REF_CHANGE != '' %}
pushd /tmp/cookiecutter-templates
git fetch https://gerrit.mcp.mirantis.net/mk/cookiecutter-templates {{ COOKIECUTTER_REF_CHANGE }} && git checkout FETCH_HEAD
@@ -238,6 +245,7 @@
# Override some context parameters
sed -i 's/cluster_name: .*/cluster_name: {{ CLUSTER_NAME }}/g' {{ CLUSTER_CONTEXT_PATH }}
sed -i 's/cluster_domain: .*/cluster_domain: {{ DOMAIN_NAME }}/g' {{ CLUSTER_CONTEXT_PATH }}
+ sed -i 's/mcp_version:.*/mcp_version: {{ REPOSITORY_SUITE }}/g' {{ CLUSTER_CONTEXT_PATH }}
{%- if CONTROL_VLAN %}
sed -i 's/control_vlan: .*/control_vlan: {{ CONTROL_VLAN }}/g' {{ CLUSTER_CONTEXT_PATH }}
{%- endif %}
@@ -297,10 +305,8 @@
popd
{%- endif %}
- export REPLACE_DIRS="/srv/salt/reclass/classes/ /srv/salt/reclass/nodes/"
- find ${REPLACE_DIRS} -type f -exec sed -i 's/apt_mk_version:.*/apt_mk_version: {{ REPOSITORY_SUITE }}/g' {} +
-
{%- if IS_CONTRAIL_LAB %}
+ export REPLACE_DIRS="/srv/salt/reclass/classes/ /srv/salt/reclass/nodes/"
# vSRX IPs for tcp-qa images have 172.16.10.90 hardcoded
find ${REPLACE_DIRS} -type f -exec sed -i 's/opencontrail_router01_address:.*/opencontrail_router01_address: 172.16.10.90/g' {} +
find ${REPLACE_DIRS} -type f -exec sed -i 's/infra_config_deploy_address: 1.*/infra_config_deploy_address: {{ IPV4_NET_ADMIN_PREFIX }}.15/g' {} +
diff --git a/tcp_tests/templates/shared-sl-tests.yaml b/tcp_tests/templates/shared-sl-tests.yaml
index e0f7823..fe7a98a 100644
--- a/tcp_tests/templates/shared-sl-tests.yaml
+++ b/tcp_tests/templates/shared-sl-tests.yaml
@@ -6,13 +6,14 @@
{%- macro MACRO_CLONE_SL_TESTS() %}
{############################################################}
-- description: Clone repo and install tests on cfg node
+- description: Install stacklight-pytest into a virtual environment
cmd: |
set -e;
+ apt-get -y install python-virtualenv;
+ virtualenv venv-stacklight-pytest;
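+ # NOTE: the virtualenv is created relative to the current working
+ # directory (assumed to be /root), matching the relative activate paths used later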
+ . venv-stacklight-pytest/bin/activate;
git clone -b {{ SL_TEST_BRANCH }} {{ SL_TEST_REPO }} /root/stacklight-pytest;
- cd /root/stacklight-pytest;
- python setup.py sdist;
- pip install dist/stacklight_tests-1.0.tar.gz --process-dependency-links
+ pip install /root/stacklight-pytest;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 1}
skip_fail: false
@@ -24,11 +25,10 @@
- description: Configure tests
cmd: |
set -e;
+ . venv-stacklight-pytest/bin/activate;
stl-tests gen-config-mk;
- cp /usr/local/lib/python2.7/dist-packages/stacklight_tests/fixtures/config.yaml /root/stacklight-pytest/stacklight_tests/fixtures/config.yaml;
+ cp venv-stacklight-pytest/lib/python2.7/site-packages/stacklight_tests/fixtures/config.yaml /root/stacklight-pytest/stacklight_tests/fixtures/config.yaml;
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 1}
skip_fail: false
{%- endmacro %}
-
-
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/ceph.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/ceph.yaml
similarity index 93%
rename from tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/ceph.yaml
rename to tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/ceph.yaml
index 6da1183..3c772ec 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/ceph.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/ceph.yaml
@@ -1,4 +1,4 @@
-{% from 'virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CFG01 with context %}
# Install ceph mons
- description: Update grains
@@ -110,6 +110,15 @@
retry: {count: 1, delay: 10}
skip_fail: false
+{% for ssh in config.underlay.ssh %}
+- description: Restart salt-minion as workaround of PROD-16970
+ cmd: |
+ service salt-minion restart; # For case if salt-minion was already installed
+ node_name: {{ ssh['node_name'] }}
+ retry: {count: 1, delay: 1}
+ skip_fail: false
+{% endfor %}
+
- description: Connect ceph to glance
cmd: |
salt -C 'I@ceph:common and I@glance:server' state.sls ceph.common,ceph.setup.keyring,glance;
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/common-services.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/common-services.yaml
similarity index 97%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/common-services.yaml
copy to tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/common-services.yaml
index 408230a..72f2c9d 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/common-services.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/common-services.yaml
@@ -1,4 +1,4 @@
-{% from 'virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CFG01 with context %}
# Install support services
- description: Install keepalived on ctl01
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/openstack.yaml
similarity index 89%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/openstack.yaml
copy to tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/openstack.yaml
index 8b8fa91..b9bfb8d 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/openstack.yaml
@@ -1,8 +1,8 @@
-{% from 'virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_GTW01 with context %}
{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
@@ -232,19 +232,19 @@
retry: {count: 1, delay: 30}
skip_fail: false
-- description: Allow all tcp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Allow all icmp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
+#- description: Allow all tcp
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+# '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 30}
+# skip_fail: false
+#
+#- description: Allow all icmp
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+# '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 30}
+# skip_fail: false
- description: sync time
cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/salt.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/salt.yaml
similarity index 84%
rename from tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/salt.yaml
rename to tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/salt.yaml
index 0b6bec6..48bf15a 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/salt.yaml
@@ -1,6 +1,6 @@
-{% from 'virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml' import DOMAIN_NAME with context %}
+{% from 'virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml' import DOMAIN_NAME with context %}
{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/underlay--meta-data.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay--meta-data.yaml
similarity index 100%
rename from tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/underlay--meta-data.yaml
rename to tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay--meta-data.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay--user-data-cfg01.yaml
similarity index 100%
rename from tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/underlay--user-data-cfg01.yaml
rename to tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay--user-data-cfg01.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay--user-data1604.yaml
similarity index 100%
rename from tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/underlay--user-data1604.yaml
rename to tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay--user-data1604.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml
similarity index 97%
rename from tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml
rename to tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml
index 18a9cd1..0fcc963 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr-ceph-rgw/underlay.yaml
@@ -1,9 +1,9 @@
# Set the repository suite, one of the: 'nightly', 'testing', 'stable', or any other required
{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-{% import 'virtual-mcp-ocata-dvr-ceph-rgw/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-mcp-ocata-dvr-ceph-rgw/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-mcp-ocata-dvr-ceph-rgw/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+{% import 'virtual-mcp-pike-dvr-ceph-rgw/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'virtual-mcp-pike-dvr-ceph-rgw/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'virtual-mcp-pike-dvr-ceph-rgw/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
---
aliases:
@@ -12,7 +12,7 @@
- &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp-ocata-dvr-ceph-rgw') %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp-pike-dvr-ceph-rgw') %}
{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
@@ -33,7 +33,7 @@
template:
devops_settings:
- env_name: {{ os_env('ENV_NAME', 'virtual-mcp-ocata-dvr-ceph-rgw_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+ env_name: {{ os_env('ENV_NAME', 'virtual-mcp-pike-dvr-ceph-rgw_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
address_pools:
private-pool01:
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/common-services.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/common-services.yaml
similarity index 97%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/common-services.yaml
copy to tcp_tests/templates/virtual-mcp-pike-dvr/common-services.yaml
index 408230a..965d297 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/common-services.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr/common-services.yaml
@@ -1,4 +1,4 @@
-{% from 'virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'virtual-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
# Install support services
- description: Install keepalived on ctl01
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml
similarity index 61%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/openstack.yaml
copy to tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml
index 8b8fa91..f9a4127 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr/openstack.yaml
@@ -1,13 +1,35 @@
-{% from 'virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'virtual-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'virtual-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'virtual-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'virtual-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'virtual-mcp-pike-dvr/underlay.yaml' import HOSTNAME_GTW01 with context %}
{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME') %}
+{% set OVERRIDE_POLICY = os_env('OVERRIDE_POLICY', '') %}
# Install OpenStack control services
+{%- if OVERRIDE_POLICY != '' %}
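+# The three steps below upload overrides-policy.yml and swap it in as the
+# cluster control class, keeping the original control.yml as control_orig.yml.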
+- description: Upload policy override
+ upload:
+ local_path: {{ config.salt_deploy.templates_dir }}{{ LAB_CONFIG_NAME }}/
+ local_filename: overrides-policy.yml
+ remote_path: /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/
+ node_name: {{ HOSTNAME_CFG01 }}
+
+- description: Create custom cluster control class
+ cmd: echo -e "classes:\n- cluster.{{ LAB_CONFIG_NAME }}.openstack.control_orig\n$(cat /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml)" > /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml
+ node_name: {{ HOSTNAME_CFG01 }}
+
+- description: Rename control classes
+ cmd: mv /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control_orig.yml &&
+ ln -s /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/overrides-policy.yml /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/control.yml &&
+ salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.sync_all &&
+ salt --hard-crash --state-output=mixed --state-verbose=False '*' saltutil.refresh_pillar
+ node_name: {{ HOSTNAME_CFG01 }}
+{%- endif %}
+
- description: Install glance on all controllers
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@glance:server' state.sls glance -b 1
@@ -64,7 +86,7 @@
- description: Check glance image-list
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; glance image-list'
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
@@ -79,7 +101,7 @@
- description: Check nova service-list
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; nova --debug service-list'
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova --debug service-list'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 3, delay: 5}
skip_fail: false
@@ -89,12 +111,12 @@
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@cinder:controller' state.sls cinder -b 1
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
+ retry: {count: 1, delay: 5}
skip_fail: false
- description: Check cinder list
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; cinder list'
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
@@ -114,13 +136,27 @@
retry: {count: 1, delay: 5}
skip_fail: false
-- description: Check neutron agent-list
+# Install designate
+- description: Install powerdns
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; neutron agent-list'
+ -C 'I@powerdns:server' state.sls powerdns.server
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
+- description: Install designate
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@designate:server' state.sls designate -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 10}
+ skip_fail: false
+
+- description: Check neutron agent-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
- description: Install heat service
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
@@ -183,6 +219,13 @@
retry: {count: 2, delay: 30}
skip_fail: false
+- description: Register image in glance
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
- description: Create net04_external
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
'. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
@@ -232,19 +275,19 @@
retry: {count: 1, delay: 30}
skip_fail: false
-- description: Allow all tcp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Allow all icmp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
+#- description: Allow all tcp
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+# '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 30}
+# skip_fail: false
+#
+#- description: Allow all icmp
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+# '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 30}
+# skip_fail: false
- description: sync time
cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
@@ -253,12 +296,92 @@
retry: {count: 1, delay: 30}
skip_fail: false
+# Configure cinder-volume via salt-call (workaround for PROD-13167)
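+# fdisk is scripted to create a primary partition on /dev/vdb (new partition,
+# primary, default bounds, write); /dev/vdb1 then becomes the LVM PV for the
+# cinder-volumes VG.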
+- description: Set disks 01
+ cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Set disks 02
+ cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+ node_name: {{ HOSTNAME_CTL02 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Set disks 03
+ cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+ node_name: {{ HOSTNAME_CTL03 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create partitions 01
+ cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create partitions 02
+ cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+ node_name: {{ HOSTNAME_CTL02 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create partitions 03
+ cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+ node_name: {{ HOSTNAME_CTL03 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: create volume_group
+ cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Install cinder-volume
+ cmd: salt 'ctl*' cmd.run 'apt-get install cinder-volume -y'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Install crudini
+ cmd: salt "ctl*" cmd.run 'apt-get install crudini -y'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Temporary workaround - set enabled_backends value 01
+ cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Temporary workaround - set enabled_backends value 02
+ cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+ node_name: {{ HOSTNAME_CTL02 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Temporary workaround - set enabled_backends value 03
+ cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+ node_name: {{ HOSTNAME_CTL03 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
- description: Install docker.io on gtw
cmd: salt-call cmd.run 'apt-get install docker.io -y'
node_name: {{ HOSTNAME_GTW01 }}
retry: {count: 1, delay: 30}
skip_fail: false
+- description: Restart cinder volume
+ cmd: |
+ salt -C 'I@cinder:controller' service.restart cinder-volume;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
- description: create rc file on cfg
cmd: scp ctl01:/root/keystonercv3 /root
node_name: {{ HOSTNAME_CFG01 }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/overrides-policy.yml b/tcp_tests/templates/virtual-mcp-pike-dvr/overrides-policy.yml
new file mode 100644
index 0000000..1f35a6b
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr/overrides-policy.yml
@@ -0,0 +1,40 @@
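+# NOTE (assumption): keys left without a value (e.g. 'volume:extend':) are
+# expected to drop that rule from the service policy file, per the policy
+# override behavior of the salt formulas.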
+parameters:
+ nova:
+ controller:
+ policy:
+ context_is_admin: 'role:admin or role:administrator'
+ 'compute:create': 'rule:admin_or_owner'
+ 'compute:create:attach_network':
+ cinder:
+ controller:
+ policy:
+ 'volume:delete': 'rule:admin_or_owner'
+ 'volume:extend':
+ neutron:
+ server:
+ policy:
+ create_subnet: 'rule:admin_or_network_owner'
+ 'get_network:queue_id': 'rule:admin_only'
+ 'create_network:shared':
+ glance:
+ server:
+ policy:
+ publicize_image: "role:admin"
+ add_member:
+ keystone:
+ server:
+ policy:
+ admin_or_token_subject: 'rule:admin_required or rule:token_subject'
+ heat:
+ server:
+ policy:
+ context_is_admin: 'role:admin and is_admin_project:True'
+ deny_stack_user: 'not role:heat_stack_user'
+ deny_everybody: '!'
+ 'cloudformation:ValidateTemplate': 'rule:deny_everybody'
+ 'cloudformation:DescribeStackResources':
+ ceilometer:
+ server:
+ policy:
+ segregation: 'rule:context_is_admin'
+ 'telemetry:get_resource':
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/salt.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/salt.yaml
similarity index 80%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/salt.yaml
copy to tcp_tests/templates/virtual-mcp-pike-dvr/salt.yaml
index 0b6bec6..a7c06dd 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr/salt.yaml
@@ -1,6 +1,6 @@
-{% from 'virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml' import DOMAIN_NAME with context %}
+{% from 'virtual-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'virtual-mcp-pike-dvr/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'virtual-mcp-pike-dvr/underlay.yaml' import DOMAIN_NAME with context %}
{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
@@ -11,7 +11,7 @@
{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
-{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch"') }}
+{{ SHARED.MACRO_CONFIGURE_RECLASS(FORMULA_SERVICES='"linux" "reclass" "salt" "openssh" "ntp" "git" "nginx" "collectd" "sensu" "heka" "sphinx" "keystone" "mysql" "grafana" "haproxy" "rsyslog" "horizon" "prometheus" "telegraf" "elasticsearch" "powerdns"') }}
{{ SHARED.MACRO_INSTALL_SALT_MINIONS() }}
diff --git a/tcp_tests/templates/virtual-mcp-pike-dvr/sl.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/sl.yaml
new file mode 100644
index 0000000..04b5ca7
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr/sl.yaml
@@ -0,0 +1,177 @@
+{% from 'virtual-mcp-pike-dvr/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
+# Install docker swarm
+- description: Install keepalived on mon nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'mon*' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Check the VIP on mon nodes
+ cmd: |
+ SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
+ echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Configure docker service
+ cmd: salt -C 'I@docker:swarm' state.sls docker.host
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install docker swarm on master node
+ cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Send grains to the swarm slave nodes
+ cmd: salt -C 'I@docker:swarm' state.sls salt.minion.grains
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Update mine
+ cmd: salt -C 'I@docker:swarm' mine.update
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Refresh modules
+ cmd: salt -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Rerun swarm on slaves for proper token population
+ cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Configure slave nodes
+ cmd: salt -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: List registered Docker swarm nodes
+ cmd: salt -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+# Install slv2 infra
+- description: Install telegraf
+ cmd: salt -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
+- description: Configure Prometheus exporters, if pillar 'prometheus:exporters' exists on any server
+ cmd: |
+ if salt -C 'I@prometheus:exporters' match.pillar 'prometheus:exporters' ; then
+ salt -C 'I@prometheus:exporters' state.sls prometheus
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Configure collector
+ cmd: salt -C 'I@heka:log_collector' state.sls heka.log_collector
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install elasticsearch server
+ cmd: salt -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install kibana server
+ cmd: salt -C 'I@kibana:server' state.sls kibana.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install elasticsearch client
+ cmd: salt -C 'I@elasticsearch:client' state.sls elasticsearch.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install kibana client
+ cmd: salt -C 'I@kibana:client' state.sls kibana.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Check influxdb
+ cmd: |
+ INFLUXDB_SERVICE=`salt -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
+ echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
+ if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
+ salt -C 'I@influxdb:server' state.sls influxdb
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+# Collect grains needed to configure the services
+
+- description: Get grains
+ cmd: salt -C 'I@salt:minion' state.sls salt.minion.grains
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Sync modules
+ cmd: salt -C 'I@salt:minion' saltutil.refresh_modules
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Update mine
+ cmd: salt -C 'I@salt:minion' mine.update; sleep 5;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 15}
+ skip_fail: false
+
+# Configure the services running in Docker Swarm
+- description: Install prometheus alertmanager
+ cmd: salt -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Run docker state
+ cmd: salt -C 'I@docker:swarm:role:master' state.sls docker
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: docker ps
+ cmd: salt -C 'I@docker:swarm' dockerng.ps
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Configure Grafana dashboards and datasources
+ cmd: sleep 30; salt -C 'I@grafana:client' state.sls grafana.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
+- description: Run salt minion to create cert files
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
+{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/underlay--meta-data.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/underlay--meta-data.yaml
similarity index 100%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/underlay--meta-data.yaml
copy to tcp_tests/templates/virtual-mcp-pike-dvr/underlay--meta-data.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/underlay--user-data-cfg01.yaml
similarity index 100%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/underlay--user-data-cfg01.yaml
copy to tcp_tests/templates/virtual-mcp-pike-dvr/underlay--user-data-cfg01.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/underlay--user-data1604.yaml
similarity index 100%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/underlay--user-data1604.yaml
copy to tcp_tests/templates/virtual-mcp-pike-dvr/underlay--user-data1604.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml b/tcp_tests/templates/virtual-mcp-pike-dvr/underlay.yaml
similarity index 74%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml
copy to tcp_tests/templates/virtual-mcp-pike-dvr/underlay.yaml
index 18a9cd1..73d545d 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-dvr/underlay.yaml
@@ -1,9 +1,9 @@
# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-{% import 'virtual-mcp-ocata-dvr-ceph-rgw/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-mcp-ocata-dvr-ceph-rgw/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-mcp-ocata-dvr-ceph-rgw/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+{% import 'virtual-mcp-pike-dvr/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'virtual-mcp-pike-dvr/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'virtual-mcp-pike-dvr/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
---
aliases:
@@ -12,7 +12,7 @@
- &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp-ocata-dvr-ceph-rgw') %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp-pike-dvr') %}
{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
@@ -20,20 +20,17 @@
{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMN01 = os_env('HOSTNAME_CMN01', 'cmn01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMN02 = os_env('HOSTNAME_CMN02', 'cmn02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMN03 = os_env('HOSTNAME_CMN03', 'cmn03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_RGW01 = os_env('HOSTNAME_CMN01', 'rgw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_RGW02 = os_env('HOSTNAME_CMN02', 'rgw02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_RGW03 = os_env('HOSTNAME_CMN03', 'rgw03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_OSD01 = os_env('HOSTNAME_OSD01', 'osd01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_OSD02 = os_env('HOSTNAME_OSD02', 'osd02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DNS01 = os_env('HOSTNAME_DNS01', 'dns01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_DNS02 = os_env('HOSTNAME_DNS02', 'dns02.' + DOMAIN_NAME) %}
{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
template:
devops_settings:
- env_name: {{ os_env('ENV_NAME', 'virtual-mcp-ocata-dvr-ceph-rgw_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+ env_name: {{ os_env('ENV_NAME', 'virtual-mcp-pike-dvr_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
address_pools:
private-pool01:
@@ -48,18 +45,15 @@
default_{{ HOSTNAME_CTL03 }}: +103
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_OSD01 }}: +94
- default_{{ HOSTNAME_OSD02 }}: +95
- default_{{ HOSTNAME_CMN01 }}: +96
- default_{{ HOSTNAME_CMN02 }}: +97
- default_{{ HOSTNAME_CMN03 }}: +98
- default_{{ HOSTNAME_RGW01 }}: +76
- default_{{ HOSTNAME_RGW02 }}: +77
- default_{{ HOSTNAME_RGW03 }}: +78
+ default_{{ HOSTNAME_MON01 }}: +107
+ default_{{ HOSTNAME_MON02 }}: +108
+ default_{{ HOSTNAME_MON03 }}: +109
default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_DNS01 }}: +111
+ default_{{ HOSTNAME_DNS02 }}: +112
default_{{ HOSTNAME_PRX01 }}: +121
ip_ranges:
- dhcp: [+70, -10]
+ dhcp: [+90, -10]
admin-pool01:
net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
@@ -73,18 +67,15 @@
default_{{ HOSTNAME_CTL03 }}: +103
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_OSD01 }}: +94
- default_{{ HOSTNAME_OSD02 }}: +95
- default_{{ HOSTNAME_CMN01 }}: +96
- default_{{ HOSTNAME_CMN02 }}: +97
- default_{{ HOSTNAME_CMN03 }}: +98
- default_{{ HOSTNAME_RGW01 }}: +76
- default_{{ HOSTNAME_RGW02 }}: +77
- default_{{ HOSTNAME_RGW03 }}: +78
+ default_{{ HOSTNAME_MON01 }}: +107
+ default_{{ HOSTNAME_MON02 }}: +108
+ default_{{ HOSTNAME_MON03 }}: +109
default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_DNS01 }}: +111
+ default_{{ HOSTNAME_DNS02 }}: +112
default_{{ HOSTNAME_PRX01 }}: +121
ip_ranges:
- dhcp: [+70, -10]
+ dhcp: [+90, -10]
tenant-pool01:
net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
@@ -98,15 +89,12 @@
default_{{ HOSTNAME_CTL03 }}: +103
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_OSD01 }}: +94
- default_{{ HOSTNAME_OSD02 }}: +95
- default_{{ HOSTNAME_CMN01 }}: +96
- default_{{ HOSTNAME_CMN02 }}: +97
- default_{{ HOSTNAME_CMN03 }}: +98
- default_{{ HOSTNAME_RGW01 }}: +76
- default_{{ HOSTNAME_RGW02 }}: +77
- default_{{ HOSTNAME_RGW03 }}: +78
+ default_{{ HOSTNAME_MON01 }}: +107
+ default_{{ HOSTNAME_MON02 }}: +108
+ default_{{ HOSTNAME_MON03 }}: +109
default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_DNS01 }}: +111
+ default_{{ HOSTNAME_DNS02 }}: +112
default_{{ HOSTNAME_PRX01 }}: +121
ip_ranges:
dhcp: [+10, -10]
@@ -123,15 +111,12 @@
default_{{ HOSTNAME_CTL03 }}: +103
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_OSD01 }}: +94
- default_{{ HOSTNAME_OSD02 }}: +95
- default_{{ HOSTNAME_CMN01 }}: +96
- default_{{ HOSTNAME_CMN02 }}: +97
- default_{{ HOSTNAME_CMN03 }}: +98
- default_{{ HOSTNAME_RGW01 }}: +76
- default_{{ HOSTNAME_RGW02 }}: +77
- default_{{ HOSTNAME_RGW03 }}: +78
+ default_{{ HOSTNAME_MON01 }}: +107
+ default_{{ HOSTNAME_MON02 }}: +108
+ default_{{ HOSTNAME_MON03 }}: +109
default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_DNS01 }}: +111
+ default_{{ HOSTNAME_DNS02 }}: +112
default_{{ HOSTNAME_PRX01 }}: +121
ip_ranges:
dhcp: [+10, -10]
@@ -322,10 +307,10 @@
interfaces: *interfaces
network_config: *network_config
- - name: {{ HOSTNAME_CMN01 }}
+ - name: {{ HOSTNAME_MON01 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
+ vcpu: !os_env SLAVE_NODE_CPU, 3
memory: !os_env SLAVE_NODE_MEMORY, 2048
boot:
- hd
@@ -348,10 +333,10 @@
interfaces: *interfaces
network_config: *network_config
- - name: {{ HOSTNAME_CMN02 }}
+ - name: {{ HOSTNAME_MON02 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
+ vcpu: !os_env SLAVE_NODE_CPU, 3
memory: !os_env SLAVE_NODE_MEMORY, 2048
boot:
- hd
@@ -374,151 +359,10 @@
interfaces: *interfaces
network_config: *network_config
- - name: {{ HOSTNAME_CMN03 }}
+ - name: {{ HOSTNAME_MON03 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_OSD01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: ceph
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_OSD02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: ceph
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_RGW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_RGW02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
- - name: {{ HOSTNAME_RGW03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
+ vcpu: !os_env SLAVE_NODE_CPU, 3
memory: !os_env SLAVE_NODE_MEMORY, 2048
boot:
- hd
@@ -545,7 +389,7 @@
role: salt_minion
params:
vcpu: !os_env SLAVE_NODE_CPU, 1
- memory: !os_env SLAVE_NODE_MEMORY, 2048
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
boot:
- hd
cloud_init_volume_name: iso
@@ -555,6 +399,9 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: cloudimage1604
format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -567,6 +414,7 @@
interfaces: *interfaces
network_config: *network_config
+
- name: {{ HOSTNAME_CMP01 }}
role: salt_minion
params:
@@ -669,3 +517,55 @@
interfaces: *all_interfaces
network_config: *all_network_config
+
+ - name: {{ HOSTNAME_DNS01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_DNS02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/ceph.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/ceph.yaml
similarity index 89%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/ceph.yaml
copy to tcp_tests/templates/virtual-mcp-pike-ovs-ceph/ceph.yaml
index 6da1183..94bce47 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/ceph.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/ceph.yaml
@@ -1,4 +1,5 @@
-{% from 'virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'virtual-mcp-pike-ovs-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
# Install ceph mons
- description: Update grains
@@ -96,9 +97,11 @@
- description: Install radosgw if it exists
cmd: |
- salt -C 'I@ceph:radosgw' saltutil.sync_grains;
- salt -C 'I@ceph:radosgw' state.sls ceph.radosgw;
- salt -C 'I@keystone:client' state.sls keystone.client;
+ if salt -C 'I@ceph:radosgw' match.pillar 'ceph:radosgw' ; then
+ salt -C 'I@ceph:radosgw' saltutil.sync_grains;
+ salt -C 'I@ceph:radosgw' state.sls ceph.radosgw;
+ salt -C 'I@keystone:client' state.sls keystone.client;
+ fi
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 2, delay: 5}
skip_fail: false
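+# (same "apply only if pillar exists" guard as in the sl templates: the
+# radosgw and keystone.client states run only when some minion matches
+# I@ceph:radosgw, so rgw-less labs still pass)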
@@ -110,6 +113,15 @@
retry: {count: 1, delay: 10}
skip_fail: false
+{% for ssh in config.underlay.ssh %}
+- description: Restart salt-minion as a workaround for PROD-16970
+ cmd: |
+ service salt-minion restart; # in case salt-minion was already installed
+ node_name: {{ ssh['node_name'] }}
+ retry: {count: 1, delay: 1}
+ skip_fail: false
+{% endfor %}
+
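+# The Jinja loop above is expanded at template render time: one restart step
+# is emitted per entry in config.underlay.ssh, each executed locally on that
+# node via its node_name (not a runtime loop inside a single step).
+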
- description: Connect ceph to glance
cmd: |
salt -C 'I@ceph:common and I@glance:server' state.sls ceph.common,ceph.setup.keyring,glance;
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/common-services.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/common-services.yaml
similarity index 97%
rename from tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/common-services.yaml
rename to tcp_tests/templates/virtual-mcp-pike-ovs-ceph/common-services.yaml
index 408230a..a33ed13 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/common-services.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/common-services.yaml
@@ -1,4 +1,4 @@
-{% from 'virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'virtual-mcp-pike-ovs-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
# Install support services
- description: Install keepalived on ctl01
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/openstack.yaml
similarity index 89%
rename from tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/openstack.yaml
rename to tcp_tests/templates/virtual-mcp-pike-ovs-ceph/openstack.yaml
index 8b8fa91..d064d37 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/openstack.yaml
@@ -1,8 +1,8 @@
-{% from 'virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'virtual-mcp-pike-ovs-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'virtual-mcp-pike-ovs-ceph/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'virtual-mcp-pike-ovs-ceph/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'virtual-mcp-pike-ovs-ceph/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'virtual-mcp-pike-ovs-ceph/underlay.yaml' import HOSTNAME_GTW01 with context %}
{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
@@ -232,19 +232,19 @@
retry: {count: 1, delay: 30}
skip_fail: false
-- description: Allow all tcp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Allow all icmp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
+#- description: Allow all tcp
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+# '. /root/keystonercv3; openstack security group rule create --proto tcp --dst-port 22 default'
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 30}
+# skip_fail: false
+#
+#- description: Allow all icmp
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+# '. /root/keystonercv3; openstack security group rule create --proto icmp default'
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 30}
+# skip_fail: false
- description: sync time
cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/salt.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/salt.yaml
similarity index 83%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/salt.yaml
copy to tcp_tests/templates/virtual-mcp-pike-ovs-ceph/salt.yaml
index 0b6bec6..18f7002 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/salt.yaml
@@ -1,6 +1,6 @@
-{% from 'virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml' import DOMAIN_NAME with context %}
+{% from 'virtual-mcp-pike-ovs-ceph/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'virtual-mcp-pike-ovs-ceph/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'virtual-mcp-pike-ovs-ceph/underlay.yaml' import DOMAIN_NAME with context %}
{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/underlay--meta-data.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay--meta-data.yaml
similarity index 100%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/underlay--meta-data.yaml
copy to tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay--meta-data.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay--user-data-cfg01.yaml
similarity index 100%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/underlay--user-data-cfg01.yaml
copy to tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay--user-data-cfg01.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay--user-data1604.yaml
similarity index 100%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/underlay--user-data1604.yaml
copy to tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay--user-data1604.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay.yaml
similarity index 81%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml
copy to tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay.yaml
index 18a9cd1..bfbb969 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs-ceph/underlay.yaml
@@ -1,9 +1,9 @@
# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-{% import 'virtual-mcp-ocata-dvr-ceph-rgw/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-mcp-ocata-dvr-ceph-rgw/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-mcp-ocata-dvr-ceph-rgw/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+{% import 'virtual-mcp-pike-ovs-ceph/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'virtual-mcp-pike-ovs-ceph/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'virtual-mcp-pike-ovs-ceph/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
---
aliases:
@@ -12,7 +12,7 @@
- &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp-ocata-dvr-ceph-rgw') %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp-pike-ovs-ceph') %}
{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
@@ -23,9 +23,6 @@
{% set HOSTNAME_CMN01 = os_env('HOSTNAME_CMN01', 'cmn01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CMN02 = os_env('HOSTNAME_CMN02', 'cmn02.' + DOMAIN_NAME) %}
{% set HOSTNAME_CMN03 = os_env('HOSTNAME_CMN03', 'cmn03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_RGW01 = os_env('HOSTNAME_CMN01', 'rgw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_RGW02 = os_env('HOSTNAME_CMN02', 'rgw02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_RGW03 = os_env('HOSTNAME_CMN03', 'rgw03.' + DOMAIN_NAME) %}
{% set HOSTNAME_OSD01 = os_env('HOSTNAME_OSD01', 'osd01.' + DOMAIN_NAME) %}
{% set HOSTNAME_OSD02 = os_env('HOSTNAME_OSD02', 'osd02.' + DOMAIN_NAME) %}
{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
@@ -33,7 +30,7 @@
template:
devops_settings:
- env_name: {{ os_env('ENV_NAME', 'virtual-mcp-ocata-dvr-ceph-rgw_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+ env_name: {{ os_env('ENV_NAME', 'virtual-mcp-pike-ovs-ceph_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
address_pools:
private-pool01:
@@ -53,13 +50,10 @@
default_{{ HOSTNAME_CMN01 }}: +96
default_{{ HOSTNAME_CMN02 }}: +97
default_{{ HOSTNAME_CMN03 }}: +98
- default_{{ HOSTNAME_RGW01 }}: +76
- default_{{ HOSTNAME_RGW02 }}: +77
- default_{{ HOSTNAME_RGW03 }}: +78
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
ip_ranges:
- dhcp: [+70, -10]
+ dhcp: [+90, -10]
admin-pool01:
net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
@@ -78,13 +72,10 @@
default_{{ HOSTNAME_CMN01 }}: +96
default_{{ HOSTNAME_CMN02 }}: +97
default_{{ HOSTNAME_CMN03 }}: +98
- default_{{ HOSTNAME_RGW01 }}: +76
- default_{{ HOSTNAME_RGW02 }}: +77
- default_{{ HOSTNAME_RGW03 }}: +78
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
ip_ranges:
- dhcp: [+70, -10]
+ dhcp: [+90, -10]
tenant-pool01:
net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
@@ -103,9 +94,6 @@
default_{{ HOSTNAME_CMN01 }}: +96
default_{{ HOSTNAME_CMN02 }}: +97
default_{{ HOSTNAME_CMN03 }}: +98
- default_{{ HOSTNAME_RGW01 }}: +76
- default_{{ HOSTNAME_RGW02 }}: +77
- default_{{ HOSTNAME_RGW03 }}: +78
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
ip_ranges:
@@ -128,9 +116,6 @@
default_{{ HOSTNAME_CMN01 }}: +96
default_{{ HOSTNAME_CMN02 }}: +97
default_{{ HOSTNAME_CMN03 }}: +98
- default_{{ HOSTNAME_RGW01 }}: +76
- default_{{ HOSTNAME_RGW02 }}: +77
- default_{{ HOSTNAME_RGW03 }}: +78
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
ip_ranges:
@@ -325,7 +310,7 @@
- name: {{ HOSTNAME_CMN01 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
+ vcpu: !os_env SLAVE_NODE_CPU, 3
memory: !os_env SLAVE_NODE_MEMORY, 2048
boot:
- hd
@@ -351,7 +336,7 @@
- name: {{ HOSTNAME_CMN02 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
+ vcpu: !os_env SLAVE_NODE_CPU, 3
memory: !os_env SLAVE_NODE_MEMORY, 2048
boot:
- hd
@@ -377,7 +362,7 @@
- name: {{ HOSTNAME_CMN03 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
+ vcpu: !os_env SLAVE_NODE_CPU, 3
memory: !os_env SLAVE_NODE_MEMORY, 2048
boot:
- hd
@@ -403,7 +388,7 @@
- name: {{ HOSTNAME_OSD01 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
+ vcpu: !os_env SLAVE_NODE_CPU, 3
memory: !os_env SLAVE_NODE_MEMORY, 2048
boot:
- hd
@@ -417,9 +402,6 @@
- name: cinder
capacity: 50
format: qcow2
- - name: ceph
- capacity: 50
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
@@ -435,7 +417,7 @@
- name: {{ HOSTNAME_OSD02 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
+ vcpu: !os_env SLAVE_NODE_CPU, 3
memory: !os_env SLAVE_NODE_MEMORY, 2048
boot:
- hd
@@ -449,86 +431,6 @@
- name: cinder
capacity: 50
format: qcow2
- - name: ceph
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_RGW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_RGW02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
- - name: {{ HOSTNAME_RGW03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/common-services.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/common-services.yaml
similarity index 97%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/common-services.yaml
copy to tcp_tests/templates/virtual-mcp-pike-ovs/common-services.yaml
index 408230a..c762467 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/common-services.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs/common-services.yaml
@@ -1,4 +1,4 @@
-{% from 'virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'virtual-mcp-pike-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
# Install support services
- description: Install keepalived on ctl01
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/openstack.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml
similarity index 68%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/openstack.yaml
copy to tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml
index 8b8fa91..ff9bd81 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs/openstack.yaml
@@ -1,8 +1,8 @@
-{% from 'virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'virtual-mcp-pike-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'virtual-mcp-pike-ovs/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'virtual-mcp-pike-ovs/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'virtual-mcp-pike-ovs/underlay.yaml' import HOSTNAME_CTL03 with context %}
+{% from 'virtual-mcp-pike-ovs/underlay.yaml' import HOSTNAME_GTW01 with context %}
{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
@@ -64,7 +64,7 @@
- description: Check glance image-list
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; glance image-list'
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
@@ -79,7 +79,7 @@
- description: Check nova service-list
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; nova --debug service-list'
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova --debug service-list'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 3, delay: 5}
skip_fail: false
@@ -89,12 +89,12 @@
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@cinder:controller' state.sls cinder -b 1
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
+ retry: {count: 1, delay: 5}
skip_fail: false
- description: Check cinder list
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; cinder list'
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
@@ -114,9 +114,24 @@
retry: {count: 1, delay: 5}
skip_fail: false
+# Install designate
+- description: Install bind
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@bind:server' state.sls bind
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install designate
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@designate:server' state.sls designate -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 10}
+ skip_fail: false
+
- description: Check neutron agent-list
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; neutron agent-list'
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
@@ -183,6 +198,13 @@
retry: {count: 2, delay: 30}
skip_fail: false
+- description: Register image in glance
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
- description: Create net04_external
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
'. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
@@ -232,19 +254,19 @@
retry: {count: 1, delay: 30}
skip_fail: false
-- description: Allow all tcp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
-
-- description: Allow all icmp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 1, delay: 30}
- skip_fail: false
+#- description: Allow all tcp
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+# '. /root/keystonercv3; openstack security group rule create --proto tcp --dst-port 22 default'
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 30}
+# skip_fail: false
+#
+#- description: Allow all icmp
+# cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+# '. /root/keystonercv3; openstack security group rule create --proto icmp default'
+# node_name: {{ HOSTNAME_CFG01 }}
+# retry: {count: 1, delay: 30}
+# skip_fail: false
- description: sync time
cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
@@ -253,6 +275,86 @@
retry: {count: 1, delay: 30}
skip_fail: false
+# Configure cinder-volume salt-call
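+# The steps below build an LVM backend for cinder-volume on each controller:
+# partition the spare /dev/vdb, turn /dev/vdb1 into a PV and a VG named
+# 'cinder-volumes', then point cinder.conf at it. A single-node sketch of the
+# same flow (assuming /dev/vdb is unused) would be:
+#   echo -e "n\np\n\n\n\nw" | fdisk /dev/vdb
+#   pvcreate /dev/vdb1 && vgcreate cinder-volumes /dev/vdb1
+#   crudini --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm
+#   service cinder-volume restart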
+- description: Partition disk 01
+ cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Partition disk 02
+ cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+ node_name: {{ HOSTNAME_CTL02 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Partition disk 03
+ cmd: salt-call cmd.run 'echo -e "nn\np\n\n\n\nw" | fdisk /dev/vdb'
+ node_name: {{ HOSTNAME_CTL03 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create LVM physical volume 01
+ cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create LVM physical volume 02
+ cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+ node_name: {{ HOSTNAME_CTL02 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create LVM physical volume 03
+ cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+ node_name: {{ HOSTNAME_CTL03 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Create volume_group
+ cmd: salt "ctl*" cmd.run 'vgcreate cinder-volumes /dev/vdb1'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Install cinder-volume
+ cmd: salt 'ctl*' cmd.run 'apt-get install cinder-volume -y'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Install crudini
+ cmd: salt "ctl*" cmd.run 'apt-get install crudini -y'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Temporary workaround to set enabled_backends value 01
+ cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+ node_name: {{ HOSTNAME_CTL01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Temporary workaround to set enabled_backends value 02
+ cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+ node_name: {{ HOSTNAME_CTL02 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Temporary workaround to set enabled_backends value 03
+ cmd: salt-call cmd.run 'crudini --verbose --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm'
+ node_name: {{ HOSTNAME_CTL03 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
+- description: Restart cinder volume
+ cmd: |
+ salt -C 'I@cinder:controller' service.restart cinder-volume;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
- description: Install docker.io on gtw
cmd: salt-call cmd.run 'apt-get install docker.io -y'
node_name: {{ HOSTNAME_GTW01 }}
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/salt.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/salt.yaml
similarity index 83%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/salt.yaml
copy to tcp_tests/templates/virtual-mcp-pike-ovs/salt.yaml
index 0b6bec6..3eb5082 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs/salt.yaml
@@ -1,6 +1,6 @@
-{% from 'virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml' import DOMAIN_NAME with context %}
+{% from 'virtual-mcp-pike-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'virtual-mcp-pike-ovs/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'virtual-mcp-pike-ovs/underlay.yaml' import DOMAIN_NAME with context %}
{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
diff --git a/tcp_tests/templates/virtual-mcp-pike-ovs/sl.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/sl.yaml
new file mode 100644
index 0000000..0c37346
--- /dev/null
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs/sl.yaml
@@ -0,0 +1,176 @@
+{% from 'virtual-mcp-pike-ovs/underlay.yaml' import HOSTNAME_CFG01 with context %}
+
+{% import 'shared-sl-tests.yaml' as SHARED_SL_TESTS with context %}
+
+# Install docker swarm
+- description: Install keepalived on mon nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'mon*' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Check the VIP on mon nodes
+ cmd: |
+ SL_VIP=`salt-call --out=newline_values_only pillar.get _param:stacklight_monitor_address`;
+ echo "_param:stacklight_monitor_address (vip): ${SL_VIP}";
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'mon*' cmd.run "ip a | grep ${SL_VIP}" | grep -B1 ${SL_VIP}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
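+# The check above reads the VIP from the _param:stacklight_monitor_address
+# pillar on cfg01 and then greps `ip a` on every mon* node; it succeeds as
+# soon as one node holds the keepalived-managed address.
+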
+- description: Configure docker service
+ cmd: salt -C 'I@docker:swarm' state.sls docker.host
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install docker swarm on master node
+ cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Send grains to the swarm slave nodes
+ cmd: salt -C 'I@docker:swarm' state.sls salt.minion.grains
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Update mine
+ cmd: salt -C 'I@docker:swarm' mine.update
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Refresh modules
+ cmd: salt -C 'I@docker:swarm' saltutil.refresh_modules; sleep 5;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Rerun swarm on master to ensure proper token propagation
+ cmd: salt -C 'I@docker:swarm:role:master' state.sls docker.swarm
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
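+# Ordering note: the grains, mine.update and refresh_modules steps above
+# publish the swarm join tokens through the salt mine (assumed behaviour of
+# the docker formula), which is why docker.swarm is applied on the master a
+# second time here.
+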
+- description: Configure slave nodes
+ cmd: salt -C 'I@docker:swarm:role:manager' state.sls docker.swarm -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: List registered Docker swarm nodes
+ cmd: salt -C 'I@docker:swarm:role:master' cmd.run 'docker node ls'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+# Install StackLight v2 (slv2) infra
+- description: Install telegraf
+ cmd: salt -C 'I@telegraf:agent or I@telegraf:remote_agent' state.sls telegraf
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
+- description: Configure Prometheus exporters
+ cmd: salt -C 'I@prometheus:exporters' state.sls prometheus
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Configure collector
+ cmd: salt -C 'I@heka:log_collector' state.sls heka.log_collector
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install elasticsearch server
+ cmd: salt -C 'I@elasticsearch:server' state.sls elasticsearch.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install kibana server
+ cmd: salt -C 'I@kibana:server' state.sls kibana.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install elasticsearch client
+ cmd: salt -C 'I@elasticsearch:client' state.sls elasticsearch.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Install kibana client
+ cmd: salt -C 'I@kibana:client' state.sls kibana.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Check influxdb
+ cmd: |
+ INFLUXDB_SERVICE=`salt -C 'I@influxdb:server' test.ping 1>/dev/null 2>&1 && echo true`;
+ echo "Influxdb service presence: ${INFLUXDB_SERVICE}";
+ if [[ "$INFLUXDB_SERVICE" == "true" ]]; then
+ salt -C 'I@influxdb:server' state.sls influxdb
+ fi
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+# Collect grains needed to configure the services
+
+- description: Get grains
+ cmd: salt -C 'I@salt:minion' state.sls salt.minion.grains
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Sync modules
+ cmd: salt -C 'I@salt:minion' saltutil.refresh_modules
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Update mine
+ cmd: salt -C 'I@salt:minion' mine.update; sleep 5;
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+# Configure the services running in Docker Swarm
+- description: Install prometheus alertmanager
+ cmd: salt -C 'I@docker:swarm' state.sls prometheus,heka.remote_collector -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Run docker state
+ cmd: salt -C 'I@docker:swarm:role:master' state.sls docker
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: docker ps
+ cmd: salt -C 'I@docker:swarm' dockerng.ps
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+- description: Configure Grafana dashboards and datasources
+ cmd: sleep 30; salt -C 'I@grafana:client' state.sls grafana.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 10}
+ skip_fail: false
+
+- description: Run salt minion to create cert files
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False "*" state.sls salt.minion
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: false
+
+{{ SHARED_SL_TESTS.MACRO_CLONE_SL_TESTS() }}
+{{ SHARED_SL_TESTS.MACRO_CONFIGURE_TESTS() }}
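+# The two macros above expand to shared steps from shared-sl-tests.yaml that
+# (per their names) clone the stacklight-pytest suite onto the target node
+# and configure it, keeping every sl.yaml copy of this block in sync.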
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/underlay--meta-data.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/underlay--meta-data.yaml
similarity index 100%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/underlay--meta-data.yaml
copy to tcp_tests/templates/virtual-mcp-pike-ovs/underlay--meta-data.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/underlay--user-data-cfg01.yaml
similarity index 100%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/underlay--user-data-cfg01.yaml
copy to tcp_tests/templates/virtual-mcp-pike-ovs/underlay--user-data-cfg01.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/underlay--user-data1604.yaml
similarity index 100%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/underlay--user-data1604.yaml
copy to tcp_tests/templates/virtual-mcp-pike-ovs/underlay--user-data1604.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml b/tcp_tests/templates/virtual-mcp-pike-ovs/underlay.yaml
similarity index 68%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml
copy to tcp_tests/templates/virtual-mcp-pike-ovs/underlay.yaml
index 18a9cd1..9b49286 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml
+++ b/tcp_tests/templates/virtual-mcp-pike-ovs/underlay.yaml
@@ -1,9 +1,9 @@
# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
-{% import 'virtual-mcp-ocata-dvr-ceph-rgw/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
-{% import 'virtual-mcp-ocata-dvr-ceph-rgw/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
-{% import 'virtual-mcp-ocata-dvr-ceph-rgw/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+{% import 'virtual-mcp-pike-ovs/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'virtual-mcp-pike-ovs/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'virtual-mcp-pike-ovs/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
---
aliases:
@@ -12,7 +12,7 @@
- &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
- &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
-{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp-ocata-dvr-ceph-rgw') %}
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-mcp-pike-ovs') %}
{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
@@ -20,20 +20,15 @@
{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMN01 = os_env('HOSTNAME_CMN01', 'cmn01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMN02 = os_env('HOSTNAME_CMN02', 'cmn02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_CMN03 = os_env('HOSTNAME_CMN03', 'cmn03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_RGW01 = os_env('HOSTNAME_CMN01', 'rgw01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_RGW02 = os_env('HOSTNAME_CMN02', 'rgw02.' + DOMAIN_NAME) %}
-{% set HOSTNAME_RGW03 = os_env('HOSTNAME_CMN03', 'rgw03.' + DOMAIN_NAME) %}
-{% set HOSTNAME_OSD01 = os_env('HOSTNAME_OSD01', 'osd01.' + DOMAIN_NAME) %}
-{% set HOSTNAME_OSD02 = os_env('HOSTNAME_OSD02', 'osd02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON01 = os_env('HOSTNAME_MON01', 'mon01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON02 = os_env('HOSTNAME_MON02', 'mon02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_MON03 = os_env('HOSTNAME_MON03', 'mon03.' + DOMAIN_NAME) %}
{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
template:
devops_settings:
- env_name: {{ os_env('ENV_NAME', 'virtual-mcp-ocata-dvr-ceph-rgw_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+ env_name: {{ os_env('ENV_NAME', 'virtual-mcp-pike-ovs_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
address_pools:
private-pool01:
@@ -48,18 +43,13 @@
default_{{ HOSTNAME_CTL03 }}: +103
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_OSD01 }}: +94
- default_{{ HOSTNAME_OSD02 }}: +95
- default_{{ HOSTNAME_CMN01 }}: +96
- default_{{ HOSTNAME_CMN02 }}: +97
- default_{{ HOSTNAME_CMN03 }}: +98
- default_{{ HOSTNAME_RGW01 }}: +76
- default_{{ HOSTNAME_RGW02 }}: +77
- default_{{ HOSTNAME_RGW03 }}: +78
+ default_{{ HOSTNAME_MON01 }}: +107
+ default_{{ HOSTNAME_MON02 }}: +108
+ default_{{ HOSTNAME_MON03 }}: +109
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
ip_ranges:
- dhcp: [+70, -10]
+ dhcp: [+90, -10]
admin-pool01:
net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
@@ -73,18 +63,13 @@
default_{{ HOSTNAME_CTL03 }}: +103
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_OSD01 }}: +94
- default_{{ HOSTNAME_OSD02 }}: +95
- default_{{ HOSTNAME_CMN01 }}: +96
- default_{{ HOSTNAME_CMN02 }}: +97
- default_{{ HOSTNAME_CMN03 }}: +98
- default_{{ HOSTNAME_RGW01 }}: +76
- default_{{ HOSTNAME_RGW02 }}: +77
- default_{{ HOSTNAME_RGW03 }}: +78
+ default_{{ HOSTNAME_MON01 }}: +107
+ default_{{ HOSTNAME_MON02 }}: +108
+ default_{{ HOSTNAME_MON03 }}: +109
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
ip_ranges:
- dhcp: [+70, -10]
+ dhcp: [+90, -10]
tenant-pool01:
net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
@@ -98,14 +83,9 @@
default_{{ HOSTNAME_CTL03 }}: +103
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_OSD01 }}: +94
- default_{{ HOSTNAME_OSD02 }}: +95
- default_{{ HOSTNAME_CMN01 }}: +96
- default_{{ HOSTNAME_CMN02 }}: +97
- default_{{ HOSTNAME_CMN03 }}: +98
- default_{{ HOSTNAME_RGW01 }}: +76
- default_{{ HOSTNAME_RGW02 }}: +77
- default_{{ HOSTNAME_RGW03 }}: +78
+ default_{{ HOSTNAME_MON01 }}: +107
+ default_{{ HOSTNAME_MON02 }}: +108
+ default_{{ HOSTNAME_MON03 }}: +109
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
ip_ranges:
@@ -123,14 +103,9 @@
default_{{ HOSTNAME_CTL03 }}: +103
default_{{ HOSTNAME_CMP01 }}: +105
default_{{ HOSTNAME_CMP02 }}: +106
- default_{{ HOSTNAME_OSD01 }}: +94
- default_{{ HOSTNAME_OSD02 }}: +95
- default_{{ HOSTNAME_CMN01 }}: +96
- default_{{ HOSTNAME_CMN02 }}: +97
- default_{{ HOSTNAME_CMN03 }}: +98
- default_{{ HOSTNAME_RGW01 }}: +76
- default_{{ HOSTNAME_RGW02 }}: +77
- default_{{ HOSTNAME_RGW03 }}: +78
+ default_{{ HOSTNAME_MON01 }}: +107
+ default_{{ HOSTNAME_MON02 }}: +108
+ default_{{ HOSTNAME_MON03 }}: +109
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
ip_ranges:
@@ -322,10 +297,10 @@
interfaces: *interfaces
network_config: *network_config
- - name: {{ HOSTNAME_CMN01 }}
+ - name: {{ HOSTNAME_MON01 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
+ vcpu: !os_env SLAVE_NODE_CPU, 3
memory: !os_env SLAVE_NODE_MEMORY, 2048
boot:
- hd
@@ -348,10 +323,10 @@
interfaces: *interfaces
network_config: *network_config
- - name: {{ HOSTNAME_CMN02 }}
+ - name: {{ HOSTNAME_MON02 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
+ vcpu: !os_env SLAVE_NODE_CPU, 3
memory: !os_env SLAVE_NODE_MEMORY, 2048
boot:
- hd
@@ -374,151 +349,10 @@
interfaces: *interfaces
network_config: *network_config
- - name: {{ HOSTNAME_CMN03 }}
+ - name: {{ HOSTNAME_MON03 }}
role: salt_minion
params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_OSD01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: ceph
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_OSD02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: cinder
- capacity: 50
- format: qcow2
- - name: ceph
- capacity: 50
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_RGW01 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
-
- - name: {{ HOSTNAME_RGW02 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
- memory: !os_env SLAVE_NODE_MEMORY, 2048
- boot:
- - hd
- cloud_init_volume_name: iso
- cloud_init_iface_up: ens3
- volumes:
- - name: system
- capacity: !os_env NODE_VOLUME_SIZE, 150
- backing_store: cloudimage1604
- format: qcow2
- - name: iso # Volume with name 'iso' will be used
- # for store image with cloud-init metadata.
- capacity: 1
- format: raw
- device: cdrom
- bus: ide
- cloudinit_meta_data: *cloudinit_meta_data
- cloudinit_user_data: *cloudinit_user_data_1604
-
- interfaces: *interfaces
- network_config: *network_config
- - name: {{ HOSTNAME_RGW03 }}
- role: salt_minion
- params:
- vcpu: !os_env SLAVE_NODE_CPU, 2
+ vcpu: !os_env SLAVE_NODE_CPU, 3
memory: !os_env SLAVE_NODE_MEMORY, 2048
boot:
- hd
@@ -555,6 +389,9 @@
capacity: !os_env NODE_VOLUME_SIZE, 150
backing_store: cloudimage1604
format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
- name: iso # Volume with name 'iso' will be used
# for store image with cloud-init metadata.
capacity: 1
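
A note on the `!os_env` values used throughout these templates (for example `vcpu: !os_env SLAVE_NODE_CPU, 3` above): the tag resolves to the named environment variable when it is set, otherwise to the default after the comma. A minimal PyYAML sketch of such a constructor — the real one lives in the fuel-devops/tcp_tests tooling and may differ in details such as type casting:

    import os

    import yaml


    def os_env_constructor(loader, node):
        # "!os_env VAR, default" -> $VAR if set, else the default
        raw = loader.construct_scalar(node)
        parts = [p.strip() for p in raw.split(',', 1)]
        default = parts[1] if len(parts) > 1 else None
        return os.environ.get(parts[0], default)


    yaml.add_constructor('!os_env', os_env_constructor, yaml.SafeLoader)

    print(yaml.safe_load('vcpu: !os_env SLAVE_NODE_CPU, 3'))  # {'vcpu': '3'}

Note the sketch returns strings; whether defaults get cast to integers is up to the loading code.
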
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/common-services.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/common-services.yaml
similarity index 97%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/common-services.yaml
copy to tcp_tests/templates/virtual-pike-ovs-dpdk/common-services.yaml
index 408230a..84e4829 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/common-services.yaml
+++ b/tcp_tests/templates/virtual-pike-ovs-dpdk/common-services.yaml
@@ -1,4 +1,4 @@
-{% from 'virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'virtual-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
# Install support services
- description: Install keepalived on ctl01
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/openstack.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/openstack.yaml
similarity index 81%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/openstack.yaml
copy to tcp_tests/templates/virtual-pike-ovs-dpdk/openstack.yaml
index 8b8fa91..0b3825d 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/openstack.yaml
+++ b/tcp_tests/templates/virtual-pike-ovs-dpdk/openstack.yaml
@@ -1,8 +1,7 @@
-{% from 'virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CTL01 with context %}
-{% from 'virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CTL02 with context %}
-{% from 'virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CTL03 with context %}
-{% from 'virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_GTW01 with context %}
+{% from 'virtual-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'virtual-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CTL01 with context %}
+{% from 'virtual-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CTL02 with context %}
+{% from 'virtual-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CTL03 with context %}
{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
@@ -64,7 +63,7 @@
- description: Check glance image-list
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; glance image-list'
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; glance image-list'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
@@ -79,9 +78,9 @@
- description: Check nova service-list
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; nova --debug service-list'
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; nova service-list'
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 3, delay: 5}
+ retry: {count: 1, delay: 5}
skip_fail: false
@@ -89,12 +88,12 @@
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
-C 'I@cinder:controller' state.sls cinder -b 1
node_name: {{ HOSTNAME_CFG01 }}
- retry: {count: 2, delay: 5}
+ retry: {count: 1, delay: 5}
skip_fail: false
- description: Check cinder list
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; cinder list'
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; cinder list'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
@@ -114,9 +113,10 @@
retry: {count: 1, delay: 5}
skip_fail: false
+
- description: Check neutron agent-list
cmd: salt --hard-crash --state-output=mixed --state-verbose=False
- -C 'I@keystone:server' cmd.run '. /root/keystonercv3; neutron agent-list'
+ -C 'I@keystone:server' cmd.run '. /root/keystonerc; neutron agent-list'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 5}
skip_fail: false
@@ -183,6 +183,13 @@
retry: {count: 2, delay: 30}
skip_fail: false
+- description: Register image in glance
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
+ '. /root/keystonercv3; glance --timeout 120 image-create --name cirros --visibility public --disk-format qcow2 --container-format bare --progress < /root/cirros-0.3.4-i386-disk.img'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 30}
+ skip_fail: false
+
- description: Create net04_external
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
'. /root/keystonercv3; neutron net-create net04_ext --router:external True --provider:physical_network physnet1 --provider:network_type flat'
@@ -199,7 +206,7 @@
- description: Create net04
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron net-create net04'
+ '. /root/keystonercv3; neutron net-create net04 --provider:network_type gre'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 30}
skip_fail: false
@@ -213,7 +220,7 @@
- description: Create router
cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; neutron router-create net04_router01'
+ '. /root/keystonercv3; neutron router-create net04_router01 --ha False'
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 30}
skip_fail: false
@@ -232,41 +239,39 @@
retry: {count: 1, delay: 30}
skip_fail: false
-- description: Allow all tcp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default tcp 1 65535 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
+# Configure cinder-volume salt-call
+- description: Set disks 01
+  cmd: salt-call cmd.run 'echo -e "n\np\n\n\n\nw" | fdisk /dev/vdb'
+ node_name: {{ HOSTNAME_CTL01 }}
retry: {count: 1, delay: 30}
skip_fail: false
-- description: Allow all icmp
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl01*' cmd.run
- '. /root/keystonercv3; nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0'
- node_name: {{ HOSTNAME_CFG01 }}
+- description: Set disks 02
+  cmd: salt-call cmd.run 'echo -e "n\np\n\n\n\nw" | fdisk /dev/vdb'
+ node_name: {{ HOSTNAME_CTL02 }}
retry: {count: 1, delay: 30}
skip_fail: false
-- description: sync time
- cmd: salt --hard-crash --state-output=mixed --state-verbose=False '*' cmd.run
- 'service ntp stop; ntpd -gq; service ntp start'
- node_name: {{ HOSTNAME_CFG01 }}
+- description: Set disks 03
+  cmd: salt-call cmd.run 'echo -e "n\np\n\n\n\nw" | fdisk /dev/vdb'
+ node_name: {{ HOSTNAME_CTL03 }}
retry: {count: 1, delay: 30}
skip_fail: false
-- description: Install docker.io on gtw
- cmd: salt-call cmd.run 'apt-get install docker.io -y'
- node_name: {{ HOSTNAME_GTW01 }}
+- description: Create partitions 01
+ cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+ node_name: {{ HOSTNAME_CTL01 }}
retry: {count: 1, delay: 30}
skip_fail: false
-- description: create rc file on cfg
- cmd: scp ctl01:/root/keystonercv3 /root
- node_name: {{ HOSTNAME_CFG01 }}
+- description: Create partitions 02
+ cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+ node_name: {{ HOSTNAME_CTL02 }}
retry: {count: 1, delay: 30}
skip_fail: false
-- description: Copy rc file
- cmd: scp /root/keystonercv3 gtw01:/root
- node_name: {{ HOSTNAME_CFG01 }}
+- description: Create partitions 03
+ cmd: salt-call cmd.run 'pvcreate /dev/vdb1'
+ node_name: {{ HOSTNAME_CTL03 }}
retry: {count: 1, delay: 30}
skip_fail: false
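
The six steps above replace the old security-group/NTP/rc-file steps with LVM preparation for cinder-volume: fdisk is driven non-interactively through stdin and the resulting partition is registered as a physical volume. Roughly the same sequence as a standalone sketch, assuming the same /dev/vdb device and root privileges:

    import subprocess

    # fdisk answers, one per prompt: n(ew partition), p(rimary),
    # default partition number, default first/last sector, w(rite)
    FDISK_SCRIPT = "n\np\n\n\n\nw\n"


    def prepare_cinder_disk(dev="/dev/vdb"):
        subprocess.run(["fdisk", dev], input=FDISK_SCRIPT,
                       universal_newlines=True, check=True)
        # Turn the new partition into an LVM physical volume
        subprocess.run(["pvcreate", dev + "1"], check=True)


    if __name__ == "__main__":
        prepare_cinder_disk()
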
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/salt.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/salt.yaml
similarity index 83%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/salt.yaml
copy to tcp_tests/templates/virtual-pike-ovs-dpdk/salt.yaml
index 0b6bec6..990136b 100644
--- a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/salt.yaml
+++ b/tcp_tests/templates/virtual-pike-ovs-dpdk/salt.yaml
@@ -1,6 +1,6 @@
-{% from 'virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% from 'virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml' import LAB_CONFIG_NAME with context %}
-{% from 'virtual-mcp-ocata-dvr-ceph-rgw/underlay.yaml' import DOMAIN_NAME with context %}
+{% from 'virtual-pike-ovs-dpdk/underlay.yaml' import HOSTNAME_CFG01 with context %}
+{% from 'virtual-pike-ovs-dpdk/underlay.yaml' import LAB_CONFIG_NAME with context %}
+{% from 'virtual-pike-ovs-dpdk/underlay.yaml' import DOMAIN_NAME with context %}
{% set SALT_MODELS_REPOSITORY = os_env('SALT_MODELS_REPOSITORY','https://gerrit.mcp.mirantis.net/salt-models/mcp-virtual-lab') %}
# Other salt model repository parameters see in shared-salt.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/underlay--meta-data.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay--meta-data.yaml
similarity index 100%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/underlay--meta-data.yaml
copy to tcp_tests/templates/virtual-pike-ovs-dpdk/underlay--meta-data.yaml
diff --git a/tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/underlay--user-data-cfg01.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay--user-data-cfg01.yaml
similarity index 100%
copy from tcp_tests/templates/virtual-mcp-ocata-dvr-ceph-rgw/underlay--user-data-cfg01.yaml
copy to tcp_tests/templates/virtual-pike-ovs-dpdk/underlay--user-data-cfg01.yaml
diff --git a/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay--user-data1604.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay--user-data1604.yaml
new file mode 100644
index 0000000..8f001bb
--- /dev/null
+++ b/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay--user-data1604.yaml
@@ -0,0 +1,78 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: root
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ {% for key in config.underlay.ssh_keys %}
+ - ssh-rsa {{ key['public'] }}
+ {% endfor %}
+
+ disable_root: false
+ chpasswd:
+ list: |
+ root:r00tme
+ expire: False
+
+ bootcmd:
+   # Block SSH access while the node is being prepared
+ - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+ # Enable root access
+ - sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin yes/' /etc/ssh/sshd_config
+ - service sshd restart
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ - export TERM=linux
+ - export LANG=C
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Prepare network connection
+ - sudo ifup ens3
+ #- sudo route add default gw {gateway} {interface_name}
+
+ # Create swap
+ - fallocate -l 4G /swapfile
+ - chmod 600 /swapfile
+ - mkswap /swapfile
+ - swapon /swapfile
+ - echo "/swapfile none swap defaults 0 0" >> /etc/fstab
+
+
+   ############## TCP Cloud node ##################
+ #- sleep 120
+ - echo "Preparing base OS"
+ - which wget >/dev/null || (apt-get update; apt-get install -y wget)
+
+ - echo "deb [arch=amd64] http://apt.mirantis.com/xenial {{ REPOSITORY_SUITE }} salt extra" > /etc/apt/sources.list.d/mcp_salt.list;
+ - wget -O - http://apt.mirantis.com/public.gpg | apt-key add -;
+ - echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3 xenial main" > /etc/apt/sources.list.d/saltstack.list;
+ - wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.3/SALTSTACK-GPG-KEY.pub | apt-key add -;
+
+ - apt-get clean
+ - eatmydata apt-get update && apt-get -y upgrade
+
+ # Install common packages
+ - eatmydata apt-get install -y python-pip git curl tmux byobu iputils-ping traceroute htop tree mc hugepages
+
+   # Enable hugepages on the nodes
+ - echo 2048 > /proc/sys/vm/nr_hugepages
+
+ ########################################################
+ # Node is ready, allow SSH access
+ - echo "Allow SSH access ..."
+ - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+ ########################################################
+
+ write_files:
+ - path: /etc/network/interfaces
+ content: |
+ auto ens3
+ iface ens3 inet dhcp
+
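
This user-data file is a Jinja template rather than plain cloud-init: underlay.yaml (below) imports it with `REPOSITORY_SUITE` and a `config` object in context. A standalone render might look like this sketch; the stub classes and the placeholder key are made up for illustration:

    import jinja2


    class Underlay(object):
        ssh_keys = [{'public': 'AAAAB3Nza...'}]  # placeholder public key


    class Config(object):
        underlay = Underlay()


    path = ('tcp_tests/templates/virtual-pike-ovs-dpdk/'
            'underlay--user-data1604.yaml')
    with open(path) as f:
        rendered = jinja2.Template(f.read()).render(
            config=Config(), REPOSITORY_SUITE='testing')
    print(rendered)  # a YAML block scalar wrapping the #cloud-config payload
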
diff --git a/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay.yaml b/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay.yaml
new file mode 100644
index 0000000..1bb3c58
--- /dev/null
+++ b/tcp_tests/templates/virtual-pike-ovs-dpdk/underlay.yaml
@@ -0,0 +1,427 @@
+# Set the repository suite: one of 'nightly', 'testing', 'stable', or any other required suite
+{% set REPOSITORY_SUITE = os_env('REPOSITORY_SUITE', 'testing') %}
+
+{% import 'virtual-pike-ovs-dpdk/underlay--meta-data.yaml' as CLOUDINIT_META_DATA with context %}
+{% import 'virtual-pike-ovs-dpdk/underlay--user-data-cfg01.yaml' as CLOUDINIT_USER_DATA_CFG01 with context %}
+{% import 'virtual-pike-ovs-dpdk/underlay--user-data1604.yaml' as CLOUDINIT_USER_DATA_1604 with context %}
+
+---
+aliases:
+ - &interface_model {{ os_env('INTERFACE_MODEL', 'virtio') }}
+ - &cloudinit_meta_data {{ CLOUDINIT_META_DATA }}
+ - &cloudinit_user_data_cfg01 {{ CLOUDINIT_USER_DATA_CFG01 }}
+ - &cloudinit_user_data_1604 {{ CLOUDINIT_USER_DATA_1604 }}
+
+{% set LAB_CONFIG_NAME = os_env('LAB_CONFIG_NAME', 'virtual-pike-ovs-dpdk') %}
+{% set DOMAIN_NAME = os_env('DOMAIN_NAME', LAB_CONFIG_NAME) + '.local' %}
+{% set HOSTNAME_CFG01 = os_env('HOSTNAME_CFG01', 'cfg01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL01 = os_env('HOSTNAME_CTL01', 'ctl01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL02 = os_env('HOSTNAME_CTL02', 'ctl02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CTL03 = os_env('HOSTNAME_CTL03', 'ctl03.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP01 = os_env('HOSTNAME_CMP01', 'cmp01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_CMP02 = os_env('HOSTNAME_CMP02', 'cmp02.' + DOMAIN_NAME) %}
+{% set HOSTNAME_GTW01 = os_env('HOSTNAME_GTW01', 'gtw01.' + DOMAIN_NAME) %}
+{% set HOSTNAME_PRX01 = os_env('HOSTNAME_PRX01', 'prx01.' + DOMAIN_NAME) %}
+
+template:
+ devops_settings:
+ env_name: {{ os_env('ENV_NAME', 'virtual-pike-ovs-dpdk_' + REPOSITORY_SUITE + "_" + os_env('BUILD_NUMBER', '')) }}
+
+ address_pools:
+ private-pool01:
+ net: {{ os_env('PRIVATE_ADDRESS_POOL01', '10.60.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +100
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+90, -10]
+
+ admin-pool01:
+ net: {{ os_env('ADMIN_ADDRESS_POOL01', '10.70.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +90
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+90, -10]
+
+ tenant-pool01:
+ net: {{ os_env('TENANT_ADDRESS_POOL01', '10.80.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +100
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+10, -10]
+
+ external-pool01:
+ net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '10.90.0.0/16:24') }}
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ default_{{ HOSTNAME_CFG01 }}: +100
+ default_{{ HOSTNAME_CTL01 }}: +101
+ default_{{ HOSTNAME_CTL02 }}: +102
+ default_{{ HOSTNAME_CTL03 }}: +103
+ default_{{ HOSTNAME_CMP01 }}: +105
+ default_{{ HOSTNAME_CMP02 }}: +106
+ default_{{ HOSTNAME_GTW01 }}: +110
+ default_{{ HOSTNAME_PRX01 }}: +121
+ ip_ranges:
+ dhcp: [+10, -10]
+
+
+ groups:
+ - name: default
+ driver:
+ name: devops.driver.libvirt
+ params:
+ connection_string: !os_env CONNECTION_STRING, qemu:///system
+ storage_pool_name: !os_env STORAGE_POOL_NAME, default
+ stp: False
+ hpet: True
+ enable_acpi: true
+ use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+ use_hugepages: !os_env DRIVER_USE_HUGEPAGES, false
+
+ network_pools:
+ admin: admin-pool01
+ private: private-pool01
+ tenant: tenant-pool01
+ external: external-pool01
+
+ l2_network_devices:
+ private:
+ address_pool: private-pool01
+ dhcp: true
+
+ admin:
+ address_pool: admin-pool01
+ dhcp: true
+ forward:
+ mode: nat
+
+ tenant:
+ address_pool: tenant-pool01
+ dhcp: true
+
+ external:
+ address_pool: external-pool01
+ dhcp: true
+ forward:
+ mode: nat
+
+
+ group_volumes:
+    - name: cloudimage1604 # This name is used as the 'backing_store' option for node volumes.
+ source_image: !os_env IMAGE_PATH1604 # https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img or
+ # http://apt.tcpcloud.eu/images/ubuntu-16-04-x64-201608231004.qcow2
+ format: qcow2
+
+ nodes:
+ - name: {{ HOSTNAME_CFG01 }}
+ role: salt_master
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                      # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_cfg01
+
+ interfaces:
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ network_config:
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+
+ - name: {{ HOSTNAME_CTL01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                      # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: &interfaces
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ network_config: &network_config
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+
+ - name: {{ HOSTNAME_CTL02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                      # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_CTL03 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                      # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+ - name: {{ HOSTNAME_PRX01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: cinder
+ capacity: 50
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                      # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *interfaces
+ network_config: *network_config
+
+
+ - name: {{ HOSTNAME_CMP01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 12
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ numa:
+ - cpus: 0,1,2,3,4,5
+ memory: 4096
+ - cpus: 6,7,8,9,10,11
+ memory: 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                      # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+
+ interfaces: &all_interfaces
+ - label: ens3
+ l2_network_device: admin
+ interface_model: *interface_model
+ - label: ens4
+ l2_network_device: private
+ interface_model: *interface_model
+ - label: ens5
+ l2_network_device: tenant
+ interface_model: *interface_model
+ - label: ens6
+ l2_network_device: external
+ interface_model: *interface_model
+ network_config: &all_network_config
+ ens3:
+ networks:
+ - admin
+ ens4:
+ networks:
+ - private
+ ens5:
+ networks:
+ - tenant
+ ens6:
+ networks:
+ - external
+
+ - name: {{ HOSTNAME_CMP02 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 12
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ numa:
+ - cpus: 0,1,2,3,4,5
+ memory: 4096
+ - cpus: 6,7,8,9,10,11
+ memory: 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                      # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
+
+ - name: {{ HOSTNAME_GTW01 }}
+ role: salt_minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: ens3
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage1604
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+                      # to store the image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: *cloudinit_meta_data
+ cloudinit_user_data: *cloudinit_user_data_1604
+
+ interfaces: *all_interfaces
+ network_config: *all_network_config
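
The address pools at the top of this file use relative offsets: `10.60.0.0/16:24` asks devops to carve /24 subnets out of the /16 pool, and entries such as `+100` are offsets from the network address of whichever /24 gets allocated. A quick check with the standard ipaddress module, assuming the first /24 is the one taken:

    import ipaddress

    pool = ipaddress.ip_network(u'10.60.0.0/16')
    subnet = next(pool.subnets(new_prefix=24))  # assume 10.60.0.0/24

    print(subnet.network_address + 1)    # +1   -> 10.60.0.1 (gateway)
    print(subnet.network_address + 100)  # +100 -> 10.60.0.100 (cfg01)
    print(subnet.network_address + 110)  # +110 -> 10.60.0.110 (gtw01)
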
diff --git a/tcp_tests/tests/system/test_install_mcp_ovs_pike.py b/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
new file mode 100644
index 0000000..299c7af
--- /dev/null
+++ b/tcp_tests/tests/system/test_install_mcp_ovs_pike.py
@@ -0,0 +1,149 @@
+# Copyright 2018 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import pytest
+
+from tcp_tests import logger
+from tcp_tests import settings
+
+LOG = logger.logger
+
+
+@pytest.mark.deploy
+class TestMcpInstallOvsPike(object):
+    """Test class for testing MCP Pike OVS deployment"""
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ def test_mcp_pike_ovs_install(self, underlay,
+ openstack_deployed,
+ openstack_actions):
+        """Test for deploying an MCP environment and checking it
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute nodes
+ 4. Run tempest
+
+ """
+ openstack_actions._salt.local(
+ tgt='*', fun='cmd.run',
+ args='service ntp stop; ntpd -gq; service ntp start')
+
+ if settings.RUN_TEMPEST:
+ openstack_actions.run_tempest(pattern=settings.PATTERN)
+ openstack_actions.download_tempest_report()
+ LOG.info("*************** DONE **************")
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ def test_mcp_pike_ovs_sl_install(self, underlay, config,
+ openstack_deployed,
+ sl_deployed):
+        """Test for deploying an MCP environment and checking it
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute nodes
+ 4. Get monitoring nodes
+ 5. Check that docker services are running
+ 6. Check current prometheus targets are UP
+ 7. Run SL component tests
+ 8. Download SL component tests report
+ """
+ mon_nodes = sl_deployed.get_monitoring_nodes()
+ LOG.debug('Mon nodes list {0}'.format(mon_nodes))
+
+ sl_deployed.check_prometheus_targets(mon_nodes)
+
+        # Run SL component tests
+ sl_deployed.run_sl_functional_tests(
+ 'cfg01',
+ '/root/stacklight-pytest/stacklight_tests/',
+ 'tests/prometheus',
+ 'test_alerts.py')
+
+ # Download report
+ sl_deployed.download_sl_test_report(
+ 'cfg01',
+ '/root/stacklight-pytest/stacklight_tests/report.xml')
+ LOG.info("*************** DONE **************")
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ def test_mcp_pike_dvr_install(self,
+ underlay,
+ openstack_deployed,
+ openstack_actions):
+        """Test for deploying an MCP environment and checking it
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute nodes
+
+ """
+ openstack_actions._salt.local(
+ tgt='*', fun='cmd.run',
+ args='service ntp stop; ntpd -gq; service ntp start')
+
+ if settings.RUN_TEMPEST:
+ openstack_actions.run_tempest(pattern=settings.PATTERN)
+ openstack_actions.download_tempest_report()
+ LOG.info("*************** DONE **************")
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ def test_mcp_pike_dvr_sl_install(self, underlay, config,
+ openstack_deployed,
+ sl_deployed):
+        """Test for deploying an MCP environment and checking it
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute nodes
+ 4. Get monitoring nodes
+ 5. Check that docker services are running
+ 6. Check current prometheus targets are UP
+ 7. Run SL component tests
+ 8. Download SL component tests report
+ """
+
+ mon_nodes = sl_deployed.get_monitoring_nodes()
+ LOG.debug('Mon nodes list {0}'.format(mon_nodes))
+
+ sl_deployed.check_prometheus_targets(mon_nodes)
+
+ # Run SL component tests
+ sl_deployed.run_sl_functional_tests(
+ 'cfg01',
+ '/root/stacklight-pytest/stacklight_tests/',
+ 'tests/prometheus',
+ 'test_alerts.py')
+
+ # Download report
+ sl_deployed.download_sl_test_report(
+ 'cfg01',
+ '/root/stacklight-pytest/stacklight_tests/report.xml')
+ LOG.info("*************** DONE **************")
+
+ @pytest.mark.fail_snapshot
+ def test_mcp11_pike_dpdk_install(self, underlay, openstack_deployed,
+ show_step):
+        """Test for deploying an MCP DPDK environment and checking it
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute nodes
+ """
+ LOG.info("*************** DONE **************")
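
All four deploy tests in this module gate tempest on `settings.RUN_TEMPEST` and pass `settings.PATTERN` through to `run_tempest()`. A minimal sketch of what such environment-driven flags can look like — the real tcp_tests.settings module defines many more options and its exact defaults may differ:

    import os


    def env_bool(name, default='false'):
        return os.environ.get(name, default).lower() in ('true', 'yes', '1')


    RUN_TEMPEST = env_bool('RUN_TEMPEST')             # off unless requested
    PATTERN = os.environ.get('PATTERN', 'set=smoke')  # tempest selection
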
diff --git a/tcp_tests/tests/system/test_ovs_pike_ceph.py b/tcp_tests/tests/system/test_ovs_pike_ceph.py
new file mode 100644
index 0000000..080be26
--- /dev/null
+++ b/tcp_tests/tests/system/test_ovs_pike_ceph.py
@@ -0,0 +1,49 @@
+# Copyright 2018 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import pytest
+
+from tcp_tests import logger
+from tcp_tests import settings
+
+LOG = logger.logger
+
+
+@pytest.mark.deploy
+class TestInstallOvsPikeCeph(object):
+    """Test class for testing OpenStack deployment with Ceph and OVS"""
+
+ @pytest.mark.grab_versions
+ @pytest.mark.fail_snapshot
+ def test_pike_ceph_all_ovs_install(self, underlay, openstack_deployed,
+ ceph_deployed,
+ openstack_actions):
+        """Test for deploying Pike OVS with Ceph and checking it
+ Scenario:
+ 1. Prepare salt on hosts
+ 2. Setup controller nodes
+ 3. Setup compute nodes
+ 4. Setup ceph RBD, replication factor 2 for cinder, nova, glance
+ 5. Run tempest
+
+ """
+ openstack_actions._salt.local(
+ tgt='*', fun='cmd.run',
+ args='service ntp stop; ntpd -gq; service ntp start')
+
+ if settings.RUN_TEMPEST:
+ openstack_actions.run_tempest(pattern=settings.PATTERN,
+ conf_name='ceph_mcp.conf')
+ openstack_actions.download_tempest_report()
+ LOG.info("*************** DONE **************")
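
Step 4 of the scenario above expects replication factor 2 for the cinder, nova and glance RBD pools. One way to verify this on a deployed cluster is via the ceph CLI's JSON output; the pool names below follow the usual OpenStack conventions and may differ in the actual model:

    import json
    import subprocess


    def pool_size(pool):
        # 'ceph osd pool get <pool> size -f json' -> {"pool": ..., "size": N}
        out = subprocess.check_output(
            ['ceph', 'osd', 'pool', 'get', pool, 'size', '-f', 'json'])
        return json.loads(out.decode())['size']


    for pool in ('volumes', 'vms', 'images'):  # cinder, nova, glance pools
        assert pool_size(pool) == 2, '%s is not replicated twice' % pool
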