Merge "[mitaka-trusty] Restore ability to deploy mitaka release on trusty"
diff --git a/tcp_tests/templates/virtual-mcp-trusty/Readme.txt b/tcp_tests/templates/virtual-mcp-trusty/Readme.txt
index fc9f978..da47d0b 100644
--- a/tcp_tests/templates/virtual-mcp-trusty/Readme.txt
+++ b/tcp_tests/templates/virtual-mcp-trusty/Readme.txt
@@ -2,4 +2,14 @@
- virtual-mcp-mitaka-dvr-trusty
- virtual-mcp-mitaka-ovs-trusty
-Used by maintenance team.
\ No newline at end of file
+Used by maintenance team.
+
+The following env vars should be used (see the example export below):
+SALT_MODELS_COMMIT = 'fa85f84'
+SALT_MODELS_SYSTEM_TAG = '2018.8.0'
+REPOSITORY_SUITE = '2018.8.0'
+OVERRIDES = 'openstack_log_appender: true
+linux_system_repo_mk_openstack_version: testing
+'
+
+Also VCP 2018.8.0 images should be used
\ No newline at end of file
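
For illustration, the variables above could be exported in the shell before starting
a deployment run (the names and values are taken from this Readme; the $'...' form for
OVERRIDES is just one assumed way to pass the multi-line value):

    export SALT_MODELS_COMMIT='fa85f84'
    export SALT_MODELS_SYSTEM_TAG='2018.8.0'
    export REPOSITORY_SUITE='2018.8.0'
    # multi-line override pillar values, newline-separated
    export OVERRIDES=$'openstack_log_appender: true\nlinux_system_repo_mk_openstack_version: testing'
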
diff --git a/tcp_tests/templates/virtual-mcp-trusty/core.yaml b/tcp_tests/templates/virtual-mcp-trusty/core.yaml
index 42f7e52..a433aee 100644
--- a/tcp_tests/templates/virtual-mcp-trusty/core.yaml
+++ b/tcp_tests/templates/virtual-mcp-trusty/core.yaml
@@ -1,12 +1,129 @@
{% from 'virtual-mcp-trusty/underlay.yaml' import HOSTNAME_CFG01 with context %}
-{% import 'shared-core.yaml' as SHARED_CORE with context %}
+# vkhlyunev: the shared steps are constantly updated due to master development, so
+# we can't use them for the old release (e.g. the new gluster steps use the
+# glusterfs:server:role:primary pillar for targeting, which does not exist in the
+# 2018.8.0 release model, and we can't update the model).
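+#
+# For illustration only (the exact macro internals are an assumption; the pillar name
+# comes from the note above): a master-branch shared step would target the primary
+# roughly as
+#   salt -C 'I@glusterfs:server:role:primary' state.sls glusterfs.server.setup -b 1
+# whereas the 2018.8.0 model only provides glusterfs:server, so the explicit steps
+# below fall back to compound matches such as 'I@glusterfs:server and *01*'.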
-{{ SHARED_CORE.MACRO_INSTALL_KEEPALIVED() }}
-{{ SHARED_CORE.MACRO_INSTALL_GLUSTERFS() }}
-{{ SHARED_CORE.MACRO_INSTALL_RABBITMQ() }}
-{{ SHARED_CORE.MACRO_INSTALL_GALERA() }}
-{{ SHARED_CORE.MACRO_INSTALL_HAPROXY() }}
-{{ SHARED_CORE.MACRO_INSTALL_NGINX() }}
-{{ SHARED_CORE.MACRO_INSTALL_MEMCACHED() }}
-{{ SHARED_CORE.MACRO_CHECK_VIP() }}
+# Install support services
+- description: Install keepalived on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keepalived:cluster and *01*' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Install keepalived
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keepalived:cluster' state.sls keepalived
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 10}
+ skip_fail: true
+
+- description: Install glusterfs
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' state.sls glusterfs.server.service && sleep 20
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Setup glusterfs on primary controller
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server and *01*' state.sls glusterfs.server.setup -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 5}
+ skip_fail: false
+
+- description: Setup glusterfs on other nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' state.sls glusterfs -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 5, delay: 5}
+ skip_fail: false
+
+- description: Check the gluster status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@glusterfs:server' cmd.run 'gluster peer status && gluster volume status' -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install RabbitMQ on ctl01
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install RabbitMQ
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server' state.sls rabbitmq
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check the rabbitmq status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@rabbitmq:server' cmd.run 'rabbitmqctl cluster_status'
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install Galera on first server
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:master' state.sls galera
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install Galera on other servers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:slave' state.sls galera -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check mysql status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@galera:*' mysql.status | grep -A1 -e "wsrep_incoming_addresses\|wsrep_cluster_size"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: true
+
+
+- description: Install haproxy
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' state.sls haproxy
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check haproxy status
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' service.status haproxy
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Restart rsyslog
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@haproxy:proxy' service.restart rsyslog
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install memcached on all controllers
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@memcached:server' state.sls memcached
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check the VIP
+ cmd: |
+ OPENSTACK_CONTROL_ADDRESS=`salt-call --out=newline_values_only pillar.get _param:openstack_control_address`;
+ echo "_param:openstack_control_address (vip): ${OPENSTACK_CONTROL_ADDRESS}";
+ salt --hard-crash --state-output=mixed --state-verbose=False -C 'I@keepalived:cluster' cmd.run "ip a | grep ${OPENSTACK_CONTROL_ADDRESS}" | grep -B1 ${OPENSTACK_CONTROL_ADDRESS}
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 3, delay: 10}
+ skip_fail: false
diff --git a/tcp_tests/templates/virtual-mcp-trusty/openstack.yaml b/tcp_tests/templates/virtual-mcp-trusty/openstack.yaml
index c001d39..fff0966 100644
--- a/tcp_tests/templates/virtual-mcp-trusty/openstack.yaml
+++ b/tcp_tests/templates/virtual-mcp-trusty/openstack.yaml
@@ -6,6 +6,10 @@
{% from 'shared-salt.yaml' import IPV4_NET_EXTERNAL_PREFIX with context %}
{% from 'shared-salt.yaml' import IPV4_NET_TENANT_PREFIX with context %}
+# vkhlyunev: the shared steps are constantly updated due to master development, so
+# we can't use them for the old release. For openstack.yaml we can still use some
+# shared steps for now, but TODO: bind the deployment workflow to the 2018.8.0 state.
+
# Install OpenStack control services
- description: Sync time
cmd: salt --hard-crash --state-output=mixed --state-verbose=False -G 'oscodename:trusty' cmd.run "service ntp stop && ntpdate pool.ntp.org && service ntp start"
@@ -23,11 +27,94 @@
retry: {count: 1, delay: 5}
skip_fail: true
-{{ SHARED_OPENSTACK.MACRO_INSTALL_KEYSTONE() }}
+- description: Install keystone service on primary node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server and *01*' state.sls keystone.server
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 15}
+ skip_fail: false
+
+- description: Install keystone service on other nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' state.sls keystone.server
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 15}
+ skip_fail: false
+
+- description: Restart apache due to PROD-10477
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*'
+ cmd.run "service apache2 restart"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 15}
+ skip_fail: false
+
+- description: Check apache status due to PROD-10477
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False 'ctl*'
+ cmd.run "service apache2 status"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 15}
+ skip_fail: false
+
+- description: Mount glusterfs.client volumes (requires the 'keystone' system user to be created)
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' state.sls glusterfs.client -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Update fernet keys for keystone server on the mounted glusterfs volume
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:server' state.sls keystone.server -b 1
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Populate keystone services/tenants/admins
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C 'I@keystone:client' state.sls keystone.client
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 2, delay: 5}
+ skip_fail: false
+
+- description: Check keystone service-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C "I@keystone:server" cmd.run ". /root/keystonercv3;
+ openstack service list"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
{{ SHARED_OPENSTACK.MACRO_INSTALL_GLANCE() }}
-{{ SHARED_OPENSTACK.MACRO_INSTALL_NOVA() }}
+- description: Install nova service on primary node
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C "I@nova:controller and *01*" state.sls nova.controller
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Install nova service on other nodes
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C "I@nova:controller" state.sls nova.controller
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check nova service-list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C "I@keystone:server" cmd.run ". /root/keystonercv3;
+ openstack compute service list"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
+
+- description: Check nova list
+ cmd: salt --hard-crash --state-output=mixed --state-verbose=False
+ -C "I@keystone:server" cmd.run ". /root/keystonercv3;
+ openstack server list"
+ node_name: {{ HOSTNAME_CFG01 }}
+ retry: {count: 1, delay: 5}
+ skip_fail: false
{{ SHARED_OPENSTACK.MACRO_INSTALL_CINDER() }}
diff --git a/tcp_tests/templates/virtual-mcp-trusty/salt.yaml b/tcp_tests/templates/virtual-mcp-trusty/salt.yaml
index ab04cfb..d8f7fb7 100644
--- a/tcp_tests/templates/virtual-mcp-trusty/salt.yaml
+++ b/tcp_tests/templates/virtual-mcp-trusty/salt.yaml
@@ -10,6 +10,15 @@
{% import 'shared-salt.yaml' as SHARED with context %}
+# vkhlyunev: sometimes we have to verify fixes to mitaka openstack based on an
+# ubuntu trusty OS deployment. The last known deployable configuration is based on
+# the mcp-virtual-lab/salt-formulas/reclass-system parameters/commits/tags listed below:
+# SALT_MODELS_COMMIT = 'fa85f84'
+# SALT_MODELS_SYSTEM_TAG = '2018.8.0'
+# REPOSITORY_SUITE = '2018.8.0'
+# OVERRIDES = 'openstack_log_appender: true
+# linux_system_repo_mk_openstack_version: testing'
+
{{ SHARED.MACRO_INSTALL_SALT_MASTER() }}
{{ SHARED.MACRO_CLONE_RECLASS_MODELS() }}
@@ -30,13 +39,27 @@
retry: {count: 1, delay: 1}
skip_fail: false
{%- endfor %}
+{%- endif %}
+
+# vkhlyunev: the fa85f84 model commit contains sphinx on the cfg01 node,
+# which is not required for mitaka-trusty testing. Unfortunately we cannot fix
+# this in the model itself because the models are constantly updated to follow
+# the development of the main release.
+- description: Apply sphinx workaround - delete system.sphinx class
+ cmd: sed -i -e '/system.sphinx/d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml
+ node_name: {{ HOSTNAME_CFG01 }}
+ skip_fail: false
+
+- description: Apply sphinx workaround - delete nginx section
+ cmd: sed -i -e '/ nginx:/,+8d' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml
+ node_name: {{ HOSTNAME_CFG01 }}
+ skip_fail: false
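+# Illustrative verification only (not part of the deployment flow): after both sed
+# commands have run on {{ HOSTNAME_CFG01 }}, the following should print nothing:
+#   grep -n 'system.sphinx' /srv/salt/reclass/classes/cluster/{{ SHARED.CLUSTER_NAME }}/infra/config.yml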
- description: Refresh pillar
cmd: salt '*' saltutil.refresh_pillar
node_name: {{ HOSTNAME_CFG01 }}
retry: {count: 1, delay: 1}
skip_fail: false
-{%- endif %}
{{ SHARED.MACRO_GENERATE_INVENTORY() }}