Merge "Add extra chack to galera restore test"
diff --git a/jobs/pipelines/swarm-bootstrap-salt-cluster-heat.groovy b/jobs/pipelines/swarm-bootstrap-salt-cluster-heat.groovy
index 1ea4d5c..a4903c8 100644
--- a/jobs/pipelines/swarm-bootstrap-salt-cluster-heat.groovy
+++ b/jobs/pipelines/swarm-bootstrap-salt-cluster-heat.groovy
@@ -82,7 +82,7 @@
                     # export OS_PASSWORD=${OS_PASSWORD}
                     # export OS_PROJECT_NAME=${OS_PROJECT_NAME}
                     openstack --insecure stack delete -y ${ENV_NAME} || true
-                    while openstack --insecure stack show ${ENV_NAME} -f value -c stack_status; do sleep 10; done
+                    timeout 20m /bin/bash -c "while openstack --insecure stack show ${ENV_NAME} -f value -c stack_status; do sleep 10; done"
                 """)
 
                 println "Remove config drive ISO"
diff --git a/jobs/pipelines/swarm-testrail-report.groovy b/jobs/pipelines/swarm-testrail-report.groovy
index 1967016..a8af4e5 100644
--- a/jobs/pipelines/swarm-testrail-report.groovy
+++ b/jobs/pipelines/swarm-testrail-report.groovy
@@ -80,9 +80,6 @@
                     testrail_name_template = '{title}'
                     reporter_extra_options = [
                       "--testrail-add-missing-cases",
-                      "--testrail-case-custom-fields {\\\"custom_qa_team\\\":\\\"9\\\"}",
-                      "--testrail-case-section-name \'All\'",
-                      "--testrail_configuration_name \'tcp-qa\'",
                     ]
                     ret = shared.upload_results_to_testrail(deployment_report_name, testSuiteName, methodname, testrail_name_template, reporter_extra_options)
                     common.printMsg(ret.stdout, "blue")
@@ -103,9 +100,7 @@
                     testrail_name_template = "{title}"
                     reporter_extra_options = [
                       "--testrail-add-missing-cases",
-                      "--testrail-case-custom-fields {\\\"custom_qa_team\\\":\\\"9\\\"}",
-                      "--testrail-case-section-name \'All\'",
-                      "--testrail_configuration_name \'tcp-qa\'",
+                      "--testrail_configuration_name tcp-qa",
                     ]
                     ret = shared.upload_results_to_testrail(tcpqa_report_name, testSuiteName, methodname, testrail_name_template, reporter_extra_options)
                     common.printMsg(ret.stdout, "blue")
@@ -127,7 +122,7 @@
                     reporter_extra_options = [
                       "--send-duplicates",
                       "--testrail-add-missing-cases",
-                      "--testrail_configuration_name \'tcp-qa\'",
+                      "--testrail_configuration_name tcp-qa",
                     ]
                     ret = shared.upload_results_to_testrail(tempest_report_name,
                                                             testSuiteName,
@@ -200,7 +195,11 @@
                     testSuiteName = "LMA2.0_Automated"
                     methodname = "{methodname}"
                     testrail_name_template = "{title}"
-                    ret = shared.upload_results_to_testrail(stacklight_report_name, testSuiteName, methodname, testrail_name_template)
+                    reporter_extra_options = [
+                      "--testrail-add-missing-cases",
+                      "--testrail-case-custom-fields {\\\"custom_qa_team\\\":\\\"9\\\"}",
+                    ]
+                    ret = shared.upload_results_to_testrail(stacklight_report_name, testSuiteName, methodname, testrail_name_template, reporter_extra_options)
                     common.printMsg(ret.stdout, "blue")
                     report_url = ret.stdout.split("\n").each {
                         if (it.contains("[TestRun URL]")) {
@@ -220,9 +219,7 @@
                     reporter_extra_options = [
                       "--send-duplicates",
                       "--testrail-add-missing-cases",
-                      "--testrail-case-custom-fields {\\\"custom_qa_team\\\":\\\"9\\\"}",
-                      "--testrail-case-section-name \'All\'",
-                      "--testrail_configuration_name \'tcp-qa\'",
+                      "--testrail_configuration_name tcp-qa",
                     ]
                     ret = shared.upload_results_to_testrail(cvp_sanity_report_name, testSuiteName, methodname, testrail_name_template, reporter_extra_options)
                     common.printMsg(ret.stdout, "blue")
diff --git a/src/com/mirantis/system_qa/SharedPipeline.groovy b/src/com/mirantis/system_qa/SharedPipeline.groovy
index ddb82bb..34049e7 100644
--- a/src/com/mirantis/system_qa/SharedPipeline.groovy
+++ b/src/com/mirantis/system_qa/SharedPipeline.groovy
@@ -701,7 +701,7 @@
   def script = """
     . ${venvPath}/bin/activate
     set -ex
-    report ${reporterOptions.join(' ')} '${report_name}'
+    report ${reporterOptions.join(' ')} ${report_name}
   """
 
   def testrail_cred_id = params.TESTRAIL_CRED ?: 'testrail_system_tests'
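
The reporter options are joined with spaces and pasted into a single shell line,
so every value must already be shell-safe: that is why the JSON value for
--testrail-case-custom-fields carried escaped quotes, and why the unquoted
tcp-qa and ${report_name} forms above hold only while the values contain no
whitespace. A hedged sketch of building such a command defensively
(build_report_cmd is illustrative, not the pipeline's helper):

    import shlex

    def build_report_cmd(report_name, options):
        """Quote every reporter option so the joined shell line stays intact."""
        parts = (["report"] + [shlex.quote(o) for o in options]
                 + [shlex.quote(report_name)])
        return " ".join(parts)

    # e.g. build_report_cmd("my-report.xml", [
    #     "--testrail-add-missing-cases",
    #     '--testrail-case-custom-fields {"custom_qa_team":"9"}',
    # ])
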
diff --git a/tcp_tests/templates/bm-cicd-queens-ovs-maas/salt-context-environment.yaml b/tcp_tests/templates/bm-cicd-queens-ovs-maas/salt-context-environment.yaml
index 7722eaa..50bb147 100644
--- a/tcp_tests/templates/bm-cicd-queens-ovs-maas/salt-context-environment.yaml
+++ b/tcp_tests/templates/bm-cicd-queens-ovs-maas/salt-context-environment.yaml
@@ -4,7 +4,7 @@
       roles:
       - infra_config
       - linux_system_codename_xenial
-      - features_runtest_cfg
+      - features_runtest
       interfaces:
         ens3:
           role: single_static_mgm
diff --git a/tcp_tests/templates/bm-cicd-queens-ovs-maas/salt-context-vcp-environment.yaml b/tcp_tests/templates/bm-cicd-queens-ovs-maas/salt-context-vcp-environment.yaml
index c274129..0083582 100644
--- a/tcp_tests/templates/bm-cicd-queens-ovs-maas/salt-context-vcp-environment.yaml
+++ b/tcp_tests/templates/bm-cicd-queens-ovs-maas/salt-context-vcp-environment.yaml
@@ -4,6 +4,7 @@
       roles:
       - openstack_control_leader
       - linux_system_codename_xenial
+      - features_ironic_baremetal_nodes
       interfaces:
         ens2:
           role: single_dhcp
diff --git a/tcp_tests/templates/cookied-model-generator/salt_bm-cicd-queens-ovs-maas.yaml b/tcp_tests/templates/cookied-model-generator/salt_bm-cicd-queens-ovs-maas.yaml
index f34cc38..65d1d18 100644
--- a/tcp_tests/templates/cookied-model-generator/salt_bm-cicd-queens-ovs-maas.yaml
+++ b/tcp_tests/templates/cookied-model-generator/salt_bm-cicd-queens-ovs-maas.yaml
@@ -35,8 +35,8 @@
   cmd: |
     sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
     sed -i 's/br\-mgm/br\_mgm/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
-    sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/gateway.yml;
     sed -i 's/br\-baremetal/br\_baremetal/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/infra/kvm.yml;
+    sed -i 's/br\-ctl/br\_ctl/g' /srv/salt/reclass/classes/cluster/{{ LAB_CONFIG_NAME }}/openstack/gateway.yml;
     salt '*' saltutil.refresh_pillar;
   node_name: {{ HOSTNAME_CFG01 }}
   retry: {count: 1, delay: 10}
diff --git a/tcp_tests/tests/system/test_cvp_pipelines.py b/tcp_tests/tests/system/test_cvp_pipelines.py
index c73bb88..d010664 100644
--- a/tcp_tests/tests/system/test_cvp_pipelines.py
+++ b/tcp_tests/tests/system/test_cvp_pipelines.py
@@ -294,10 +294,20 @@
             tgt=tgt, pillar="jenkins:client:master:username")
         jenkins_pass = salt.get_single_pillar(
             tgt=tgt, pillar="jenkins:client:master:password")
+        cirros_image = salt.get_single_pillar(
+            tgt="I@salt:master",
+            pillar="_param:glance_image_cirros_location")
         jenkins_start_timeout = 60
-        jenkins_build_timeout = 1800
+        jenkins_build_timeout = 50 * 60
 
         job_name = 'cvp-stacklight'
+        job_parameters = {
+            "EXTRA_PARAMS": """
+            envs:
+              - SL_AUTOCONF=True
+              - CIRROS_QCOW2_URL={image}
+            """.format(image=cirros_image)
+        }
 
         show_step(2)
         cvp_stacklight_result = run_jenkins_job.run_job(
@@ -308,7 +318,7 @@
             build_timeout=jenkins_build_timeout,
             verbose=True,
             job_name=job_name,
-            job_parameters={},
+            job_parameters=job_parameters,
             job_output_prefix='[cvp-stacklight/{build_number}:platform {time}]'
         )
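
The cvp-stacklight job receives its environment overrides as one YAML string in
EXTRA_PARAMS, so the indentation travels inside the parameter value. A minimal
sketch of the same construction with a stand-in image URL (the test itself reads
it from the _param:glance_image_cirros_location pillar):

    # Hypothetical URL for illustration only.
    cirros_image = "http://images.example/cirros-0.4.0-x86_64-disk.img"
    job_parameters = {
        "EXTRA_PARAMS": "envs:\n"
                        "  - SL_AUTOCONF=True\n"
                        "  - CIRROS_QCOW2_URL={}\n".format(cirros_image)
    }
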
 
diff --git a/tcp_tests/tests/system/test_mcp_update.py b/tcp_tests/tests/system/test_mcp_update.py
index 1ff7642..ab38698 100644
--- a/tcp_tests/tests/system/test_mcp_update.py
+++ b/tcp_tests/tests/system/test_mcp_update.py
@@ -32,25 +32,37 @@
     underlay = underlay_ssh_manager.UnderlaySSHManager(config)
     saltmanager = salt_manager.SaltManager(config, underlay)
     targets = list()
+    telemetry_exists = False
+    barbican_exists = False
     try:
         targets += saltmanager.run_state(
             "I@keystone:server", 'test.ping')[0]['return'][0].keys()
         targets += saltmanager.run_state(
             "I@nginx:server and not I@salt:master",
             "test.ping")[0]['return'][0].keys()
+        telemetry_exists = saltmanager.get_single_pillar(
+            "I@salt:master",
+            "_param:openstack_telemetry_hostname")
+        barbican_exists = saltmanager.get_single_pillar(
+            "I@salt:master",
+            "_param:barbican_enabled")
     except BaseException as err:
         LOG.warning("Can't retrieve data from Salt. \
             Maybe cluster is not deployed completely.\
             Err: {}".format(err))
 
-    # TODO: add check for Manila  existence
-    # # Commented to avoid fails during OpenStack updates.
-    # # Anyway we don't have deployments with Manila yet
-    # targets.append('share*')
-    # TODO: add check for Tenant Telemetry  existence
-    targets.append('mdb*')
-    # TODO: add check for Barbican existence
-    targets.append('kmn*')
+    # check for Manila existence
+    # if saltmanager.get_single_pillar("I@salt:master",
+    #                                  "_param:manila_service_protocol"):
+    #     targets.append('share*')
+
+    # check for Tenant Telemetry existence
+    if telemetry_exists:
+        targets.append('mdb*')
+
+    # check for Barbican existence
+    if barbican_exists:
+        targets.append('kmn*')
     return targets
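
The fixture now appends the optional mdb*/kmn* targets only when the matching
pillar reports the service as deployed. The same pattern, condensed into a
sketch (optional_targets is hypothetical; it reuses get_single_pillar as called
above):

    def optional_targets(saltmanager):
        """Return target globs only for services the cluster actually runs."""
        checks = {
            "mdb*": "_param:openstack_telemetry_hostname",  # Tenant Telemetry
            "kmn*": "_param:barbican_enabled",              # Barbican
        }
        targets = []
        for glob, pillar in checks.items():
            try:
                if saltmanager.get_single_pillar("I@salt:master", pillar):
                    targets.append(glob)
            except BaseException:
                pass  # pillar missing: treat the service as not deployed
        return targets
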
 
 
@@ -79,6 +91,7 @@
     reclass.add_key(repo_param, proposed_repo, "cluster/*/ceph/init.yml")
 
     salt_actions.run_state("*", "saltutil.refresh_pillar")
+    salt_actions.enforce_state("*", "salt.minion")
     salt_actions.enforce_state("I@jenkins:client", "jenkins.client")
 
 
diff --git a/tcp_tests/tests/system/test_upgrade_pike_queens.py b/tcp_tests/tests/system/test_upgrade_pike_queens.py
index 6314364..9d3288e 100644
--- a/tcp_tests/tests/system/test_upgrade_pike_queens.py
+++ b/tcp_tests/tests/system/test_upgrade_pike_queens.py
@@ -25,18 +25,21 @@
     Created by https://mirantis.jira.com/browse/PROD-32683
     """
     def execute_pre_post_steps(self, underlay_actions,
-                               cfg_node, verbose, type):
+                               cfg_node, verbose, type, with_verify=False):
 
         # ### Get the list of all upgradable OpenStack components ############
         ret = underlay_actions.check_call(
             node_name=cfg_node, verbose=verbose,
             cmd="salt 'cfg01*' config.get"
-                " orchestration:upgrade:applications --out=json")
-        cfg_nodes_list = json.loads(ret['stdout_str'])
-        services_for_upgrade = []
-        for i in cfg_nodes_list:
-            for j in cfg_nodes_list[i]:
-                services_for_upgrade.append(j)
+                " orchestration:upgrade:applications --out=json")['stdout_str']
+        # It returns JSON like {"local": {"galera":{"priority":910},"heat":{..
+        all_formulas = json.loads(ret).get("local")
+        sorted_formulas = sorted(all_formulas.items(),
+                                 key=lambda x: x[1].get("priority"),
+                                 reverse=True)
+        # Because the 'sorted' function returns a list of tuples like
+        # [('aodh', {'priority': 2000}), ('ceilometer', {'priority': 1950}), ...
+        services_for_upgrade = [item[0] for item in sorted_formulas]
         LOG.info(services_for_upgrade)
 
         # ###### Get the list of all target node #############################
@@ -46,12 +49,6 @@
                 "grep -v Accepted")['stdout_str'].splitlines()
         LOG.info(list_nodes)
 
-        # #### guarantee that the KeystoneRC metadata is exported to mine ####
-        ret = underlay_actions.check_call(
-            node_name=cfg_node, verbose=verbose,
-            cmd="salt -C 'I@keystone:client:enabled' state.sls"
-                " keystone.upgrade.pre")
-
         # ## For each target node, get the list of the installed applications
         for node in list_nodes:
             salt_pillars = underlay_actions.check_call(
@@ -66,11 +63,22 @@
                 node_applications = node_app_output[node][need_output]
                 LOG.info(node_applications)
                 for service in services_for_upgrade:
-                    if service in node_applications:
-                        underlay_actions.check_call(
-                            node_name=cfg_node, verbose=verbose,
-                            cmd="salt {} state.apply "
-                                "{}.upgrade.{}".format(node, service, type))
+                    if service not in node_applications:
+                        continue
+                    cmd = "salt {} state.apply {}.upgrade.{}".\
+                        format(node, service, type)
+                    LOG.info("Apply: {}".format(cmd))
+                    underlay_actions.check_call(
+                        node_name=cfg_node, verbose=verbose, cmd=cmd)
+
+                    # Run upgrade.verify if needed
+                    if not with_verify:
+                        continue
+                    cmd = "salt {} state.apply {}.upgrade.verify".\
+                        format(node, service)
+                    LOG.info("Apply: {}".format(cmd))
+                    underlay_actions.check_call(
+                        node_name=cfg_node, verbose=verbose, cmd=cmd)
 
     @pytest.mark.day1_underlay
     def test_upgrade_pike_queens(self,
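
execute_pre_post_steps now walks the services in descending priority, skips any
service a node does not run, and can chase each upgrade step with its
upgrade.verify state. A sketch of that control flow as a command generator
(upgrade_states is hypothetical, for illustration only):

    def upgrade_states(node, services, installed, step, with_verify=False):
        """Yield `state.apply` commands in priority order for one node."""
        for service in services:       # already sorted by priority, descending
            if service not in installed:
                continue
            yield "salt {} state.apply {}.upgrade.{}".format(node, service, step)
            if with_verify:
                yield "salt {} state.apply {}.upgrade.verify".format(node, service)
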
@@ -97,7 +105,18 @@
         infra_init_yaml = "cluster/*/infra/init.yml"
         # ########## Perform the pre-upgrade activities ##########
         show_step(1)
+        salt_actions.enforce_state("I@keystone:client:os_client_config",
+                                   "keystone.client.os_client_config")
+        # #### guarantee that the KeystoneRC metadata is exported to mine ####
+        underlay_actions.check_call(
+            node_name=cfg_node, verbose=verbose,
+            cmd="salt -C 'I@keystone:client:enabled' state.sls"
+                " keystone.upgrade.pre")
+        # ### Run upgrade.pre and upgrade.verify
+        self.execute_pre_post_steps(underlay_actions, cfg_node,
+                                    verbose, 'pre', with_verify=True)
         LOG.info('Add parameters to {}'.format(infra_init_yaml))
+        # ### Edit Infra INIT #####
         reclass_actions.add_bool_key(
             'parameters._param.openstack_upgrade_enabled',
             'true',
@@ -112,6 +131,18 @@
             'parameters._param.openstack_old_version',
             'pike',
             infra_init_yaml)
+
+        # ### Edit Openstack INIT #####
+        openstack_init_yaml = "cluster/*/openstack/init.yml"
+        LOG.info('Add parameters to {}'.format(openstack_init_yaml))
+        reclass_actions.add_key('parameters._param.gnocchi_version',
+                                4.2,
+                                openstack_init_yaml)
+        reclass_actions.add_key('parameters._param.gnocchi_old_version',
+                                4.0,
+                                openstack_init_yaml)
+
+        # ### Edit Openstack control #####
         reclass_actions.add_class(
             'system.keystone.client.v3',
             'cluster/*/openstack/control_init.yml'
@@ -120,14 +151,38 @@
             node_name=cfg_node, verbose=verbose,
             cmd="cd /srv/salt/reclass; git add -u && "
                 "git commit --allow-empty -m 'Cluster model update'")
+
+        # ### Apply state to enable changes in reclass
         LOG.info('Perform refresh_pillar')
         salt_actions.run_state("*", "saltutil.refresh_pillar")
+
         self.execute_pre_post_steps(underlay_actions, cfg_node,
                                     verbose, 'pre')
         LOG.info('Perform refresh_pillar')
         salt_actions.run_state("*", "saltutil.refresh_pillar")
         # ########## Upgrade control VMs #########
         show_step(2)
+        LOG.info("Enable upgrade jobs in cluster Jenkins")
+        cicd_leader = "cluster/*/cicd/control/leader.yml"
+        reclass_actions.add_class(
+            "system.jenkins.client.job.deploy.update.upgrade",
+            cicd_leader
+        )
+        reclass_actions.add_class(
+            "system.jenkins.client.job.deploy.update.upgrade_ovs_gateway",
+            cicd_leader
+        )
+        reclass_actions.add_class(
+            "system.jenkins.client.job.deploy.update.upgrade_compute",
+            cicd_leader
+        )
+        salt_actions.enforce_state("I@jenkins:client",
+                                   "jenkins.client")
+        # #### Add Queens repos
+        salt_actions.enforce_state("*", "linux.system.repo")
+        salt_actions.enforce_state("*", "salt.minion")
+
+        # ########## Upgrade control nodes  ###########
         LOG.info('Upgrade control VMs')
         job_name = 'deploy-upgrade-control'
         job_parameters = {
@@ -135,18 +190,43 @@
             'OS_DIST_UPGRADE': False,
             'OS_UPGRADE': False
         }
+        # ####### Run job for ctl* ###
+        job_parameters["TARGET_SERVERS"] = "ctl*"
         update_control_vms = dt.start_job_on_cid_jenkins(
             job_name=job_name,
             job_parameters=job_parameters)
         assert update_control_vms == 'SUCCESS'
-        # ########## Upgrade gatewey nodes  ###########
+
+        # ####### Run job for mdb* ###
+        job_parameters["TARGET_SERVERS"] = "mdb*"
+        update_control_vms = dt.start_job_on_cid_jenkins(
+            job_name=job_name,
+            job_parameters=job_parameters)
+        assert update_control_vms == 'SUCCESS'
+
+        # ####### Run job for kmn* ###
+        job_parameters["TARGET_SERVERS"] = "kmn*"
+        update_control_vms = dt.start_job_on_cid_jenkins(
+            job_name=job_name,
+            job_parameters=job_parameters)
+        assert update_control_vms == 'SUCCESS'
+
+        # ####### Run job for prx* ###
+        job_parameters["TARGET_SERVERS"] = "prx*"
+        update_control_vms = dt.start_job_on_cid_jenkins(
+            job_name=job_name,
+            job_parameters=job_parameters)
+        assert update_control_vms == 'SUCCESS'
+
+        # ########## Upgrade gateway nodes  ###########
         show_step(3)
         LOG.info('Upgrade gateway')
         job_name = 'deploy-upgrade-ovs-gateway'
         job_parameters = {
             'INTERACTIVE': False,
             'OS_DIST_UPGRADE': False,
-            'OS_UPGRADE': False
+            'OS_UPGRADE': False,
+            'TARGET_SERVERS': "gtw*"
         }
         update_gateway = dt.start_job_on_cid_jenkins(
             job_name=job_name,
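
The four control-plane runs above differ only in TARGET_SERVERS; an equivalent
loop keeps the same order and the same per-target assertion (a sketch, reusing
the dt and job_parameters names from this test):

    for target in ("ctl*", "mdb*", "kmn*", "prx*"):
        job_parameters["TARGET_SERVERS"] = target
        result = dt.start_job_on_cid_jenkins(
            job_name="deploy-upgrade-control",
            job_parameters=job_parameters)
        assert result == 'SUCCESS', "upgrade failed for {}".format(target)
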
@@ -159,12 +239,14 @@
         job_parameters = {
             'INTERACTIVE': False,
             'OS_DIST_UPGRADE': False,
-            'OS_UPGRADE': False
+            'OS_UPGRADE': False,
+            'TARGET_SERVERS': "cmp*"
         }
         update_computes = dt.start_job_on_cid_jenkins(
             job_name=job_name,
             job_parameters=job_parameters)
         assert update_computes == 'SUCCESS'
+
         # ############ Perform the post-upgrade activities ##########
         show_step(5)
         LOG.info('Add parameters._param.openstack_upgrade_enabled false'