Fixes for test scenarios

- run-test-scenarios.groovy: mark the build as FAILURE when the test
  run throws instead of silently passing, and archive only *.xml and
  *.log reports
- test-scenarios.yml: give every scenario job a human-readable
  display-name, add the ceph_cmn and ceph_mgr scenarios, drop the
  TCP_QA_REFS parameter and add MCP_VERSION
- reclass_manager.py: make add_class return early when the class is
  already present, and add a delete_class method
- test_ceph_operations.py: widen add_xtra_node_to_salt to session
  scope and describe the xtra node in reclass before accepting its
  salt key; add the wa_prod36167 workaround fixture and the
  TestCephRgw add/remove node tests

PROD-36273

Change-Id: I135d9fed6d4c750ea6b03cb51c6b54d3820913d6
diff --git a/jobs/pipelines/run-test-scenarios.groovy b/jobs/pipelines/run-test-scenarios.groovy
index f62825f..31aff06 100644
--- a/jobs/pipelines/run-test-scenarios.groovy
+++ b/jobs/pipelines/run-test-scenarios.groovy
@@ -48,12 +48,13 @@
                     }
                 } catch (e) {
                     common.printMsg("Tests are failed: " + e.message, "purple")
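+                    // Mark the build failed explicitly: the exception is caught here and not rethrown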
+                    currentBuild.result = 'FAILURE'
                 }
             } // stage("Run tests")
 
             stage("Archive all xml reports") {
                 dir("${env.slave_workdir }") {
-                    archiveArtifacts artifacts: "**/*.xml,**/*.ini,**/*.log,**/*.tar.gz"
+                    archiveArtifacts artifacts: "**/*.xml,**/*.log"
                     }
             }
 
diff --git a/jobs/templates/test-scenarios.yml b/jobs/templates/test-scenarios.yml
index 730341e..6b54a3c 100644
--- a/jobs/templates/test-scenarios.yml
+++ b/jobs/templates/test-scenarios.yml
@@ -65,14 +65,17 @@
       - backup-saltmaster-queens-dvr-sl:
           run-test-opts: '-k TestBackupRestoreMaster'
           deployment: heat-cicd-queens-dvr-sl
+          display-name: Backup/Restore SaltMaster (queens)
 
       - backup-saltmaster-pike-dvr-sl:
          run-test-opts: '-k TestBackupRestoreMaster'
          deployment: heat-cicd-pike-dvr-sl
+         display-name: Backup/Restore SaltMaster (pike)
 
       - backup-cassandra-queens-contrail-sl:
          run-test-opts: '-k TestBackupRestoreCassandra'
          deployment: heat-cicd-queens-contrail41-sl
+         display-name: Backup/Restore Cassandra
 
     jobs:
       - '{test_scenario}'
@@ -81,8 +84,17 @@
     name: 'ceph-tests'
     test_scenario:
       - ceph_osd-queens-dvr-sl:
-         run-test-opts: '-k TestCephOsd'
-         deployment: heat-cicd-queens-dvr-sl
+          run-test-opts: '-k TestCephOsd'
+          deployment: heat-cicd-queens-dvr-sl
+          display-name: Add/Remove OSD node
+      - ceph_cmn-queens-dvr-sl:
+          run-test-opts: '-k TestCephMon'
+          deployment: heat-cicd-queens-dvr-sl
+          display-name: Add/Remove CMN node
+      - ceph_mgr-queens-dvr-sl:
+          run-test-opts: '-k TestCephMgr'
+          deployment: heat-cicd-queens-dvr-sl
+          display-name: Add/Remove MGR node
     jobs:
       - '{test_scenario}'
 
@@ -119,30 +131,36 @@
           deployment: released-heat-cicd-pike-dvr-sl
           disabled: true
           run-test-opts: '{test-opt}'
+          display-name: MCP update (pike)
 
       - mcp-update-queens-dvr-sl:
           deployment: released-heat-cicd-queens-dvr-sl
           disabled: true
           run-test-opts: '{test-opt}'
+          display-name: MCP update (queens)
 
       - mcp-update-pike-contrail-sl:
           deployment: released-heat-cicd-pike-contrail41-sl
           disabled: true
           run-test-opts: '{test-opt-with-contrail}'
+          display-name: MCP update (pike + OC)
 
       - mcp-update-queens-contrail-sl:
           deployment: released-heat-cicd-queens-contrail41-sl
           disabled: true
           run-test-opts: '{test-opt-with-contrail}'
+          display-name: MCP update (queens + OC)
 
       - os-update-pike-to-queens:
           deployment: heat-cicd-pike-dvr-sl
           run-test-opts: '-k TestUpdatePikeToQueens'
+          display-name: Update Pike -> Queens
 
       - ceph-update-luminous-to-nautilus:
           deployment: heat-cicd-pike-dvr-sl
           disabled: true
           run-test-opts: '-k TestCephUpdate'
+          display-name: Update Ceph Luminous -> Nautilus
 
     jobs:
       - '{test_scenario}'
@@ -150,6 +168,7 @@
 ###################### JOB TEMPLATE ###################
 - job-template:
     name: '{test_scenario}'
+    display-name: '{display-name}'
     project-type: pipeline
     concurrent: false
     disabled: '{disabled|false}'
@@ -192,12 +211,6 @@
         name: ENV_NAME
         trim: 'false'
     - string:
-        default: ''
-        description: 'Example: refs/changes/89/411189/36
-                       (for now - only one reference allowed)'
-        name: TCP_QA_REFS
-        trim: 'false'
-    - string:
         default: 'openstack_slave_{deployment}'
         description: 'Required: Name of the jenkins slave to create the environment
                       To be set by the parent deployment job.'
@@ -210,6 +223,11 @@
         name: PARENT_WORKSPACE
         trim: 'false'
     - string:
+        default: '2019.2.0'
+        description: 'MCP version'
+        name: MCP_VERSION
+        trim: 'false'
+    - string:
         default: ''
         description: 'Completed steps to install components on the environment.
                       If tests require some additional components, it may be installed in
diff --git a/tcp_tests/managers/reclass_manager.py b/tcp_tests/managers/reclass_manager.py
index 56116fc..7c75086 100644
--- a/tcp_tests/managers/reclass_manager.py
+++ b/tcp_tests/managers/reclass_manager.py
@@ -155,6 +155,7 @@
                 value,
                 short_path
             ))
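+            # Class already listed in the file; skip the 'add-key' call below.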
+            return
 
         self.ssh.check_call(
             "{reclass_tools} add-key classes {value} \
@@ -164,6 +165,23 @@
                 path=short_path
             ))
 
+    def delete_class(self, value, short_path):
+        """
+        Shows warning if class doesn't exist
+        :param value: role to delete from 'classes' parameter in the reclass
+        :param short_path: path to reclass yaml file.
+            It takes into account default path where the reclass locates.
+            May look like cluster/*/cicd/control/leader.yml
+        :return: None
+        """
+        self.ssh.check_call(
+            "{reclass_tools} del-key classes {value} \
+            /srv/salt/reclass/classes/{path}".format(
+                reclass_tools=self.reclass_tools_cmd,
+                value=value,
+                path=short_path
+            ))
+
     def delete_key(self, key, short_path):
         """
         Remove key from the provided file
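
A minimal usage sketch of the updated manager (reclass_actions is the
ReclassManager fixture the system tests use; the class and path below are
the ones the wa_prod36167 fixture passes):

    # add_class now returns early instead of running 'add-key' when the
    # class is already listed in the target file.
    reclass_actions.add_class("system.salt.control.virt",
                              "classes/cluster/*/infra/kvm.yml")
    # delete_class removes the class via 'reclass-tools del-key'; it only
    # warns when the class is absent.
    reclass_actions.delete_class("system.salt.control.virt",
                                 "classes/cluster/*/infra/kvm.yml")
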
diff --git a/tcp_tests/tests/system/test_ceph_operations.py b/tcp_tests/tests/system/test_ceph_operations.py
index 02a6888..6f2fb52 100644
--- a/tcp_tests/tests/system/test_ceph_operations.py
+++ b/tcp_tests/tests/system/test_ceph_operations.py
@@ -66,7 +66,7 @@
 """
 
 
-@pytest.fixture(scope='module')
+@pytest.fixture(scope='session')
 def add_xtra_node_to_salt(salt_actions, underlay_actions,
                           config, reclass_actions):
     """
@@ -80,6 +80,15 @@
     cfg_node = [node['node_name'] for node in config.underlay.ssh
                 if 'salt_master' in node.get('roles')][0]
 
+    salt_actions.enforce_state("*", "reclass")
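+    # Describe the new xtra node in reclass before accepting its salt key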
+    reclass_actions.add_class(
+        "environment.heat-cicd-queens-dvr-sl.linux_network_interface",
+        short_path="../nodes/_generated/xtra.*.yml")
+    reclass_actions.add_class("environment.heat-cicd-queens-dvr-sl.overrides",
+                              short_path="../nodes/_generated/xtra.*.yml")
+    reclass_actions.merge_context(yaml_context=xtra_network_interface,
+                                  short_path="../nodes/_generated/xtra.*.yml")
+
     underlay_actions.check_call(
             "salt-key -a {node} --include-all -y".format(node=xtra_node),
             node_name=cfg_node,
@@ -90,9 +99,8 @@
         node_name=xtra_node,
         raise_on_err=False)
     salt_actions.enforce_state("I@salt:master", "reclass")
-
-    reclass_actions.merge_context(yaml_context=xtra_network_interface,
-                                  short_path="../nodes/_generated/xtra.*.yml")
+    salt_actions.enforce_state("xtra*", "linux")
+    salt_actions.enforce_state("xtra*", "openssh")
 
     yield
 
@@ -103,6 +111,12 @@
     #         raise_on_err=False)
 
 
+@pytest.fixture(scope='session')
+def wa_prod36167(reclass_actions):
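+    """Workaround for PROD-36167: drop system.salt.control.virt."""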
+    reclass_actions.delete_class("system.salt.control.virt",
+                                 "classes/cluster/*/infra/kvm.yml")
+
+
 @pytest.mark.usefixtures("add_xtra_node_to_salt")
 class TestCephOsd(object):
 
@@ -310,6 +324,12 @@
     def test_add_node_process(self,
                               drivetrain_actions,
                               describe_node_in_reclass):
+        """
+        https://docs.mirantis.com/mcp/q4-18/mcp-operations-guide/openstack-operations/ceph-operations/manage-nodes/add-mon-nodes.html
+        :param drivetrain_actions:
+        :param describe_node_in_reclass:
+        :return:
+        """
         dt = drivetrain_actions
 
         job_name = "ceph-add-node"
@@ -340,6 +360,139 @@
         assert job_result == 'SUCCESS', job_description
 
 
+add_rgw_ceph_init_yml = """
+parameters:
+    _param:
+        ceph_rgw_node04_hostname: xtra
+        ceph_rgw_node04_address: 10.6.0.205
+        ceph_rgw_node04_ceph_public_address: 10.166.49.209
+    linux:
+        network:
+            host:
+                rgw04:
+                    address: ${_param:ceph_rgw_node04_address}
+                    names:
+                    - ${_param:ceph_rgw_node04_hostname}
+                    - ${_param:ceph_rgw_node04_hostname}.${_param:cluster_domain}
+"""  # noqa: E501
+
+add_rgw_ceph_rgw_yml = """
+parameters:
+  _param:
+    cluster_node04_hostname: ${_param:ceph_rgw_node04_hostname}
+    cluster_node04_address: ${_param:ceph_rgw_node04_address}
+  ceph:
+    common:
+      keyring:
+        rgw.xtra:
+          caps:
+            mon: "allow rw"
+            osd: "allow rwx"
+  haproxy:
+    proxy:
+      listen:
+        radosgw:
+          servers:
+            - name: ${_param:cluster_node04_hostname}
+              host: ${_param:cluster_node04_address}
+              port: ${_param:haproxy_radosgw_source_port}
+              params: check
+"""
+
+add_rgw_config_init_yml = """
+parameters:
+  reclass:
+    storage:
+      node:
+        ceph_rgw_node04:
+          name: ${_param:ceph_rgw_node04_hostname}
+          domain: ${_param:cluster_domain}
+          classes:
+          - cluster.${_param:cluster_name}.ceph.rgw
+          params:
+            salt_master_host: ${_param:reclass_config_master}
+            linux_system_codename: ${_param:ceph_rgw_system_codename}
+            single_address: ${_param:ceph_rgw_node04_address}
+            deploy_address: ${_param:ceph_rgw_node04_deploy_address}
+            ceph_public_address: ${_param:ceph_rgw_node04_ceph_public_address}
+            keepalived_vip_priority: 104
+"""
+
+
+@pytest.mark.usefixtures("add_xtra_node_to_salt")
+class TestCephRgw(object):
+    @pytest.fixture
+    def describe_node_in_reclass(self,
+                                 reclass_actions, salt_actions):
+        LOG.info("Executing pytest SETUP "
+                 "from describe_node_in_reclass fixture")
+        reclass = reclass_actions
+        # ---- cluster/*/ceph/init.yml --------------
+        reclass.merge_context(yaml_context=add_rgw_ceph_init_yml,
+                              short_path="cluster/*/ceph/init.yml")
+
+        reclass.merge_context(yaml_context=add_rgw_ceph_rgw_yml,
+                              short_path="cluster/*/ceph/rgw.yml")
+
+        reclass.merge_context(yaml_context=add_rgw_config_init_yml,
+                              short_path="cluster/*/infra/config/init.yml")
+
+        salt_actions.run_state("*", "saltutil.refresh_pillar")
+
+    @pytest.fixture
+    def remove_node_from_reclass(self,
+                                 reclass_actions, salt_actions):
+        LOG.info("Executing pytest SETUP "
+                 "from remove_node_from_reclass fixture")
+        # reclass = reclass_actions
+        # reclass.delete_key(
+        #     key="parameters.reclass.storage.node.ceph_rgw_node04",
+        #     short_path="cluster/*/infra/config/init.yml")
+        # reclass.delete_key(
+        #     key="parameters.linux.network.host.xtra",
+        #     short_path="cluster/*/ceph/init.yml"
+        # )
+
+    def test_add_node_process(self,
+                              drivetrain_actions,
+                              describe_node_in_reclass):
+        """
+        https://docs.mirantis.com/mcp/q4-18/mcp-operations-guide/openstack-operations/ceph-operations/manage-nodes/add-rgw-nodes.html
+        :param drivetrain_actions:
+        :param describe_node_in_reclass:
+        :return:
+        """
+        dt = drivetrain_actions
+
+        job_name = "ceph-add-node"
+        job_parameters = {
+            'HOST': 'xtra*',
+            'USE_UPMAP': True
+            }
+        job_result, job_description = dt.start_job_on_jenkins(
+            job_name=job_name,
+            job_parameters=job_parameters,
+            verbose=True)
+        assert job_result == 'SUCCESS', job_description
+
+    def test_delete_node_process(self,
+                                 remove_node_from_reclass,
+                                 drivetrain_actions):
+        dt = drivetrain_actions
+
+        job_name = "ceph-remove-node"
+        job_parameters = {
+            'HOST': 'xtra*',
+            'USE_UPMAP': True
+            }
+        job_result, job_description = dt.start_job_on_jenkins(
+            job_name=job_name,
+            job_parameters=job_parameters,
+            verbose=True)
+        assert job_result == 'SUCCESS', job_description
+
+
+@pytest.mark.usefixtures("add_xtra_node_to_salt")
 class TestCephMgr(object):
     def test_add_node(self):
         pass
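
The wa_prod36167 fixture is defined above but not yet requested by any
test in this change; a test class would opt into the workaround via
usefixtures. A hypothetical sketch (the class name is illustrative):

    # Hypothetical: opt a test class into the PROD-36167 workaround.
    @pytest.mark.usefixtures("add_xtra_node_to_salt", "wa_prod36167")
    class TestCephOsdWithWorkaround(object):
        def test_add_node_process(self, drivetrain_actions,
                                  describe_node_in_reclass):
            job_result, job_description = \
                drivetrain_actions.start_job_on_jenkins(
                    job_name="ceph-add-node",
                    job_parameters={'HOST': 'xtra*', 'USE_UPMAP': True},
                    verbose=True)
            assert job_result == 'SUCCESS', job_description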