Merge "Add ssh jenkins agents for released-* deployments"
diff --git a/checklist.yaml b/checklist.yaml
index 8091d24..7757900 100644
--- a/checklist.yaml
+++ b/checklist.yaml
@@ -140,6 +140,12 @@
     status: ProdFailed
     defects: PROD-35212
 
+  - title: tempest.api.volume.test_volumes_negative.VolumesNegativeTest.test_create_volume_from_image_with_decreasing_size[id-5b810c91-0ad1-47ce-aee8-615f789be78f,image,negative]
+    errors:
+      - 502 PUT https://10.6.0.80:9292/v2/images/
+    status: ProdFailed
+    defects: PROD-35212
+
   - title: tempest.api.compute.servers.test_delete_server.DeleteServersTestJSON.test_delete_server_while_in_shelved_state[id-bb0cb402-09dd-4947-b6e5-5e7e1cfa61ad]
     errors:
       - 'tempest.lib.exceptions.TimeoutException: Request timed out'
@@ -166,6 +172,13 @@
     status: ProdFailed
     defects: PROD-34693
 
+  - title: tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_create_backup[id-b963d4f1-94b3-4c40-9e97-7b583f46e470,image]
+    errors:
+      - "message': u'No image found with ID"
+      - "u'code': u'404 Not Found'"
+    status: ProdFailed
+    defects: PROD-36275
+
   - title: octavia_tempest_plugin.tests.api.v2.test_load_balancer.LoadBalancerAPITest.test_load_balancer_list[id-6546ef3c-c0e2-46af-b892-f795f4d01119,smoke]
     errors:
       - show_loadbalancer provisioning_status updated to an invalid state of ERROR
@@ -259,6 +272,13 @@
     status: ProdFailed
     defects: PROD-25221
 
+  - title: tempest.api.compute.servers.test_attach_interfaces.AttachInterfacesTestJSON.test_create_list_show_delete_interfaces_by_fixed_ip[id-d290c06c-f5b3-11e7-8ec8-002293781009,network]
+    errors:
+      - 'tempest.lib.exceptions.NotFound: Object not found'
+      - "u'type': u'PortNotFound'"
+    status: ProdFailed
+    defects: PROD-29650
+
 ## ==================================================
 #             CVP SANITY
 ## ==================================================
@@ -483,10 +503,16 @@
     status: ProdFailed
 
   - title: test_docker_container_status
-    comment: New in 2019.2.12
+    comment: New in 2019.2.14
     errors:
-      - Container monitoring_alertmanager
-      - "has incorrect state 'Ready'. Current state: 'Assigned less than a second ago'"
+      - "Container monitoring_alerta.1 on the node mon01 has incorrect state 'Ready'. Current state: 'Ready 1 second ago'"
+    defects: PROD-35958
+    status: ProdFailed
+
+  - title: test_docker_service_replicas
+    comment: New in 2019.2.14
+    errors:
+      - "Service 'monitoring_alerta' in mode 'replicated' has incorrect count of replicas: 0/1"
     defects: PROD-35958
     status: ProdFailed
 
diff --git a/jobs/global.yaml b/jobs/global.yaml
index 87a9d7f..a62bfab 100644
--- a/jobs/global.yaml
+++ b/jobs/global.yaml
@@ -4,5 +4,5 @@
      Do not edit this job through the web! <br>
       Please use jenkins-job-builder in git <br>
       git clone ssh://gerrit.mcp.mirantis.com:29418/mcp/tcp-qa
-    current-version: 2019.2.13
-    previous-version: 2019.2.12
\ No newline at end of file
+    current-version: 2019.2.14
+    previous-version: 2019.2.13
\ No newline at end of file
diff --git a/jobs/pipelines/run-test-scenarios.groovy b/jobs/pipelines/run-test-scenarios.groovy
index 877c0c8..f62825f 100644
--- a/jobs/pipelines/run-test-scenarios.groovy
+++ b/jobs/pipelines/run-test-scenarios.groovy
@@ -24,6 +24,7 @@
 
 def steps = env.PASSED_STEPS
 def make_snapshot_stages = false
+env.LAB_CONFIG_NAME = env.LAB_CONFIG_NAME ?: env.ENV_NAME
 
 timeout(time: 23, unit: 'HOURS') {
     node ("${PARENT_NODE_NAME}") {
diff --git a/jobs/templates/test-scenarios.yml b/jobs/templates/test-scenarios.yml
index 10388e0..730341e 100644
--- a/jobs/templates/test-scenarios.yml
+++ b/jobs/templates/test-scenarios.yml
@@ -72,7 +72,7 @@
 
       - backup-cassandra-queens-contrail-sl:
          run-test-opts: '-k TestBackupRestoreCassandra'
-         deployment: heat-cicd-queens-contrail-sl
+         deployment: heat-cicd-queens-contrail41-sl
 
     jobs:
       - '{test_scenario}'
@@ -92,12 +92,12 @@
       --keep-duplicates --maxfail=1 \
       tcp_tests/tests/system/test_mcp_update.py::TestUpdateMcpCluster::test_update_drivetrain \
       tcp_tests/tests/system/test_mcp_update.py::TestOpenstackUpdate \
-      tcp_tests/tests/system/test_mcp_update.py::TestUpdateMcpCluster::test_update_galera
+      tcp_tests/tests/system/test_mcp_update.py::TestUpdateMcpCluster::test_update_galera \
       tcp_tests/tests/system/test_mcp_update.py::TestUpdateMcpCluster::test_update_rabbit \
       tcp_tests/tests/system/test_mcp_update.py::TestUpdateMcpCluster::test_update_stacklight \
       tcp_tests/tests/system/test_mcp_update.py::TestUpdateMcpCluster::test_update_ceph \
       \
-      tcp_tests/tests/system/test_3rdparty_suites.py::Test3rdpartySuites::test_run_tempest \
+      tcp_tests/tests/system/test_cvp_pipelines.py::TestCvpPipelines::test_run_cvp_tempest \
       tcp_tests/tests/system/test_cvp_pipelines.py::TestCvpPipelines::test_run_cvp_func_sanity \
       tcp_tests/tests/system/test_cvp_pipelines.py::TestCvpPipelines::test_run_cvp_stacklight
     test-opt-with-contrail: |-
@@ -105,12 +105,12 @@
       tcp_tests/tests/system/test_mcp_update.py::TestUpdateMcpCluster::test_update_drivetrain \
       tcp_tests/tests/system/test_upgrade_contrail.py::TestUpdateContrail \
       tcp_tests/tests/system/test_mcp_update.py::TestOpenstackUpdate \
-      tcp_tests/tests/system/test_mcp_update.py::TestUpdateMcpCluster::test_update_galera
+      tcp_tests/tests/system/test_mcp_update.py::TestUpdateMcpCluster::test_update_galera \
       tcp_tests/tests/system/test_mcp_update.py::TestUpdateMcpCluster::test_update_rabbit \
       tcp_tests/tests/system/test_mcp_update.py::TestUpdateMcpCluster::test_update_stacklight \
       tcp_tests/tests/system/test_mcp_update.py::TestUpdateMcpCluster::test_update_ceph \
       \
-      tcp_tests/tests/system/test_3rdparty_suites.py::Test3rdpartySuites::test_run_tempest \
+      tcp_tests/tests/system/test_cvp_pipelines.py::TestCvpPipelines::test_run_cvp_tempest \
       tcp_tests/tests/system/test_cvp_pipelines.py::TestCvpPipelines::test_run_cvp_func_sanity \
       tcp_tests/tests/system/test_cvp_pipelines.py::TestCvpPipelines::test_run_cvp_stacklight
 
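Note that the two hunks above fix a quoting bug in addition to swapping
test_run_tempest for test_run_cvp_tempest: the test_update_galera lines
previously ended without a trailing backslash, so the multi-line
run-test-opts string terminated at that point and the test_update_rabbit,
test_update_stacklight and test_update_ceph selectors on the following
lines were silently dropped from the pytest invocation.
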
diff --git a/tcp_tests/managers/jenkins/client.py b/tcp_tests/managers/jenkins/client.py
index 063d400..79b248c 100644
--- a/tcp_tests/managers/jenkins/client.py
+++ b/tcp_tests/managers/jenkins/client.py
@@ -28,22 +28,25 @@
             :return: response
             :raise ConnectionError after several unsuccessful connections
             """
-            err = None
+            err_msg = None
             for count in range(max_count):
                 try:
                     return func(*args, **kwargs)
                 except Exception as err:
-                    print("Try {count}/{max_count} caught Exception: {err}. \
-                    \n... repeat after {secs} secs".
+                    err_msg = err
+                    print("Try {count}/{max_count} caught "
+                          "Exception in {fn}: {err}."
+                          "\n... repeat after {secs} secs".
                           format(err=err,
                                  count=count+1,
                                  max_count=max_count,
-                                 secs=sleep_before_retry))
+                                 secs=sleep_before_retry,
+                                 fn=func.__name__))
                 time.sleep(sleep_before_retry)
             print("Function failed in {total_time} seconds".format(
                 total_time=max_count*sleep_before_retry)
             )
-            raise err
+            raise err_msg
         return __retry
     return _retry
 
@@ -191,7 +194,11 @@
 
         def building():
             try:
-                status = not self.build_info(name, build_id)['building']
+                # Nested retry decorator: we may need to wait >30 min here,
+                # because during the mcp-upgrade job Jenkins itself can be
+                # upgrading and therefore inaccessible for >20 min.
+                status = not retry(max_count=30)(
+                    self.build_info)(name, build_id)['building']
             except ConnectionError:
                 status = False
 
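For context on the hunks above: retry() in client.py is a parameterized
decorator factory, and the second hunk re-wraps an already-decorated
bound method in an extra, longer-lived retry loop. A minimal standalone
sketch of the same pattern (simplified names, not the module's exact code):

    import time

    def retry(max_count=10, sleep_before_retry=60):
        def _retry(func):
            def __retry(*args, **kwargs):
                last_err = None
                for _ in range(max_count):
                    try:
                        return func(*args, **kwargs)
                    except Exception as err:
                        # Python 3 unbinds `err` when the except block exits,
                        # so keep a reference for the final raise; this is
                        # why the patch introduces err_msg.
                        last_err = err
                        time.sleep(sleep_before_retry)
                raise last_err
            return __retry
        return _retry

    # Nested use, as in building(): ride out a Jenkins restart window.
    # status = not retry(max_count=30)(self.build_info)(name, build_id)['building']
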
diff --git a/tcp_tests/requirements.txt b/tcp_tests/requirements.txt
index e9c2917..462fa77 100644
--- a/tcp_tests/requirements.txt
+++ b/tcp_tests/requirements.txt
@@ -1,4 +1,5 @@
 # git+git://github.com/openstack/fuel-devops.git@887368d#egg=project[postgre]   # Use this requirement for PostgreSQL
+mock>=1.2,<4.0.0 # pin this first so the unpinned requirements below cannot drag in an incompatible newer mock
 libvirt-python>=3.5.0,<4.1.0  # LGPLv2+
 git+git://github.com/openstack/fuel-devops.git@10f4ac744e89bfefcba3d7d009de82669c52fa6e   # Use this requirement for Sqlite3, or if requirements for PostgreSQL are already installed
 git+git://github.com/dis-xcom/fuel-devops-driver-ironic
@@ -19,7 +20,6 @@
 salt-pepper<=0.5.3
 setuptools<=36.2.0
 netaddr
-mock>=1.2
 python-jenkins
 cmd2<0.9
 PyYAML!=5.1
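
On moving the mock pin to the top: pip's legacy resolver roughly honors
the first version constraint it meets, so pinning before fuel-devops and
the other unpinned requirements prevents a transitive dependency from
pulling in mock 4.x, which dropped Python 2 support. An order-independent
alternative would be a constraints file (sketch; constraints.txt is a
hypothetical companion file, not part of this change):

    # constraints.txt
    mock>=1.2,<4.0.0

    # applied with: pip install -r tcp_tests/requirements.txt -c constraints.txt
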
diff --git a/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/salt-context-cookiecutter-contrail.yaml
index 47192cc..f45108a 100644
--- a/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/salt-context-cookiecutter-contrail.yaml
+++ b/tcp_tests/templates/heat-bm-cicd-pike-contrail-sl/salt-context-cookiecutter-contrail.yaml
@@ -930,7 +930,3 @@
     password_regex: "'^[a-zA-Z0-9~!@#%^&\\*_=+]{32,}$$'"
    password_regex_description: "Your password can contain capital letters, lowercase letters, digits, the symbols '~ ! @ # % ^ & * _ = +', and must be at least 32 characters long"
     change_password_upon_first_use: False
-  secrets_encryption_enabled: 'True'
-  secrets_encryption_key_id: 'F5CB2ADC36159B03'
-  # Used on CI only.
-  secrets_encryption_private_key: ''
diff --git a/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/salt-context-cookiecutter-contrail.yaml b/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/salt-context-cookiecutter-contrail.yaml
index aa424e4..835952c 100644
--- a/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/salt-context-cookiecutter-contrail.yaml
+++ b/tcp_tests/templates/heat-bm-cicd-queens-contrail-sl/salt-context-cookiecutter-contrail.yaml
@@ -932,8 +932,3 @@
     password_regex: "'^[a-zA-Z0-9~!@#%^&\\*_=+]{32,}$$'"
    password_regex_description: "Your password can contain capital letters, lowercase letters, digits, the symbols '~ ! @ # % ^ & * _ = +', and must be at least 32 characters long"
     change_password_upon_first_use: False
-
-  secrets_encryption_enabled: 'True'
-  secrets_encryption_key_id: 'F5CB2ADC36159B03'
-  # Used on CI only.
-  secrets_encryption_private_key: ''
diff --git a/tcp_tests/tests/system/conftest.py b/tcp_tests/tests/system/conftest.py
index 0627ede..2ea36cf 100644
--- a/tcp_tests/tests/system/conftest.py
+++ b/tcp_tests/tests/system/conftest.py
@@ -80,3 +80,11 @@
     # tempest
     'tempest_actions'
 ])
+
+
+def pytest_addoption(parser):
+    parser.addoption("--dont-switch-to-proposed",
+                     action="store_true",
+                     help="Skip switching Jenkins on the cluster under "
+                          "test to the proposed branch before applying "
+                          "the MCP updates")
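
The flag is consumed by the new dont_switch_to_proposed fixture in
test_mcp_update.py (see below), so a run against a cluster that should
keep its current Jenkins branch might look like (illustrative paths):

    pytest --dont-switch-to-proposed \
        tcp_tests/tests/system/test_mcp_update.py::TestUpdateMcpCluster::test_update_drivetrain
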
diff --git a/tcp_tests/tests/system/test_ceph_operations.py b/tcp_tests/tests/system/test_ceph_operations.py
index bc5dc0c..02a6888 100644
--- a/tcp_tests/tests/system/test_ceph_operations.py
+++ b/tcp_tests/tests/system/test_ceph_operations.py
@@ -4,12 +4,39 @@
 
 LOG = logger.logger
 
+xtra_network_interface = """
+parameters:
+  _param:
+    linux_network_interfaces:
+      br_ctl:
+        address: ${_param:single_address}
+        enabled: True
+        name_servers:
+        - ${_param:dns_server01}
+        - ${_param:dns_server02}
+        netmask: ${_param:control_network_netmask}
+        proto: static
+        require_interfaces: ['ens4']
+        type: bridge
+        use_interfaces: ['ens4']
+      ens3:
+        enabled: True
+        name: ens3
+        proto: dhcp
+        type: eth
+      ens4:
+        enabled: True
+        ipflush_onchange: True
+        name: ens4
+        proto: manual
+        type: eth
+"""
+
 add_osd_ceph_init_yml = """
 parameters:
   _param:
     ceph_osd_node04_hostname: xtra
     ceph_osd_node04_address: 10.6.0.205
-    ceph_mon_node04_ceph_public_address: #10.166.49.205
     ceph_osd_system_codename: xenial
   linux:
     network:
@@ -40,7 +67,8 @@
 
 
 @pytest.fixture(scope='module')
-def add_xtra_node_to_salt(salt_actions, underlay_actions, config):
+def add_xtra_node_to_salt(salt_actions, underlay_actions,
+                          config, reclass_actions):
     """
 
     :return:
@@ -61,6 +89,11 @@
         "systemctl restart salt-minion",
         node_name=xtra_node,
         raise_on_err=False)
+    salt_actions.enforce_state("I@salt:master", "reclass")
+
+    reclass_actions.merge_context(yaml_context=xtra_network_interface,
+                                  short_path="../nodes/_generated/xtra.*.yml")
+
     yield
 
     # LOG.info("Executing pytest TEARDOWN from add_xtra_node_to_salt fixture")
@@ -87,6 +120,21 @@
         # ------- cluster/infra/config/init.yml -----------
         reclass.merge_context(yaml_context=add_osd_config_init_yml,
                               short_path="cluster/*/infra/config/init.yml")
+        salt_actions.run_state("*", "saltutil.refresh_pillar")
+
+    @pytest.fixture
+    def remove_node_from_reclass(self,
+                                 reclass_actions):
+        reclass = reclass_actions
+
+        reclass.delete_key(
+            key="parameters.reclass.storage.node.ceph_osd_node04",
+            short_path="cluster/*/infra/config/init.yml"
+            )
+        reclass.delete_key(
+            key="parameters.linux.network.host.xtra",
+            short_path="cluster/*/ceph/init.yml"
+            )
 
     def test_add_node_process(self, describe_node_in_reclass,
                               drivetrain_actions):
@@ -132,7 +180,9 @@
         # 11   hdd 0.01549         osd.11      up  1.00000 1.00000
         pass
 
-    def test_delete_node_process(self, drivetrain_actions):
+    def test_delete_node_process(self,
+                                 remove_node_from_reclass,
+                                 drivetrain_actions):
         dt = drivetrain_actions
 
         job_name = "ceph-remove-node"
@@ -147,12 +197,147 @@
         assert job_result == 'SUCCESS', job_description
 
 
-class TestCephMon(object):
-    def test_add_node(self):
-        pass
+add_mon_ceph_init_yml = """
+parameters:
+  _param:
+    ceph_mon_node04_hostname: xtra
+    ceph_mon_node04_address: 10.6.0.205
+    ceph_mon_node04_ceph_public_address: 10.166.49.209
+    ceph_mon_node04_ceph_backup_hour: 4
+    ceph_mon_node04_ceph_backup_minute: 0
+  linux:
+    network:
+      host:
+        xtra:
+          address: ${_param:ceph_mon_node04_address}
+          names:
+          - ${_param:ceph_mon_node04_hostname}
+          - ${_param:ceph_mon_node04_hostname}.${_param:cluster_domain}
+"""
 
-    def test_delete_node(self):
-        pass
+add_mon_ceph_common_yml = """
+parameters:
+  ceph:
+    common:
+      members:
+        - name: ${_param:ceph_mon_node04_hostname}
+          host: ${_param:ceph_mon_node04_address}
+"""
+
+add_mon_config_node_yml = """
+parameters:
+  reclass:
+    storage:
+      node:
+        ceph_mon_node04:
+          name: ${_param:ceph_mon_node04_hostname}
+          domain: ${_param:cluster_domain}
+          classes:
+          - cluster.${_param:cluster_name}.ceph.mon
+          params:
+            ceph_public_address: ${_param:ceph_mon_node04_ceph_public_address}
+            ceph_backup_time_hour: ${_param:ceph_mon_node04_ceph_backup_hour}
+            ceph_backup_time_minute: ${_param:ceph_mon_node04_ceph_backup_minute}
+            salt_master_host: ${_param:reclass_config_master}
+            linux_system_codename: ${_param:ceph_mon_system_codename}
+            single_address: ${_param:ceph_mon_node04_address}
+            keepalived_vip_priority: 104
+"""  # noqa: E501
+
+add_mon_infra_kvm_yml = """
+parameters:
+  salt:
+    control:
+      size:
+        ceph.mon:
+          cpu: 8
+          ram: 16384
+          disk_profile: small
+          net_profile: default
+      cluster:
+        internal:
+          node:
+            cmn04:
+              name: ${_param:ceph_mon_node04_hostname}
+              provider: ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
+              image: ${_param:salt_control_xenial_image}
+              size: ceph.mon
+"""  # noqa: E501
+
+
+@pytest.mark.usefixtures("add_xtra_node_to_salt")
+class TestCephMon(object):
+    @pytest.fixture
+    def describe_node_in_reclass(self,
+                                 reclass_actions, salt_actions):
+        LOG.info("Executing pytest SETUP "
+                 "from describe_node_in_reclass fixture")
+        reclass = reclass_actions
+        # ---- cluster/*/ceph/init.yml --------------
+        reclass.merge_context(yaml_context=add_mon_ceph_init_yml,
+                              short_path="cluster/*/ceph/init.yml")
+
+        # ------- cluster/infra/config/init.yml -----------
+        reclass.merge_context(yaml_context=add_mon_ceph_common_yml,
+                              short_path="cluster/*/ceph/common.yml")
+        reclass.merge_context(yaml_context=add_mon_config_node_yml,
+                              short_path="cluster/*/infra/config/nodes.yml")
+
+        # ------- define settings for new mon node in KVM cluster -----------
+        reclass.merge_context(yaml_context=add_mon_infra_kvm_yml,
+                              short_path="cluster/*/infra/kvm.yml")
+
+        salt_actions.run_state("*", "saltutil.refresh_pillar")
+
+    @pytest.fixture
+    def remove_node_from_reclass(self,
+                                 reclass_actions, salt_actions):
+        LOG.info("Executing pytest SETUP "
+                 "from remove_node_from_reclass fixture")
+        reclass = reclass_actions
+        reclass.delete_key(
+            key="parameters.reclass.storage.node.ceph_mon_node04",
+            short_path="cluster/*/infra/config/init.yml")
+        reclass.delete_key(
+            key="parameters.salt.control.cluster.internal.node.cmn04",
+            short_path="cluster/*/infra/kvm.yml"
+            )
+        reclass.delete_key(
+            key="parameters.linux.network.host.xtra",
+            short_path="cluster/*/ceph/init.yml"
+            )
+
+    def test_add_node_process(self,
+                              drivetrain_actions,
+                              describe_node_in_reclass):
+        dt = drivetrain_actions
+
+        job_name = "ceph-add-node"
+        job_parameters = {
+            'HOST': 'xtra*',
+            'USE_UPMAP': True
+            }
+        job_result, job_description = dt.start_job_on_jenkins(
+            job_name=job_name,
+            job_parameters=job_parameters,
+            verbose=True)
+        assert job_result == 'SUCCESS', job_description
+
+    def test_delete_node_process(self,
+                                 remove_node_from_reclass,
+                                 drivetrain_actions):
+        dt = drivetrain_actions
+
+        job_name = "ceph-remove-node"
+        job_parameters = {
+            'HOST': 'xtra*',
+            'USE_UPMAP': True
+            }
+        job_result, job_description = dt.start_job_on_jenkins(
+            job_name=job_name,
+            job_parameters=job_parameters,
+            verbose=True)
+        assert job_result == 'SUCCESS', job_description
 
 
 class TestCephMgr(object):
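
One pytest detail in the tests above: same-scope fixtures with no
dependencies between them are set up in the order they appear in the
test signature, and all of them complete before the test body launches
the Jenkins job. A minimal demonstration of that ordering (standard
pytest behavior, not code from this change):

    import pytest

    order = []

    @pytest.fixture
    def first():
        order.append("first")

    @pytest.fixture
    def second():
        order.append("second")

    def test_fixture_order(first, second):
        # Independent same-scope fixtures are set up in parameter order.
        assert order == ["first", "second"]
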
diff --git a/tcp_tests/tests/system/test_mcp_update.py b/tcp_tests/tests/system/test_mcp_update.py
index 6957f64..240b481 100644
--- a/tcp_tests/tests/system/test_mcp_update.py
+++ b/tcp_tests/tests/system/test_mcp_update.py
@@ -67,7 +67,16 @@
 
 
 @pytest.fixture(scope='class')
-def switch_to_proposed_pipelines(reclass_actions, salt_actions):
+def dont_switch_to_proposed(request):
+    return request.config.getoption("--dont-switch-to-proposed")
+
+
+@pytest.fixture(scope='class')
+def switch_to_proposed_pipelines(reclass_actions, salt_actions,
+                                 dont_switch_to_proposed):
+    if dont_switch_to_proposed:
+        return True
+
     reclass = reclass_actions
     proposed_repo = "http://mirror.mirantis.com/update/proposed/"
     repo_param = "parameters._param.linux_system_repo_update_url"
@@ -111,6 +120,7 @@
         reclass_actions.commit(
           "[from TCP-QA] Add galera_clustercheck_password")
     else:
+
         LOG.info("Skipping WA for Galera Clustercheck Password")