Merge "test_oss launched if cicd node  available"
diff --git a/test_set/cvp-sanity/fixtures/base.py b/test_set/cvp-sanity/fixtures/base.py
index 8c5be34..4858c83 100644
--- a/test_set/cvp-sanity/fixtures/base.py
+++ b/test_set/cvp-sanity/fixtures/base.py
@@ -102,11 +102,23 @@
 @pytest.fixture(scope='session')
 def check_kdt(local_salt_client):
     kdt_nodes_available = local_salt_client.test_ping(
-        tgt="I@gerrit:client and I@kubernetes:pool",
+        tgt="I@gerrit:client and I@kubernetes:pool and not I@salt:master",
         expr_form='compound'
     )
     if not kdt_nodes_available:
         pytest.skip("No 'kdt' nodes found. Skipping this test...")
+    return kdt_nodes_available.keys()
+
+
+@pytest.fixture(scope='session')
+def check_kfg(local_salt_client):
+    kfg_nodes_available = local_salt_client.test_ping(
+        tgt="I@kubernetes:pool and I@salt:master",
+        expr_form='compound'
+    )
+    if not kfg_nodes_available:
+        pytest.skip("No cfg-under-Kubernetes nodes found. Skipping this test...")
+    return kfg_nodes_available.keys()
 
 
 @pytest.fixture(scope='session')
@@ -159,4 +171,4 @@
         yield
     except Exception as e:
         print("print_node_version:: some error occurred: {}".format(e))
-        yield
\ No newline at end of file
+        yield
diff --git a/test_set/cvp-sanity/tests/test_drivetrain.py b/test_set/cvp-sanity/tests/test_drivetrain.py
index b5beba5..3a9f1b6 100644
--- a/test_set/cvp-sanity/tests/test_drivetrain.py
+++ b/test_set/cvp-sanity/tests/test_drivetrain.py
@@ -207,7 +207,7 @@
         '''Test user was not found'''
 
 
-def test_drivetrain_services_replicas(local_salt_client):
+def test_drivetrain_services_replicas(local_salt_client, check_cicd):
     """
         # Execute `salt -C 'I@gerrit:client' cmd.run 'docker service ls'` to get info for each docker service, e.g.:
         "x5nzktxsdlm6        jenkins_slave02     replicated          0/1                 docker-prod-local.artifactory.mirantis.com/mirantis/cicd/jnlp-slave:2019.2.0         "
@@ -272,7 +272,7 @@
               {}'''.format(json.dumps(mismatch, indent=4))
 
 
-def test_jenkins_jobs_branch(local_salt_client):
+def test_jenkins_jobs_branch(local_salt_client, check_cicd):
     """ This test compares Jenkins jobs versions
         collected from the cloud vs collected from pillars.
     """
@@ -282,8 +282,6 @@
 
     config = utils.get_configuration()
     drivetrain_version = config.get('drivetrain_version', '')
-    if not drivetrain_version:
-        pytest.skip("drivetrain_version is not defined. Skipping")
     jenkins_password = get_password(local_salt_client, 'jenkins:client')
     version_mismatch = []
     server = join_to_jenkins(local_salt_client, 'admin', jenkins_password)
@@ -311,7 +309,7 @@
             continue
 
         actual_version = BranchSpec[0].getElementsByTagName('name')[0].childNodes[0].data
-        if actual_version not in [expected_version, "release/{}".format(drivetrain_version)]:
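+        # An empty expected_version means no branch is pinned for the job in
+        # pillars, so the comparison is skipped for it.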
+        if expected_version and actual_version not in expected_version:
             version_mismatch.append("Job {0} has {1} branch."
                                     "Expected {2}".format(job_name,
                                                           actual_version,
@@ -362,3 +360,76 @@
     assert job_result == 'SUCCESS', \
         '''Test job '{0}' build was not successful or timeout is too small
          '''.format(jenkins_test_job)
+
+
+def test_kdt_all_pods_are_available(local_salt_client, check_kdt):
+    """
+     # Run kubectl get pods -n drivetrain on kdt-nodes to get status for each pod
+     # Check that each pod has fulfilled status in the READY column
+
+    """
+    pods_statuses_output = local_salt_client.cmd_any(
+        tgt='L@'+','.join(check_kdt),
+        param='kubectl get pods -n drivetrain |  awk {\'print $1"; "$2\'} | column -t',
+        expr_form='compound')
+
+    assert pods_statuses_output != "/bin/sh: 1: kubectl: not found", \
+        "Nodes {} don't have kubectl".format(check_kdt)
+    # Convert string to list and remove first row with column names
+    pods_statuses = pods_statuses_output.split('\n')
+    pods_statuses = pods_statuses[1:]
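+    # Defensive hardening (not in the original logic): drop empty lines so
+    # the split('; ') below cannot raise ValueError on a trailing blank line.
+    pods_statuses = [line for line in pods_statuses if line.strip()]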
+
+    report_with_errors = ""
+    for pod_status in pods_statuses:
+        pod, status = pod_status.split('; ')
+        actual_replica, expected_replica = status.split('/')
+
+        if actual_replica.strip() != expected_replica.strip():
+            report_with_errors += "Pod [{pod}] doesn't have all containers. Expected {expected} containers, actual {actual}\n".format(
+                pod=pod,
+                expected=expected_replica,
+                actual=actual_replica
+            )
+
+    print report_with_errors
+    assert report_with_errors == "", \
+        "\n{sep}{kubectl_output}{sep} \n\n {report} ".format(
+            sep="\n" + "-"*20 + "\n",
+            kubectl_output=pods_statuses_output,
+            report=report_with_errors
+        )
+
+
+def test_kfg_all_pods_are_available(local_salt_client, check_kfg):
+    """
+     # Run kubectl get pods -n drivetrain on cfg node to get status for each pod
+     # Check that each pod has fulfilled status in the READY column
+
+    """
+    # TODO collapse similar tests into one to check pods and add new fixture
+    pods_statuses_output = local_salt_client.cmd_any(
+        tgt='L@' + ','.join(check_kfg),
+        param='kubectl get pods -n drivetrain |  awk {\'print $1"; "$2\'} | column -t',
+        expr_form='compound')
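+
+    # Same kubectl availability guard as in the kdt test above; without it a
+    # missing binary would only surface later as a confusing parse error.
+    assert pods_statuses_output != "/bin/sh: 1: kubectl: not found", \
+        "Nodes {} don't have kubectl".format(check_kfg)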
+    # Convert string to list and remove first row with column names
+    pods_statuses = pods_statuses_output.split('\n')
+    pods_statuses = pods_statuses[1:]
+
+    report_with_errors = ""
+    for pod_status in pods_statuses:
+        pod, status = pod_status.split('; ')
+        actual_replica, expected_replica = status.split('/')
+
+        if actual_replica.strip() != expected_replica.strip():
+            report_with_errors += "Pod [{pod}] doesn't have all containers. Expected {expected} containers, actual {actual}\n".format(
+                pod=pod,
+                expected=expected_replica,
+                actual=actual_replica
+            )
+
+    print report_with_errors
+    assert report_with_errors == "", \
+        "\n{sep}{kubectl_output}{sep} \n\n {report} ".format(
+            sep="\n" + "-" * 20 + "\n",
+            kubectl_output=pods_statuses_output,
+            report=report_with_errors
+        )
\ No newline at end of file
diff --git a/test_set/cvp-sanity/tests/test_mounts.py b/test_set/cvp-sanity/tests/test_mounts.py
index 6fcca56..c9ba9ce 100644
--- a/test_set/cvp-sanity/tests/test_mounts.py
+++ b/test_set/cvp-sanity/tests/test_mounts.py
@@ -19,7 +19,8 @@
              "I@salt:control or "
              "I@prometheus:server and not I@influxdb:server or "
              "I@kubernetes:* and not I@etcd:* or "
-             "I@docker:host and not I@prometheus:server and not I@kubernetes:*",
+             "I@docker:host and not I@prometheus:server and not I@kubernetes:* or "
+             "I@gerrit:client and I@kubernetes:pool and not I@salt:master",
          expr_form='compound').keys()
 
     if len(mounts_by_nodes.keys()) < 2:
diff --git a/test_set/cvp-sanity/tests/test_services.py b/test_set/cvp-sanity/tests/test_services.py
index e25015d..c704437 100644
--- a/test_set/cvp-sanity/tests/test_services.py
+++ b/test_set/cvp-sanity/tests/test_services.py
@@ -15,7 +15,7 @@
     Skips services if they are not consistent for all node.
     Inconsistent services will be checked with another test case
     """
-    exclude_services = utils.get_configuration().get("exclude_services", [])
+    exclude_services = utils.get_configuration().get("skipped_services", [])
     services_by_nodes = local_salt_client.cmd(tgt="L@"+','.join(nodes_in_group),
                                               fun='service.get_all',
                                               expr_form='compound')
diff --git a/test_set/cvp-sanity/tests/test_single_vip.py b/test_set/cvp-sanity/tests/test_single_vip.py
index 6fa0a41..7a1c2f8 100644
--- a/test_set/cvp-sanity/tests/test_single_vip.py
+++ b/test_set/cvp-sanity/tests/test_single_vip.py
@@ -1,30 +1,26 @@
-from collections import Counter
+import utils
+import json
 
 
-def test_single_vip(local_salt_client, nodes_in_group):
-    local_salt_client.cmd(tgt="L@"+','.join(nodes_in_group),
-                          fun='saltutil.sync_all',
-                          expr_form='compound')
-    nodes_list = local_salt_client.cmd(
-        tgt="L@"+','.join(nodes_in_group),
-        fun='grains.item',
-        param='ipv4',
-        expr_form='compound')
-
-    ipv4_list = []
-
-    for node in nodes_list:
-        if not nodes_list.get(node):
-            # TODO: do not skip node
-            print "Node {} is skipped".format (node)
+def test_single_vip_exists(local_salt_client):
+    """Test checks that there is only one VIP address
+       within one group of nodes (where applicable).
+       Steps:
+       1. Get IP addresses for nodes via salt cmd.run 'ip a | grep /32'
+       2. Check that exactly one node in each group holds a /32 (VIP) address.
+    """
+    groups = utils.calculate_groups()
+    no_vip = {}
+    for group in groups:
+        if group in ['cmp', 'cfg', 'kvm', 'cmn', 'osd', 'gtw']:
             continue
-        ipv4_list.extend(nodes_list.get(node).get('ipv4'))
-
-    cnt = Counter(ipv4_list)
-
-    for ip in cnt:
-        if ip == '127.0.0.1':
-            continue
-        elif cnt[ip] > 1:
-            assert "VIP IP duplicate found " \
-                   "\n{}".format(ipv4_list)
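+        # A VIP is typically plumbed with a /32 mask (e.g. by keepalived), so
+        # grepping 'ip a' for '/32' shows which node currently holds it.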
+        nodes_list = local_salt_client.cmd(
+            tgt="L@" + ','.join(groups[group]),
+            fun='cmd.run',
+            param='ip a | grep /32',
+            expr_form='compound')
+        result = [x for x in nodes_list.values() if x]
+        if len(result) != 1:
+            if len(result) == 0:
+                no_vip[group] = 'No VIP found'
+            else:
+                no_vip[group] = nodes_list
+    assert not no_vip, "Some groups of nodes have a problem with their VIP " \
+           "\n{}".format(json.dumps(no_vip, indent=4))
diff --git a/test_set/cvp-sanity/tests/test_ui_addresses.py b/test_set/cvp-sanity/tests/test_ui_addresses.py
index e5ef112..0c65451 100644
--- a/test_set/cvp-sanity/tests/test_ui_addresses.py
+++ b/test_set/cvp-sanity/tests/test_ui_addresses.py
@@ -186,6 +186,7 @@
         'Public Alerta page is not reachable on {} from ctl nodes'.format(url)
 
 
+@pytest.mark.usefixtures('check_openstack')
 @pytest.mark.usefixtures('check_drivetrain')
 def test_public_ui_jenkins(local_salt_client, ctl_nodes_pillar):
     IP = local_salt_client.pillar_get(param='_param:cluster_public_host')
@@ -201,6 +202,7 @@
         'Public Jenkins page is not reachable on {} from ctl nodes'.format(url)
 
 
+@pytest.mark.usefixtures('check_openstack')
 @pytest.mark.usefixtures('check_drivetrain')
 def test_public_ui_gerrit(local_salt_client, ctl_nodes_pillar):
     IP = local_salt_client.pillar_get(param='_param:cluster_public_host')
diff --git a/test_set/cvp-spt/global_config.yaml b/test_set/cvp-spt/global_config.yaml
index f55a2a6..9a8e738 100644
--- a/test_set/cvp-spt/global_config.yaml
+++ b/test_set/cvp-spt/global_config.yaml
@@ -23,8 +23,9 @@
 skipped_nodes: []
 # example for Jenkins: networks=net1,net2
 networks: "10.101.0.0/24"
+external_network: ''
 HW_NODES: []
 CMP_HOSTS: []
 nova_timeout: 30
 iperf_prep_string: "sudo /bin/bash -c 'echo \"91.189.88.161        archive.ubuntu.com\" >> /etc/hosts'"
-IMAGE_SIZE_MB: 2000
\ No newline at end of file
+IMAGE_SIZE_MB: 2000
diff --git a/test_set/cvp-spt/utils/os_client.py b/test_set/cvp-spt/utils/os_client.py
index c17617f..fb84265 100644
--- a/test_set/cvp-spt/utils/os_client.py
+++ b/test_set/cvp-spt/utils/os_client.py
@@ -9,7 +9,7 @@
 import os
 import random
 import time
+import utils
 
 
 class OfficialClientManager(object):
     """Manager that provides access to the official python clients for
@@ -241,15 +241,18 @@
         return net
 
     def get_external_network(self):
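+        # Resolution order: the 'external_network' name from the test config,
+        # then the first discovered router:external network with subnets, then
+        # a freshly created fake external network. Note that a configured value
+        # is returned as-is (a string), while discovery returns a network dict.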
-        networks = [
-            net for net in self.os_clients.network.list_networks()["networks"]
-            if net["admin_state_up"] and net["router:external"] and
-            len(net["subnets"])
-        ]
-        if networks:
-            ext_net = networks[0]
-        else:
-            ext_net = self.create_fake_external_network()
+        config = utils.get_configuration()
+        ext_net = config.get('external_network') or ''
+        if not ext_net:
+            networks = [
+                net for net in self.os_clients.network.list_networks()["networks"]
+                if net["admin_state_up"] and net["router:external"] and
+                len(net["subnets"])
+            ]
+            if networks:
+                ext_net = networks[0]
+            else:
+                ext_net = self.create_fake_external_network()
         return ext_net
 
     def create_flavor(self, name, ram=256, vcpus=1, disk=2):