Merge "Add more public url tests for UIs"
diff --git a/Dockerfile b/Dockerfile
index cd6b073..960ec96 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -10,6 +10,8 @@
 
 USER root
 ARG UBUNTU_MIRROR_URL="http://archive.ubuntu.com/ubuntu"
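+# source repo and branch for the stacklight-pytest suite installed below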
+ARG SL_TEST_REPO='http://gerrit.mcp.mirantis.com/mcp/stacklight-pytest'
+ARG SL_TEST_BRANCH='master'
 
 WORKDIR /var/lib/
 COPY bin/ /usr/local/bin/
@@ -23,26 +25,37 @@
     echo "deb [arch=amd64] $UBUNTU_MIRROR_URL xenial-updates main restricted universe multiverse" >> sources.list && \
     echo "deb [arch=amd64] $UBUNTU_MIRROR_URL xenial-backports main restricted universe multiverse" >> sources.list && \
     popd ; apt-get update && apt-get  upgrade -y && \
-    apt-get install -y build-essential curl git-core iputils-ping libffi-dev libldap2-dev libsasl2-dev libssl-dev patch python-dev python-pip python3-dev vim-tiny wget \
-    python-virtualenv python3-virtualenv && \
+    apt-get install -y build-essential curl git-core iputils-ping libffi-dev libldap2-dev libsasl2-dev libssl-dev patch python-dev python-pip vim-tiny wget \
+    python-virtualenv \
+# Re-enable these packages when porting to Python 3: python3-virtualenv python3-dev \
 # Due to upstream bug we should use fixed version of pip
-    python -m pip install --upgrade 'pip==9.0.3' \
+    && python -m pip install --upgrade 'pip==9.0.3'  \
     # initialize cvp sanity test suite
           && pushd cvp-sanity  \
-          && virtualenv  venv \
+          && virtualenv --python=python2  venv \
           && . venv/bin/activate \
           && pip install -r requirements.txt \
           && deactivate \
           && popd \
     # initialize cvp spt test suite
           && pushd cvp-spt  \
-          && virtualenv  venv \
+          && virtualenv --python=python2  venv \
           && . venv/bin/activate \
           && pip install -r requirements.txt \
           && deactivate \
-          && popd && \
+          && popd  \
+    # initialize cvp stacklight test suite
+          && mkdir cvp-stacklight \
+          && pushd cvp-stacklight  \
+          && virtualenv --system-site-packages venv \
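+    # --system-site-packages lets the venv reuse python packages installed system-wide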
+          && . venv/bin/activate \
+          && git clone -b $SL_TEST_BRANCH $SL_TEST_REPO  \
+          && pip install ./stacklight-pytest \
+          && pip install -r stacklight-pytest/requirements.txt  \
+          && deactivate \
+          && popd  \
 # Cleanup
-    apt-get -y purge libx11-data xauth libxmuu1 libxcb1 libx11-6 libxext6 ppp pppconfig pppoeconf popularity-contest cpp gcc g++ libssl-doc && \
+    && apt-get -y purge libx11-data xauth libxmuu1 libxcb1 libx11-6 libxext6 ppp pppconfig pppoeconf popularity-contest cpp gcc g++ libssl-doc && \
     apt-get -y autoremove; apt-get -y clean ; rm -rf /root/.cache; rm -rf /var/lib/apt/lists/* && \
     rm -rf /tmp/* ; rm -rf /var/tmp/* ; rm -rfv /etc/apt/sources.list.d/* ; echo > /etc/apt/sources.list
 
diff --git a/test_set/cvp-sanity/global_config.yaml b/test_set/cvp-sanity/global_config.yaml
index b1af7a4..4146147 100644
--- a/test_set/cvp-sanity/global_config.yaml
+++ b/test_set/cvp-sanity/global_config.yaml
@@ -71,6 +71,16 @@
   {
     "skipped_ifaces": ["lo", "virbr0", "docker_gwbridge", "docker0"]}
 
+# settings for the packages test 'test_packages_are_latest'
+# 'skipped_packages' lists packages to exclude from the check
+# a True value for 'skip_test' skips this test entirely; set False to run it
+# TODO: switch the 'skip_test' default to False once the prod env is fixed
+test_packages:
+  { # "skipped_packages": ["update-notifier-common", "wget"]
+    "skipped_packages": [""],
+    "skip_test": True
+  }
+
 # specify what mcp version (tag) is deployed
 drivetrain_version: ''
 
diff --git a/test_set/cvp-sanity/tests/test_drivetrain.py b/test_set/cvp-sanity/tests/test_drivetrain.py
index 4628e70..bd81a82 100644
--- a/test_set/cvp-sanity/tests/test_drivetrain.py
+++ b/test_set/cvp-sanity/tests/test_drivetrain.py
@@ -11,6 +11,7 @@
 import ldap
 import ldap.modlist as modlist
+import time  # used by the retry loop in test_drivetrain_services_replicas
 
+
 def join_to_gerrit(local_salt_client, gerrit_user, gerrit_password):
     gerrit_port = local_salt_client.cmd(
         'I@gerrit:client and not I@salt:master',
@@ -27,6 +28,7 @@
     rest = GerritRestAPI(url=url, auth=auth)
     return rest
 
+
 def join_to_jenkins(local_salt_client, jenkins_user, jenkins_password):
     jenkins_port = local_salt_client.cmd(
         'I@jenkins:client and not I@salt:master',
@@ -42,6 +44,7 @@
     server = jenkins.Jenkins(jenkins_url, username=jenkins_user, password=jenkins_password)
     return server
 
+
 def get_password(local_salt_client,service):
     password = local_salt_client.cmd(
         service,
@@ -50,6 +53,7 @@
         expr_form='pillar').values()[0]
     return password
 
+
 def test_drivetrain_gerrit(local_salt_client):
     gerrit_password = get_password(local_salt_client,'gerrit:client')
     gerrit_error = ''
@@ -109,6 +113,7 @@
     assert gerrit_error == '',\
         'Something is wrong with Gerrit'.format(gerrit_error)
 
+
 def test_drivetrain_openldap(local_salt_client):
     '''Create a test user 'DT_test_user' in openldap,
     add the user to admin group, login using the user to Jenkins.
@@ -207,6 +212,7 @@
     assert ldap_result !=[], \
         '''Test user was not found'''
 
+
 def test_drivetrain_jenkins_job(local_salt_client):
     jenkins_password = get_password(local_salt_client,'jenkins:client')
     server = join_to_jenkins(local_salt_client,'admin',jenkins_password)
@@ -240,26 +246,33 @@
         '''Test job '{0}' build was not successfull or timeout is too small
          '''.format(jenkins_test_job)
 
+
 def test_drivetrain_services_replicas(local_salt_client):
-    salt_output = local_salt_client.cmd(
-        'I@gerrit:client',
-        'cmd.run',
-        ['docker service ls'],
-        expr_form='compound')
-    wrong_items = []
-    for line in salt_output[salt_output.keys()[0]].split('\n'):
-        if line[line.find('/') - 1] != line[line.find('/') + 1] \
-           and 'replicated' in line:
-            wrong_items.append(line)
-    assert len(wrong_items) == 0, \
-        '''Some DriveTrain services doesn't have expected number of replicas:
-              {}'''.format(json.dumps(wrong_items, indent=4))
+    # TODO: replace with the pytest-rerunfailures plugin
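+    # retry a few times since docker service replica counts may still be converging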
+    for _ in range(4):
+        salt_output = local_salt_client.cmd(
+            'I@gerrit:client',
+            'cmd.run',
+            ['docker service ls'],
+            expr_form='compound')
+        wrong_items = []
+        for line in salt_output[salt_output.keys()[0]].split('\n'):
+            if line[line.find('/') - 1] != line[line.find('/') + 1] \
+               and 'replicated' in line:
+                wrong_items.append(line)
+        if len(wrong_items) == 0:
+            break
+        else:
+            print('''Some DriveTrain services don't have the expected number of replicas:
+                  {}\n'''.format(json.dumps(wrong_items, indent=4)))
+            time.sleep(5)
+    assert len(wrong_items) == 0
 
 
 def test_drivetrain_components_and_versions(local_salt_client):
     config = utils.get_configuration()
     if not config['drivetrain_version']:
-        version = \
+        expected_version = \
             local_salt_client.cmd(
                 'I@salt:master',
                 'pillar.get',
@@ -270,61 +283,75 @@
                 'pillar.get',
                 ['_param:apt_mk_version'],
                 expr_form='compound').values()[0]
-        if not version:
+        if not expected_version:
             pytest.skip("drivetrain_version is not defined. Skipping")
     else:
-        version = config['drivetrain_version']
-    salt_output = local_salt_client.cmd(
-        'I@gerrit:client',
-        'cmd.run',
-        ['docker service ls'],
-        expr_form='compound')
-    #  'ldap_server' removed because it is an external component now v 1.1.8
-    not_found_services = ['gerrit_db', 'gerrit_server', 'jenkins_master',
-                          'jenkins_slave01', 'jenkins_slave02',
-                          'jenkins_slave03', 'ldap_admin', 'docker_registry',
-                          'docker_visualizer']
-    version_mismatch = []
-    for line in salt_output[salt_output.keys()[0]].split('\n'):
-        for service in not_found_services:
-            if service in line:
-                not_found_services.remove(service)
-                if version != line.split()[4].split(':')[1]:
-                    version_mismatch.append("{0}: expected "
-                        "version is {1}, actual - {2}".format(service,version,
-                                                              line.split()[4].split(':')[1]))
-                continue
-    assert len(not_found_services) == 0, \
-        '''Some DriveTrain components are not found:
-              {}'''.format(json.dumps(not_found_services, indent=4))
-    assert len(version_mismatch) == 0, \
+        expected_version = config['drivetrain_version']
+    table_with_docker_services = local_salt_client.cmd('I@gerrit:client',
+                                                       'cmd.run',
+                                                       ['docker service ls --format "{{.Image}}"'],
+                                                       expr_form='compound')
+    table_from_pillar = local_salt_client.cmd('I@gerrit:client',
+                                              'pillar.get',
+                                              ['docker:client:images'],
+                                              expr_form='compound')
+
+    expected_images = table_from_pillar[table_from_pillar.keys()[0]]
+    actual_images = table_with_docker_services[table_with_docker_services.keys()[0]].split('\n')
+
+    # ---- Check that all docker images from 'pillar.get docker:client:images' are present among running services ----
+    not_found_services = list(set(expected_images) - set(actual_images))
+    assert len(not_found_services) == 0, \
+        '''Some DriveTrain components are not found: {}'''.format(json.dumps(not_found_services, indent=4))
+
+    # -------- Check that every docker service image tag equals mcp_version (except for external images) --------
+    version_mismatch = [
+        "{image}: expected version - {expected_version}, actual - {version}".format(version=image.split(":")[-1], **locals())
+        for image in actual_images
+        if image.split(":")[-1] != expected_version and "mirantis/external" not in image]
+
+    assert len(version_mismatch) == 0, \
         '''Version mismatch found:
               {}'''.format(json.dumps(version_mismatch, indent=4))
 
 
 def test_jenkins_jobs_branch(local_salt_client):
+    excludes = ['upgrade-mcp-release', 'deploy-update-salt']
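+    # these jobs are expected to point at branches that differ from drivetrain_version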
+
     config = utils.get_configuration()
-    expected_version = config['drivetrain_version'] or []
-    if not expected_version or expected_version == '':
+    drivetrain_version = config.get('drivetrain_version', '')
+    if not drivetrain_version:
         pytest.skip("drivetrain_version is not defined. Skipping")
-    jenkins_password = get_password(local_salt_client,'jenkins:client')
+    jenkins_password = get_password(local_salt_client, 'jenkins:client')
     version_mismatch = []
-    server = join_to_jenkins(local_salt_client,'admin',jenkins_password)
+    server = join_to_jenkins(local_salt_client, 'admin', jenkins_password)
     for job_instance in server.get_jobs():
         job_name = job_instance.get('name')
+        if job_name in excludes:
+            continue
+
         job_config = server.get_job_config(job_name)
         xml_data = minidom.parseString(job_config)
         BranchSpec = xml_data.getElementsByTagName('hudson.plugins.git.BranchSpec')
-        #We use master branch for pipeline-library in case of 'testing,stable,nighlty' versions
-        if expected_version in ['testing','nightly','stable']:
+
+        # We use the master branch for pipeline-library in case of 'testing', 'stable', 'nightly' versions.
+        # A 'proposed' version is left as is.
+        # In other cases we expect release/{drivetrain_version} (e.g. release/2019.2.0)
+        if drivetrain_version in ['testing', 'nightly', 'stable']:
             expected_version = 'master'
-        if BranchSpec:
-            actual_version = BranchSpec[0].getElementsByTagName('name')[0].childNodes[0].data
-            if ( actual_version != expected_version ) and ( job_name not in ['upgrade-mcp-release'] ) :
-                version_mismatch.append("Job {0} has {1} branch."
-                                        "Expected {2}".format(job_name,
-                                                              actual_version,
-                                                              expected_version))
+        else:
+            expected_version = drivetrain_version
+
+        if not BranchSpec:
+            print("No BranchSpec has found for {} job".format(job_name))
+            continue
+
+        actual_version = BranchSpec[0].getElementsByTagName('name')[0].childNodes[0].data
+        if actual_version not in [expected_version, "release/{}".format(drivetrain_version)]:
+            version_mismatch.append("Job {0} has branch {1}, "
+                                    "expected {2}".format(job_name,
+                                                          actual_version,
+                                                          expected_version))
     assert len(version_mismatch) == 0, \
         '''Some DriveTrain jobs have version/branch mismatch:
               {}'''.format(json.dumps(version_mismatch, indent=4))
diff --git a/test_set/cvp-sanity/tests/test_duplicate_ips.py b/test_set/cvp-sanity/tests/test_duplicate_ips.py
index 50bdcba..afe1afe 100644
--- a/test_set/cvp-sanity/tests/test_duplicate_ips.py
+++ b/test_set/cvp-sanity/tests/test_duplicate_ips.py
@@ -14,6 +14,7 @@
                 dup_ifaces[node] = {iface: nodes[node]['ip4_interfaces'][iface]}
     return dup_ifaces
 
+
 def test_duplicate_ips(local_salt_client):
     active_nodes = utils.get_active_nodes()
 
diff --git a/test_set/cvp-sanity/tests/test_k8s.py b/test_set/cvp-sanity/tests/test_k8s.py
index 022eb1c..2212025 100644
--- a/test_set/cvp-sanity/tests/test_k8s.py
+++ b/test_set/cvp-sanity/tests/test_k8s.py
@@ -140,3 +140,39 @@
         print '{} is AVAILABLE'.format(hostname)
     else:
         print '{} IS NOT AVAILABLE'.format(hostname)
+
+
+def test_k8s_dashboard_available(local_salt_client):
+    result = local_salt_client.cmd(
+        'etcd:server', 'cmd.run',
+        ['kubectl get svc -n kube-system'],
+        expr_form='pillar'
+    )
+    if not result:
+        pytest.skip("k8s is not found on this environment")
+
+    # service name 'kubernetes-dashboard' is hardcoded in kubernetes formula
+    dashboard_enabled = local_salt_client.cmd(
+        'etcd:server', 'pillar.get',
+        ['kubernetes:common:addons:dashboard:enabled'],
+        expr_form='pillar'
+    ).values()[0]
+    if not dashboard_enabled:
+        pytest.skip("Kubernetes dashboard is not enabled in the cluster.")
+
+    external_ip = local_salt_client.cmd(
+        'etcd:server', 'pillar.get',
+        ['kubernetes:common:addons:dashboard:public_ip'],
+        expr_form='pillar'
+    ).values()[0]
+
+    # dashboard port 8443 is hardcoded in kubernetes formula
+    url = "https://{}:8443".format(external_ip)
+    check = local_salt_client.cmd(
+        'etcd:server', 'cmd.run',
+        ['curl {} 2>&1 | grep kubernetesDashboard'.format(url)],
+        expr_form='pillar'
+    )
+    assert len(check.values()[0]) != 0, \
+        'Kubernetes dashboard is not reachable on {} ' \
+        'from ctl nodes'.format(url)
diff --git a/test_set/cvp-sanity/tests/test_nodes_in_maas.py b/test_set/cvp-sanity/tests/test_nodes_in_maas.py
new file mode 100644
index 0000000..c20d241
--- /dev/null
+++ b/test_set/cvp-sanity/tests/test_nodes_in_maas.py
@@ -0,0 +1,80 @@
+import json
+import pytest
+
+import utils
+
+
+def get_maas_logged_in_profiles(local_salt_client):
+    get_apis = local_salt_client.cmd(
+        'maas:cluster',
+        'cmd.run',
+        ['maas list'],
+        expr_form='pillar')
+    return get_apis.values()[0]
+
+
+def login_to_maas(local_salt_client, user):
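+    # NOTE: assumes the helper script logs in to MAAS and exports the profile name as $PROFILE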
+    login = local_salt_client.cmd(
+        'maas:cluster',
+        'cmd.run',
+        ["source /var/lib/maas/.maas_login.sh  ; echo {}=${{PROFILE}}"
+         "".format(user)],
+        expr_form='pillar')
+    return login.values()[0]
+
+
+def test_nodes_deployed_in_maas(local_salt_client):
+    config = utils.get_configuration()
+
+    # 1. Check MAAS is present on some node
+    check_maas = local_salt_client.cmd(
+        'maas:cluster',
+        'test.ping',
+        expr_form='pillar')
+    if not check_maas:
+        pytest.skip("Could not find MAAS on the environment")
+
+    # 2. Get MAAS admin user from model
+    maas_admin_user = local_salt_client.cmd(
+        'maas:cluster', 'pillar.get',
+        ['_param:maas_admin_username'],
+        expr_form='pillar'
+    ).values()[0]
+    if not maas_admin_user:
+        pytest.skip("Could not find MAAS admin user in the model by parameter "
+                    "'maas_admin_username'")
+
+    # 3. Check maas has logged in profiles and try to log in if not
+    logged_profiles = get_maas_logged_in_profiles(local_salt_client)
+    if maas_admin_user not in logged_profiles:
+        login = login_to_maas(local_salt_client, maas_admin_user)
+        newly_logged = get_maas_logged_in_profiles(local_salt_client)
+        if maas_admin_user not in newly_logged:
+            pytest.skip(
+                "Could not find '{}' profile in MAAS and could not log in.\n"
+                "Current MAAS logged in profiles: {}.\nLogin output: {}"
+                "".format(maas_admin_user, newly_logged, login))
+
+    # 4. Get nodes in MAAS
+    get_nodes = local_salt_client.cmd(
+        'maas:cluster',
+        'cmd.run',
+        ['maas {} nodes read'.format(maas_admin_user)],
+        expr_form='pillar')
+    result = ""
+    try:
+        result = json.loads(get_nodes.values()[0])
+    except ValueError as e:
+        assert result, "Could not get nodes: {}\n{}". \
+            format(get_nodes, e)
+
+    # 5. Check all nodes are in Deployed status
+    failed_nodes = []
+    for node in result:
+        if node["fqdn"] in config.get("skipped_nodes"):
+            continue
+        if "status_name" in node.keys():
+            if node["status_name"] != 'Deployed':
+                failed_nodes.append({node["fqdn"]: node["status_name"]})
+    assert not failed_nodes, "Some nodes have unexpected status in MAAS:" \
+                             "\n{}".format(json.dumps(failed_nodes, indent=4))
diff --git a/test_set/cvp-sanity/tests/test_ntp_sync.py b/test_set/cvp-sanity/tests/test_ntp_sync.py
index 6e35215..23a98a3 100644
--- a/test_set/cvp-sanity/tests/test_ntp_sync.py
+++ b/test_set/cvp-sanity/tests/test_ntp_sync.py
@@ -1,6 +1,8 @@
-import utils
+import json
 import os
 
+import utils
+
 
 def test_ntp_sync(local_salt_client):
     testname = os.path.basename(__file__).split('.')[0]
@@ -26,3 +28,38 @@
     assert not fail, 'SaltMaster time: {}\n' \
                      'Nodes with time mismatch:\n {}'.format(saltmaster_time,
                                                              fail)
+
+
+def test_ntp_peers_state(local_salt_client):
+    """Test gets ntpq peers state and check the system peer is declared"""
+
+    active_nodes = utils.get_active_nodes(os.path.basename(__file__))
+    state = local_salt_client.cmd(
+        utils.list_to_target_string(active_nodes, 'or'),
+        'cmd.run',
+        ['ntpq -pn'],
+        expr_form='compound')
+    final_result = {}
+    for node in state:
+        sys_peer_declared = False
+        ntpq_output = state[node].split('\n')
+        # if there is no 'remote' in the header of the ntpq output,
+        # the 'ntpq -pn' command failed and peers cannot be checked
+        if 'remote' not in ntpq_output[0]:
+            final_result[node] = ntpq_output
+            continue
+
+        # take the 3rd+ lines of the output (the actual peers);
+        # slicing cannot fail, an empty result simply means no peers
+        # were reported and the node is flagged below
+        peers = ntpq_output[2:]
+        for p in peers:
+            if p.split()[0].startswith("*"):
+                sys_peer_declared = True
+        if not sys_peer_declared:
+            final_result[node] = ntpq_output
+    assert not final_result,\
+        "NTP peers state is not expected on some nodes, could not find " \
+        "declared system peer:\n{}".format(json.dumps(final_result, indent=4))
diff --git a/test_set/cvp-sanity/tests/test_packet_checker.py b/test_set/cvp-sanity/tests/test_packet_checker.py
index f76c339..e9448ee 100644
--- a/test_set/cvp-sanity/tests/test_packet_checker.py
+++ b/test_set/cvp-sanity/tests/test_packet_checker.py
@@ -1,6 +1,8 @@
 import pytest
 import json
 
+import utils
+
 
 def test_check_package_versions(local_salt_client, nodes_in_group):
     output = local_salt_client.cmd("L@"+','.join(nodes_in_group),
@@ -40,6 +42,27 @@
         json.dumps(pkts_data, indent=4))
 
 
+def test_packages_are_latest(local_salt_client, nodes_in_group):
+    config = utils.get_configuration()
+    skip = config.get("test_packages")["skip_test"]
+    if skip:
+        pytest.skip("Test for the latest packages is disabled")
+    skipped_pkg = config.get("test_packages")["skipped_packages"]
+    info_salt = local_salt_client.cmd(
+        'L@' + ','.join(nodes_in_group),
+        'cmd.run', ['apt list --upgradable 2>/dev/null | grep -v Listing'],
+        expr_form='compound')
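+    # 'apt list --upgradable' prints lines like 'name/suite version arch ...',
+    # so the package name is everything before the first '/'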
+    for node in nodes_in_group:
+        result = []
+        if info_salt[node]:
+            upg_list = info_salt[node].split('\n')
+            for i in upg_list:
+                if i.split('/')[0] not in skipped_pkg:
+                    result.append(i)
+        assert not result, "Please check not latest packages at {}:\n{}".format(
+            node, "\n".join(result))
+
+
 def test_check_module_versions(local_salt_client, nodes_in_group):
     pre_check = local_salt_client.cmd(
         "L@"+','.join(nodes_in_group),
diff --git a/test_set/cvp-spt/tests/test_vm2vm.py b/test_set/cvp-spt/tests/test_vm2vm.py
index 7b3851b..4c2ec98 100644
--- a/test_set/cvp-spt/tests/test_vm2vm.py
+++ b/test_set/cvp-spt/tests/test_vm2vm.py
@@ -71,19 +71,19 @@
         
         transport1 = ssh.SSHTransport(vm_info[0]['fip'], 'ubuntu', password='dd', private_key=os_resources['keypair'].private_key)
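+        # run each iperf for 60 seconds (-t 60) to get steadier numbers than iperf's 10-second default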
 
-        result1 = transport1.exec_command('iperf -c {} | tail -n 1'.format(vm_info[1]['private_address']))
+        result1 = transport1.exec_command('iperf -c {} -t 60 | tail -n 1'.format(vm_info[1]['private_address']))
         print ' '.join(result1.split()[-2::])
         record_property("same {0}-{1}".format(zone1[0],zone2[0]), ' '.join(result1.split()[-2::]))
-        result2 = transport1.exec_command('iperf -c {} | tail -n 1'.format(vm_info[2]['private_address']))
+        result2 = transport1.exec_command('iperf -c {} -t 60 | tail -n 1'.format(vm_info[2]['private_address']))
         print ' '.join(result2.split()[-2::])
         record_property("diff host {0}-{1}".format(zone1[0],zone2[0]), ' '.join(result2.split()[-2::]))
-        result3 = transport1.exec_command('iperf -c {} -P 10 | tail -n 1'.format(vm_info[2]['private_address']))
+        result3 = transport1.exec_command('iperf -c {} -P 10 -t 60 | tail -n 1'.format(vm_info[2]['private_address']))
         print ' '.join(result3.split()[-2::])
         record_property("dif host 10 threads {0}-{1}".format(zone1[0],zone2[0]), ' '.join(result3.split()[-2::]))
-        result4 = transport1.exec_command('iperf -c {} | tail -n 1'.format(vm_info[2]['fip']))
+        result4 = transport1.exec_command('iperf -c {} -t 60 | tail -n 1'.format(vm_info[2]['fip']))
         print ' '.join(result4.split()[-2::])
         record_property("diff host fip {0}-{1}".format(zone1[0],zone2[0]), ' '.join(result4.split()[-2::]))
-        result5 = transport1.exec_command('iperf -c {} | tail -n 1'.format(vm_info[3]['private_address']))
+        result5 = transport1.exec_command('iperf -c {} -t 60 | tail -n 1'.format(vm_info[3]['private_address']))
         print ' '.join(result5.split()[-2::])
         record_property("diff host, diff net {0}-{1}".format(zone1[0],zone2[0]), ' '.join(result5.split()[-2::]))
 
diff --git a/test_set/cvp_sanity/tests/test_nodes.py b/test_set/cvp_sanity/tests/test_nodes.py
new file mode 100644
index 0000000..b33dc58
--- /dev/null
+++ b/test_set/cvp_sanity/tests/test_nodes.py
@@ -0,0 +1,35 @@
+import json
+import pytest
+
+from cvp_sanity import utils
+
+
+def test_nodes_deployed_in_maas(local_salt_client):
+    config = utils.get_configuration()
+    get_apis = local_salt_client.cmd(
+        'maas:cluster',
+        'cmd.run',
+        ['maas list'],
+        expr_form='pillar')
+    if not get_apis:
+        pytest.skip("Could not find MAAS on the environment")
+    profile = get_apis.values()[0].split(' ')[0]
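+    # 'maas list' prints '<profile> <api_url> <api_key>' per logged-in profile; take the first profile name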
+    get_nodes = local_salt_client.cmd('maas:cluster', 'cmd.run',
+                                      ['maas {} nodes read'.format(profile)],
+                                      expr_form='pillar')
+    result = ""
+    try:
+        result = json.loads(get_nodes.values()[0])
+    except ValueError as e:
+        assert result, "Could not get nodes: {}\n{}".\
+            format(get_nodes.values()[0], e)
+
+    failed_nodes = []
+    for node in result:
+        if node["fqdn"] in config.get("skipped_nodes"):
+            continue
+        if "status_name" in node.keys():
+            if node["status_name"] != 'Deployed':
+                failed_nodes.append({node["fqdn"]: node["status_name"]})
+    assert not failed_nodes, "Some nodes have unexpected status in MAAS:" \
+                             "\n{}".format(json.dumps(failed_nodes, indent=4))