Update error messages in sanity tests
Update error messages throughout the sanity tests so that they all use
the same (or a similar) error message format.
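
As part of this change, the nodes_in_group fixture is parametrized over
nodes.items() instead of nodes.values(), so each test that uses it now
receives a (group, nodes) tuple and unpacks it before targeting Salt
minions, roughly as follows (with '<command>' standing in for the actual
shell command each test runs):

    group, nodes = nodes_in_group
    result = local_salt_client.cmd(tgt="L@" + ','.join(nodes),
                                   param='<command>',
                                   expr_form='compound')

This makes the group name available to the updated error messages.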
Change-Id: I4db0517a5b264151ddaa6ea2c2a04d37e139944e
Related-PROD: #PROD-32415
diff --git a/test_set/cvp-sanity/fixtures/base.py b/test_set/cvp-sanity/fixtures/base.py
index 822d908..560f906 100644
--- a/test_set/cvp-sanity/fixtures/base.py
+++ b/test_set/cvp-sanity/fixtures/base.py
@@ -29,7 +29,7 @@
nodes = utils.calculate_groups()
-@pytest.fixture(scope='session', params=nodes.values(), ids=nodes.keys())
+@pytest.fixture(scope='session', params=nodes.items(), ids=nodes.keys())
def nodes_in_group(request):
return request.param
diff --git a/test_set/cvp-sanity/tests/ceph/test_ceph_haproxy.py b/test_set/cvp-sanity/tests/ceph/test_ceph_haproxy.py
index 4d2566c..1dad16d 100644
--- a/test_set/cvp-sanity/tests/ceph/test_ceph_haproxy.py
+++ b/test_set/cvp-sanity/tests/ceph/test_ceph_haproxy.py
@@ -19,4 +19,4 @@
continue
else:
fail[name] = info
- assert not fail, "Failed monitors: {}".format(fail)
+ assert not fail, "Some Ceph monitors are in the wrong state:\n{}".format(fail)
diff --git a/test_set/cvp-sanity/tests/ceph/test_ceph_pg_count.py b/test_set/cvp-sanity/tests/ceph/test_ceph_pg_count.py
index 28783e8..3911d27 100644
--- a/test_set/cvp-sanity/tests/ceph/test_ceph_pg_count.py
+++ b/test_set/cvp-sanity/tests/ceph/test_ceph_pg_count.py
@@ -86,9 +86,11 @@
if pools_pg_num[pool] < correct_pg_num:
wrong_pg_num_pools.append(pool)
- assert not pg_pgp_not_equal_pools, \
- "For pools {} PG and PGP are not equal " \
- "but should be".format(pg_pgp_not_equal_pools)
- assert not wrong_pg_num_pools, "For pools {} " \
- "PG number lower than Correct PG number, " \
- "but should be equal or higher".format(wrong_pg_num_pools)
+ assert not pg_pgp_not_equal_pools, (
+ "PG and PGP are not equal for the following pools:\n{}".format(
+ pg_pgp_not_equal_pools)
+ )
+ assert not wrong_pg_num_pools, (
+ "PG number is lower than Correct PG number (but should be equal or "
+ "higher) for the following pools:\n{}".format(wrong_pg_num_pools)
+ )
diff --git a/test_set/cvp-sanity/tests/ceph/test_ceph_replicas.py b/test_set/cvp-sanity/tests/ceph/test_ceph_replicas.py
index 4c93fe6..7d8c63a 100644
--- a/test_set/cvp-sanity/tests/ceph/test_ceph_replicas.py
+++ b/test_set/cvp-sanity/tests/ceph/test_ceph_replicas.py
@@ -39,5 +39,8 @@
if replica == 'size' and value < 3:
error.append(pool + " " + replica + " "
+ str(value) + " must be 3")
-
- assert not error, "Wrong pool replicas found\n{}".format(error)
+
+ assert not error, (
+ "There are wrong pool replicas for the following pools:\n{}".format(
+ error)
+ )
diff --git a/test_set/cvp-sanity/tests/ceph/test_ceph_status.py b/test_set/cvp-sanity/tests/ceph/test_ceph_status.py
index 0c0ef0c..2c32106 100644
--- a/test_set/cvp-sanity/tests/ceph/test_ceph_status.py
+++ b/test_set/cvp-sanity/tests/ceph/test_ceph_status.py
@@ -9,9 +9,8 @@
expr_form='pillar')
if not osd_fail:
pytest.skip("Ceph is not found on this environment")
- assert not osd_fail.values()[0], \
- "Some osds are in down state or ceph is not found".format(
- osd_fail.values()[0])
+ assert not osd_fail.values()[0], (
+ "Some OSDs are in down state:\n{}".format(osd_fail.values()[0]))
def test_ceph_health(local_salt_client):
@@ -32,5 +31,4 @@
else:
summary = status["summary"]
- assert health == "HEALTH_OK",\
- "Ceph status is not expected. {}".format(summary)
+ assert health == "HEALTH_OK", "Ceph is not healthy:\n{}".format(summary)
diff --git a/test_set/cvp-sanity/tests/ceph/test_ceph_tell_bench.py b/test_set/cvp-sanity/tests/ceph/test_ceph_tell_bench.py
index b275022..5dee40f 100644
--- a/test_set/cvp-sanity/tests/ceph/test_ceph_tell_bench.py
+++ b/test_set/cvp-sanity/tests/ceph/test_ceph_tell_bench.py
@@ -49,8 +49,8 @@
if math.fabs(mbps_avg - mbps) > 10:
result[osd] = osd_pool[osd]
- assert len(result) == 0, \
- "Performance of {0} OSD(s) lower " \
- "than AVG performance ({1} mbps), " \
- "please check Ceph for possible problems".format(
- json.dumps(result, indent=4), mbps_avg)
+ assert len(result) == 0, (
+ "Performance of {0} OSD(s) is lower than AVG performance ({1} mbps).\n"
+ "Please check Ceph for possible problems.".format(
+ json.dumps(result, indent=4), mbps_avg)
+ )
diff --git a/test_set/cvp-sanity/tests/test_cinder_services.py b/test_set/cvp-sanity/tests/test_cinder_services.py
index a612bca..9e13e36 100644
--- a/test_set/cvp-sanity/tests/test_cinder_services.py
+++ b/test_set/cvp-sanity/tests/test_cinder_services.py
@@ -13,8 +13,9 @@
service_down = local_salt_client.cmd_any(
tgt='keystone:server',
param='. /root/keystonercv3; cinder service-list | grep "down\|disabled"')
- assert service_down == '', \
- '''Some cinder services are in wrong state'''
+ assert service_down == '', (
+ "Some Cinder services are in wrong state:\n{}".format(service_down))
+
@pytest.mark.full
def test_cinder_services_has_all_backends(local_salt_client, check_cinder_backends):
@@ -30,7 +31,7 @@
tgt='keystone:server',
param='. /root/keystonercv3; cinder service-list | grep "volume" |grep -c -v -e "lvm"')
backends_num = len(backends_cinder.keys())
- assert cinder_volume == str(backends_num), \
- 'Number of cinder-volume services ({0}) does not match ' \
- 'number of volume backends ({1})'.format(
- cinder_volume, str(backends_num))
+ assert cinder_volume == str(backends_num), (
+ 'Number of cinder-volume services ({0}) does not match number of '
+ 'volume backends ({1}).'.format(cinder_volume, str(backends_num))
+ )
diff --git a/test_set/cvp-sanity/tests/test_contrail.py b/test_set/cvp-sanity/tests/test_contrail.py
index 2ea9bf4..716c864 100644
--- a/test_set/cvp-sanity/tests/test_contrail.py
+++ b/test_set/cvp-sanity/tests/test_contrail.py
@@ -36,9 +36,10 @@
node=node, service=name, status=status)
broken_services.append(err_msg)
- assert not broken_services, 'Broken services: {}'.format(json.dumps(
- broken_services,
- indent=4))
+ assert not broken_services, (
+ 'Some Contrail services are in the wrong state on computes:\n{}'.format(
+ json.dumps(broken_services, indent=4))
+ )
@pytest.mark.smoke
def test_contrail_node_status(local_salt_client, check_openstack):
@@ -66,9 +67,10 @@
node=node, service=name, status=status)
broken_services.append(err_msg)
- assert not broken_services, 'Broken services: {}'.format(json.dumps(
- broken_services,
- indent=4))
+ assert not broken_services, (
+ 'Some Contrail services are in the wrong state on Contrail '
+ 'controllers:\n{}'.format(json.dumps(broken_services, indent=4))
+ )
@pytest.mark.smoke
def test_contrail_vrouter_count(local_salt_client, check_openstack):
diff --git a/test_set/cvp-sanity/tests/test_default_gateway.py b/test_set/cvp-sanity/tests/test_default_gateway.py
index c1a36da..259abba 100644
--- a/test_set/cvp-sanity/tests/test_default_gateway.py
+++ b/test_set/cvp-sanity/tests/test_default_gateway.py
@@ -4,8 +4,9 @@
@pytest.mark.full
def test_check_default_gateways(local_salt_client, nodes_in_group):
+ group, nodes = nodes_in_group
netstat_info = local_salt_client.cmd(
- tgt="L@"+','.join(nodes_in_group),
+ tgt="L@"+','.join(nodes),
param='ip r | sed -n 1p',
expr_form='compound')
@@ -20,7 +21,7 @@
else:
gateways[gateway].append(node)
- assert len(gateways.keys()) == 1, \
- "There were found few gateways: {gw}".format(
- gw=json.dumps(gateways, indent=4)
+ assert len(gateways.keys()) == 1, (
+ "There is a problem with default gateway for '{}' group of nodes:\n"
+ "{}".format(group, json.dumps(gateways, indent=4))
)
diff --git a/test_set/cvp-sanity/tests/test_drivetrain.py b/test_set/cvp-sanity/tests/test_drivetrain.py
index 5281081..6741380 100644
--- a/test_set/cvp-sanity/tests/test_drivetrain.py
+++ b/test_set/cvp-sanity/tests/test_drivetrain.py
@@ -130,8 +130,8 @@
finally:
# Delete test project
server.post("/projects/"+test_proj_name+"/deleteproject~delete")
- assert gerrit_error == '',\
- 'Something is wrong with Gerrit'.format(gerrit_error)
+ assert gerrit_error == '', (
+ 'There is an error during Gerrit operations:\n{}'.format(gerrit_error))
@pytest.mark.full
@@ -223,17 +223,13 @@
ldap_server.modify_s(admin_gr_dn,[(ldap.MOD_DELETE, 'memberUid', [test_user_name],)],)
ldap_server.delete_s(test_user)
ldap_server.unbind_s()
- assert ldap_error == '', \
- '''Something is wrong with connection to LDAP:
- {0}'''.format(e)
- assert jenkins_error == '', \
- '''Connection to Jenkins was not established:
- {0}'''.format(e)
- assert gerrit_error == '', \
- '''Connection to Gerrit was not established:
- {0}'''.format(e)
- assert ldap_result !=[], \
- '''Test user was not found'''
+ assert ldap_error == '', (
+ "There is an error with connection to LDAP:\n{}".format(e))
+ assert jenkins_error == '', (
+ "Connection to Jenkins is not established:\n{}".format(e))
+ assert gerrit_error == '', (
+ "Connection to Gerrit is not established:\n{}".format(e))
+ assert ldap_result != [], "Test user {} is not found".format(test_user_name)
@pytest.mark.sl_dup
@@ -260,10 +256,11 @@
if len(wrong_items) == 0:
break
else:
- logging.error('''Some DriveTrain services doesn't have expected number of replicas:
- {}\n'''.format(json.dumps(wrong_items, indent=4)))
time.sleep(5)
- assert len(wrong_items) == 0
+ assert len(wrong_items) == 0, (
+ "Some DriveTrain services don't have expected number of replicas:\n"
+ "{}".format(json.dumps(wrong_items, indent=4))
+ )
@pytest.mark.full
@@ -306,9 +303,10 @@
elif get_tag(image) != actual_images[im_name]:
mismatch[im_name] = 'has {actual} version instead of {expected}'.format(
actual=actual_images[im_name], expected=get_tag(image))
- assert len(mismatch) == 0, \
- '''Some DriveTrain components do not have expected versions:
- {}'''.format(json.dumps(mismatch, indent=4))
+ assert len(mismatch) == 0, (
+ "Some DriveTrain components do not have expected versions:\n{}".format(
+ json.dumps(mismatch, indent=4))
+ )
@pytest.mark.full
@@ -355,9 +353,10 @@
"Expected {2}".format(job_name,
actual_version,
expected_version))
- assert len(version_mismatch) == 0, \
- '''Some DriveTrain jobs have version/branch mismatch:
- {}'''.format(json.dumps(version_mismatch, indent=4))
+ assert len(version_mismatch) == 0, (
+ "Some DriveTrain jobs have version/branch mismatch:\n{}".format(
+ json.dumps(version_mismatch, indent=4))
+ )
@pytest.mark.full
@@ -399,6 +398,7 @@
job_result = server.get_build_info(jenkins_test_job, next_build_num)['result']
else:
pytest.skip("The job {0} was not found").format(jenkins_test_job)
- assert job_result == 'SUCCESS', \
- '''Test job '{0}' build was not successful or timeout is too small
- '''.format(jenkins_test_job)
+ assert job_result == 'SUCCESS', (
+ "Test job '{}' build is not successful or timeout is too "
+ "small.".format(jenkins_test_job)
+ )
diff --git a/test_set/cvp-sanity/tests/test_duplicate_ips.py b/test_set/cvp-sanity/tests/test_duplicate_ips.py
index 392838f..ba52f64 100644
--- a/test_set/cvp-sanity/tests/test_duplicate_ips.py
+++ b/test_set/cvp-sanity/tests/test_duplicate_ips.py
@@ -50,5 +50,5 @@
msg = ("\nDuplicate IP addresses found:\n{}"
"\n\nThe following interfaces are affected:\n{}"
- "".format(pformat(dup_ips), pformat(dup_ifaces)))
+ "".format(pformat(dup_ips), pformat(dup_ifaces)))
assert no_dups, msg
diff --git a/test_set/cvp-sanity/tests/test_etc_hosts.py b/test_set/cvp-sanity/tests/test_etc_hosts.py
index 61d36ef..bf881e7 100644
--- a/test_set/cvp-sanity/tests/test_etc_hosts.py
+++ b/test_set/cvp-sanity/tests/test_etc_hosts.py
@@ -21,4 +21,4 @@
result[node] = nd
assert len(result) <= 1, \
"Some hosts are not presented in /etc/hosts: {0}".format(
- json.dumps(result, indent=4))
\ No newline at end of file
+ json.dumps(result, indent=4))
diff --git a/test_set/cvp-sanity/tests/test_k8s.py b/test_set/cvp-sanity/tests/test_k8s.py
index 24b53e4..aac6edf 100644
--- a/test_set/cvp-sanity/tests/test_k8s.py
+++ b/test_set/cvp-sanity/tests/test_k8s.py
@@ -22,9 +22,8 @@
if 'Healthy' not in line:
errors.append(line)
break
- assert not errors, 'k8s is not healthy: {}'.format(json.dumps(
- errors,
- indent=4))
+ assert not errors, 'k8s is not healthy:\n{}'.format(
+ json.dumps(errors, indent=4))
@pytest.mark.xfail
@@ -46,9 +45,8 @@
if 'Ready' != line.split()[1]:
errors.append(line)
break
- assert not errors, 'k8s is not healthy: {}'.format(json.dumps(
- errors,
- indent=4))
+ assert not errors, 'k8s is not healthy:\n{}'.format(
+ json.dumps(errors, indent=4))
def test_k8s_get_calico_status(local_salt_client):
@@ -68,9 +66,8 @@
else:
if 'up' not in line or 'Established' not in line:
errors.append(line)
- assert not errors, 'Calico node status is not good: {}'.format(json.dumps(
- errors,
- indent=4))
+ assert not errors, 'Calico node status is not good:\n{}'.format(
+ json.dumps(errors, indent=4))
def test_k8s_cluster_status(local_salt_client):
@@ -90,9 +87,8 @@
if 'is running' not in line:
errors.append(line)
break
- assert not errors, 'k8s cluster info is not good: {}'.format(json.dumps(
- errors,
- indent=4))
+ assert not errors, 'k8s cluster info is not good:\n{}'.format(
+ json.dumps(errors, indent=4))
def test_k8s_kubelet_status(local_salt_client):
@@ -108,8 +104,8 @@
for node in result:
if not result[node]:
errors.append(node)
- assert not errors, 'Kublete is not running on these nodes: {}'.format(
- errors)
+ assert not errors, 'Kubelet is not running on the following nodes:\n{}'.format(
+ errors)
def test_k8s_check_system_pods_status(local_salt_client):
@@ -130,9 +126,8 @@
if 'Running' not in line:
errors.append(line)
break
- assert not errors, 'Some system pods are not running: {}'.format(json.dumps(
- errors,
- indent=4))
+ assert not errors, 'Some system pods are not running:\n{}'.format(
+ json.dumps(errors, indent=4))
def test_check_k8s_image_availability(local_salt_client):
@@ -173,8 +168,10 @@
tgt='etcd:server',
param='kubernetes:common:addons:dashboard:public_ip')
- assert external_ip, "Kubernetes dashboard public ip is not found in pillars"
- assert external_ip.__len__() > 0, "Kubernetes dashboard is enabled but not defined in pillars"
+ assert external_ip, (
+ "Kubernetes dashboard public ip is not found in pillars")
+ assert external_ip.__len__() > 0, (
+ "Kubernetes dashboard is enabled but not defined in pillars")
# dashboard port 8443 is hardcoded in kubernetes formula
url = "https://{}:8443".format(external_ip)
check = local_salt_client.cmd(
@@ -182,6 +179,7 @@
param='curl {} 2>&1 | grep kubernetesDashboard'.format(url),
expr_form='pillar'
)
- assert len(check.values()[0]) != 0, \
- 'Kubernetes dashboard is not reachable on {} ' \
- 'from ctl nodes'.format(url)
+ assert len(check.values()[0]) != 0, (
+ 'Kubernetes dashboard is not reachable on {} from '
+ 'ctl nodes'.format(url)
+ )
diff --git a/test_set/cvp-sanity/tests/test_kernel_settings.py b/test_set/cvp-sanity/tests/test_kernel_settings.py
index fa1d946..f13f685 100644
--- a/test_set/cvp-sanity/tests/test_kernel_settings.py
+++ b/test_set/cvp-sanity/tests/test_kernel_settings.py
@@ -10,6 +10,7 @@
# Compare that value in sysctl equals to the same value in pillars
"""
+
def normalize_value(value_in_string):
"""
Changes to INT if value_in_string is parcible to int
@@ -29,8 +30,9 @@
return value_in_string
issues = dict()
+ group, nodes = nodes_in_group
expected_kernel_params_by_nodes = local_salt_client.cmd(
- tgt="L@"+','.join(nodes_in_group),
+ tgt="L@"+','.join(nodes),
fun='pillar.get',
param="linux:system:kernel:sysctl",
expr_form='compound'
@@ -65,6 +67,8 @@
if differences.__len__() > 0:
issues[node] = differences
- assert issues.__len__() == 0, json.dumps(issues, indent=4)
-
-
+ assert issues.__len__() == 0, (
+ "There are inconsistencies between kernel settings defined in pillars "
+ "and actual settings on nodes of '{}' group: {}".format(
+ group, json.dumps(issues, indent=4))
+ )
diff --git a/test_set/cvp-sanity/tests/test_mounts.py b/test_set/cvp-sanity/tests/test_mounts.py
index dfb67e9..cc6f201 100644
--- a/test_set/cvp-sanity/tests/test_mounts.py
+++ b/test_set/cvp-sanity/tests/test_mounts.py
@@ -9,8 +9,9 @@
# Get all mount points from each node in the group with the next command: `df -h | awk '{print $1}'`
# Check that all mount points are similar for each node in the group
"""
+ group, nodes = nodes_in_group
exclude_mounts = 'grep -v "overlay\|tmpfs\|shm\|Filesystem"'
- mounts_by_nodes = local_salt_client.cmd(tgt="L@"+','.join(nodes_in_group),
+ mounts_by_nodes = local_salt_client.cmd(tgt="L@"+','.join(nodes),
param="df -h | awk '{print $1}'" +
" |" + exclude_mounts,
expr_form='compound')
@@ -46,6 +47,7 @@
if not result:
pytest.skip("These nodes are skipped")
- assert len(set(result.values())) == 1,\
- "The nodes in the same group have different mounts:\n{}".format(
- json.dumps(pretty_result, indent=4))
+ assert len(set(result.values())) == 1, (
+ "Nodes in '{}' group have different mounts:\n{}".format(
+ group, json.dumps(pretty_result, indent=4))
+ )
diff --git a/test_set/cvp-sanity/tests/test_mtu.py b/test_set/cvp-sanity/tests/test_mtu.py
index 8dc4a79..7af3303 100644
--- a/test_set/cvp-sanity/tests/test_mtu.py
+++ b/test_set/cvp-sanity/tests/test_mtu.py
@@ -12,9 +12,10 @@
skipped_ifaces = config.get(testname)["skipped_ifaces"] or \
["bonding_masters", "lo", "veth", "tap", "cali", "qv", "qb", "br-int",
"vxlan", "virbr0", "virbr0-nic", "docker0", "o-hm0"]
+ group, nodes = nodes_in_group
total = {}
network_info = local_salt_client.cmd(
- tgt="L@"+','.join(nodes_in_group),
+ tgt="L@"+','.join(nodes),
param='ls /sys/class/net/',
expr_form='compound')
@@ -75,6 +76,7 @@
row.sort()
row.insert(0, interf)
mtu_data.append(row)
- assert len(mtu_data) == 0, \
- "Several problems found: {0}".format(
- json.dumps(mtu_data, indent=4))
\ No newline at end of file
+ assert len(mtu_data) == 0, (
+ "Non-uniform MTUs are set on the same node interfaces of '{}' group "
+ "of nodes: {}".format(group, json.dumps(mtu_data, indent=4))
+ )
\ No newline at end of file
diff --git a/test_set/cvp-sanity/tests/test_nodes.py b/test_set/cvp-sanity/tests/test_nodes.py
index 6ab48df..082930e 100644
--- a/test_set/cvp-sanity/tests/test_nodes.py
+++ b/test_set/cvp-sanity/tests/test_nodes.py
@@ -16,5 +16,5 @@
pytest.fail(
"Could not check the result: {}\n"
"Nodes status result: {}".format(e, result))
- assert not statuses["down"], "Some minions are down:\n {}".format(
+ assert not statuses["down"], "Some minions are down:\n{}".format(
statuses["down"])
diff --git a/test_set/cvp-sanity/tests/test_nodes_in_maas.py b/test_set/cvp-sanity/tests/test_nodes_in_maas.py
index 74ec865..62c0fd8 100644
--- a/test_set/cvp-sanity/tests/test_nodes_in_maas.py
+++ b/test_set/cvp-sanity/tests/test_nodes_in_maas.py
@@ -55,8 +55,7 @@
try:
result = json.loads(get_nodes.values()[0])
except ValueError as e:
- assert result, "Could not get nodes: {}\n{}". \
- format(get_nodes, e)
+ assert result, "Could not get '{}' nodes:\n{}".format(get_nodes, e)
# 5. Check all nodes are in Deployed status
failed_nodes = []
@@ -66,5 +65,7 @@
if "status_name" in node.keys():
if node["status_name"] != 'Deployed':
failed_nodes.append({node["fqdn"]: node["status_name"]})
- assert not failed_nodes, "Some nodes have unexpected status in MAAS:" \
- "\n{}".format(json.dumps(failed_nodes, indent=4))
+ assert not failed_nodes, (
+ "Some nodes have unexpected status in MAAS:\n{}".format(
+ json.dumps(failed_nodes, indent=4))
+ )
diff --git a/test_set/cvp-sanity/tests/test_nova_services.py b/test_set/cvp-sanity/tests/test_nova_services.py
index 54ed76e..a6fe959 100644
--- a/test_set/cvp-sanity/tests/test_nova_services.py
+++ b/test_set/cvp-sanity/tests/test_nova_services.py
@@ -10,8 +10,8 @@
param='. /root/keystonercv3;'
'nova service-list | grep "down\|disabled" | grep -v "Forced down"')
- assert result == '', \
- '''Some nova services are in wrong state'''
+ assert result == '', (
+ "Some Nova services are in wrong state:\n{}".format(result))
@pytest.mark.smoke
@@ -34,9 +34,11 @@
param='. /root/keystonercv3;'
'openstack hypervisor list | egrep -v "\-----|ID" | wc -l')
- assert all_cmp_services == hypervisors, \
- "Number of nova-compute services ({}) does not match number of " \
+ assert all_cmp_services == hypervisors, (
+ "Number of nova-compute services ({}) does not match number of "
"hypervisors ({}).".format(all_cmp_services, hypervisors)
- assert enabled_cmp_services == hosts, \
- "Number of enabled nova-compute services ({}) does not match number \
- of hosts ({}).".format(enabled_cmp_services, hosts)
+ )
+ assert enabled_cmp_services == hosts, (
+ "Number of enabled nova-compute services ({}) does not match number "
+ "of hosts ({}).".format(enabled_cmp_services, hosts)
+ )
diff --git a/test_set/cvp-sanity/tests/test_ntp_sync.py b/test_set/cvp-sanity/tests/test_ntp_sync.py
index f2bbe8f..998aa31 100644
--- a/test_set/cvp-sanity/tests/test_ntp_sync.py
+++ b/test_set/cvp-sanity/tests/test_ntp_sync.py
@@ -26,8 +26,10 @@
result[time].sort()
else:
result[time] = [node]
- assert len(result) <= 1, 'Not all nodes have the same time:\n {}'.format(
- json.dumps(result, indent=4))
+ assert len(result) <= 1, (
+ 'Time is out of sync on the following nodes:\n{}'.format(
+ json.dumps(result, indent=4))
+ )
@pytest.mark.smoke
@@ -62,6 +64,7 @@
sys_peer_declared = True
if not sys_peer_declared:
final_result[node] = ntpq_output
- assert not final_result,\
- "NTP peers state is not expected on some nodes, could not find " \
+ assert not final_result, (
+ "NTP peers state is not as expected on some nodes; could not find "
"declared system peer:\n{}".format(json.dumps(final_result, indent=4))
+ )
diff --git a/test_set/cvp-sanity/tests/test_oss.py b/test_set/cvp-sanity/tests/test_oss.py
index 469323e..f28917c 100644
--- a/test_set/cvp-sanity/tests/test_oss.py
+++ b/test_set/cvp-sanity/tests/test_oss.py
@@ -39,6 +39,7 @@
if row['svname'] != 'FRONTEND' and row['status'] != 'UP':
wrong_data.append(info)
- assert len(wrong_data) == 0, \
- '''Some haproxy services are in wrong state
- {}'''.format(json.dumps(wrong_data, indent=4))
+ assert len(wrong_data) == 0, (
+ "Some haproxy services are in wrong state:\n{}".format(
+ json.dumps(wrong_data, indent=4))
+ )
diff --git a/test_set/cvp-sanity/tests/test_packet_checker.py b/test_set/cvp-sanity/tests/test_packet_checker.py
index b7c2064..c132294 100644
--- a/test_set/cvp-sanity/tests/test_packet_checker.py
+++ b/test_set/cvp-sanity/tests/test_packet_checker.py
@@ -26,7 +26,8 @@
# defines packages specific to the concrete nodes
inconsistency_rule = {"kvm03": ["rsync", "sysstat", "xz-utils"], "log01": ["python-elasticsearch"], "ctl01": ["python-gnocchiclient", "python-ujson"]}
exclude_packages = utils.get_configuration().get("skipped_packages", [])
- packages_versions = local_salt_client.cmd(tgt="L@"+','.join(nodes_in_group),
+ group, nodes = nodes_in_group
+ packages_versions = local_salt_client.cmd(tgt="L@"+','.join(nodes),
fun='lowpkg.list_pkgs',
expr_form='compound')
# Let's exclude cid01 and dbs01 nodes from this check
@@ -38,8 +39,8 @@
cluster_domain = local_salt_client.pillar_get(
param='_param:cluster_domain') or '.local'
gtw01 += '.' + cluster_domain
- if gtw01 in nodes_in_group:
- octavia = local_salt_client.cmd(tgt="L@" + ','.join(nodes_in_group),
+ if gtw01 in nodes:
+ octavia = local_salt_client.cmd(tgt="L@" + ','.join(nodes),
fun='pillar.get',
param='octavia:manager:enabled',
expr_form='compound')
@@ -48,7 +49,7 @@
exclude_nodes.append(gtw01)
logging.info("gtw01 node is skipped in test_check_package_versions")
- total_nodes = [i for i in nodes_in_group if i not in exclude_nodes]
+ total_nodes = [i for i in nodes if i not in exclude_nodes]
if len(total_nodes) < 2:
pytest.skip("Nothing to compare - only 1 node")
nodes_with_packages = []
@@ -77,13 +78,15 @@
row.append("{}: No package".format(node))
if diff.count(diff[0]) < len(nodes_with_packages):
- if not is_deb_in_exception(inconsistency_rule, deb, row):
+ if not is_deb_in_exception(inconsistency_rule, deb, row):
row.sort()
row.insert(0, deb)
packages_with_different_versions.append(row)
- assert len(packages_with_different_versions) == 0, \
- "Several problems found: {0}".format(
- json.dumps(packages_with_different_versions, indent=4))
+ assert len(packages_with_different_versions) == 0, (
+ "Non-uniform package versions are installed on '{}' group of nodes:\n"
+ "{}".format(
+ group, json.dumps(packages_with_different_versions, indent=4))
+ )
@pytest.mark.full
@@ -93,19 +96,22 @@
if skip:
pytest.skip("Test for the latest packages is disabled")
skipped_pkg = config.get("test_packages")["skipped_packages"]
+ group, nodes = nodes_in_group
info_salt = local_salt_client.cmd(
- tgt='L@' + ','.join(nodes_in_group),
+ tgt='L@' + ','.join(nodes),
param='apt list --upgradable 2>/dev/null | grep -v Listing',
expr_form='compound')
- for node in nodes_in_group:
+ for node in nodes:
result = []
if info_salt[node]:
upg_list = info_salt[node].split('\n')
for i in upg_list:
if i.split('/')[0] not in skipped_pkg:
result.append(i)
- assert not result, "Please check not latest packages at {}:\n{}".format(
- node, "\n".join(result))
+ assert not result, (
+ "Packages are not of latest version on '{}' node:\n{}".format(
+ node, "\n".join(result))
+ )
@pytest.mark.full
@@ -113,8 +119,9 @@
# defines modules specific to the concrete nodes
inconsistency_rule = {"ctl01": ["gnocchiclient", "ujson"], "log01": ["elasticsearch"]}
exclude_modules = utils.get_configuration().get("skipped_modules", [])
+ group, nodes = nodes_in_group
pre_check = local_salt_client.cmd(
- tgt="L@"+','.join(nodes_in_group),
+ tgt="L@"+','.join(nodes),
param='dpkg -l | grep "python-pip "',
expr_form='compound')
if pre_check.values().count('') > 0:
@@ -129,8 +136,8 @@
cluster_domain = local_salt_client.pillar_get(
param='_param:cluster_domain') or '.local'
gtw01 += '.' + cluster_domain
- if gtw01 in nodes_in_group:
- octavia = local_salt_client.cmd(tgt="L@" + ','.join(nodes_in_group),
+ if gtw01 in nodes:
+ octavia = local_salt_client.cmd(tgt="L@" + ','.join(nodes),
fun='pillar.get',
param='octavia:manager:enabled',
expr_form='compound')
@@ -143,8 +150,9 @@
if len(total_nodes) < 2:
pytest.skip("Nothing to compare - only 1 node")
- list_of_pip_packages = local_salt_client.cmd(tgt="L@"+','.join(nodes_in_group),
- fun='pip.freeze', expr_form='compound')
+ list_of_pip_packages = local_salt_client.cmd(
+ tgt="L@"+','.join(nodes),
+ fun='pip.freeze', expr_form='compound')
nodes_with_packages = []
@@ -172,7 +180,8 @@
row.sort()
row.insert(0, deb)
modules_with_different_versions.append(row)
- assert len(modules_with_different_versions) == 0, \
- "Several problems found: {0}".format(
- json.dumps(modules_with_different_versions, indent=4))
-
+ assert len(modules_with_different_versions) == 0, (
+ "Non-uniform pip modules are installed on '{}' group of nodes:\n"
+ "{}".format(
+ group, json.dumps(modules_with_different_versions, indent=4))
+ )
diff --git a/test_set/cvp-sanity/tests/test_rabbit_cluster.py b/test_set/cvp-sanity/tests/test_rabbit_cluster.py
index d0bb44b..f90c54f 100644
--- a/test_set/cvp-sanity/tests/test_rabbit_cluster.py
+++ b/test_set/cvp-sanity/tests/test_rabbit_cluster.py
@@ -44,9 +44,9 @@
if required_cluster_size_dict[node] != running_nodes_count:
control_dict.update({node: running_nodes_count})
- assert not len(control_dict), "Inconsistency found within cloud. " \
- "RabbitMQ cluster is probably broken, " \
- "the cluster size for each node " \
- "should be: {} but the following " \
- "nodes has other values: {}".format(
- len(required_cluster_size_dict.keys()), control_dict)
+ assert not len(control_dict), (
+ "RabbitMQ cluster is probably "
+ "broken - the cluster size for each node should be ({}),\nbut the "
+ "following nodes have other values:\n{}".format(
+ len(required_cluster_size_dict.keys()), control_dict)
+ )
diff --git a/test_set/cvp-sanity/tests/test_repo_list.py b/test_set/cvp-sanity/tests/test_repo_list.py
index 92c45c0..43fc0e6 100644
--- a/test_set/cvp-sanity/tests/test_repo_list.py
+++ b/test_set/cvp-sanity/tests/test_repo_list.py
@@ -6,8 +6,8 @@
@pytest.mark.full
def test_list_of_repo_on_nodes(local_salt_client, nodes_in_group):
# TODO: pillar.get
- info_salt = local_salt_client.cmd(tgt='L@' + ','.join(
- nodes_in_group),
+ group, nodes = nodes_in_group
+ info_salt = local_salt_client.cmd(tgt='L@' + ','.join(nodes),
fun='pillar.get',
param='linux:system:repo',
expr_form='compound')
@@ -26,8 +26,7 @@
repos.pop(repo)
raw_actual_info = local_salt_client.cmd(
- tgt='L@' + ','.join(
- nodes_in_group),
+ tgt='L@' + ','.join(nodes),
param='cat /etc/apt/sources.list.d/*;'
'cat /etc/apt/sources.list|grep deb|grep -v "#"',
expr_form='compound', check_status=True)
@@ -57,8 +56,9 @@
rows.append("{}: {}".format("config", "+"))
rows.append("{}: No repo".format('pillars'))
diff[repo] = rows
- assert fail_counter == 0, \
- "Several problems found: {0}".format(
- json.dumps(diff, indent=4))
+ assert fail_counter == 0, (
+ "Non-uniform repos are on '{}' group of nodes:\n{}".format(
+ group, json.dumps(diff, indent=4))
+ )
if fail_counter == 0 and len(diff) > 0:
logging.warning("\nWarning: nodes contain more repos than reclass")
diff --git a/test_set/cvp-sanity/tests/test_salt_master.py b/test_set/cvp-sanity/tests/test_salt_master.py
index 75a0116..bb1b65c 100644
--- a/test_set/cvp-sanity/tests/test_salt_master.py
+++ b/test_set/cvp-sanity/tests/test_salt_master.py
@@ -9,8 +9,10 @@
tgt='salt:master',
param='cd /srv/salt/reclass/classes/cluster/; git status',
expr_form='pillar', check_status=True)
- assert 'nothing to commit' in git_status.values()[0], 'Git status showed' \
- ' some unmerged changes {}'''.format(git_status.values()[0])
+ assert 'nothing to commit' in git_status.values()[0], (
+ "Git status shows some unmerged changes:\n{}".format(
+ git_status.values()[0])
+ )
@pytest.mark.smoke
@@ -21,8 +23,7 @@
expr_form='pillar', check_status=True)
result = reclass[reclass.keys()[0]][-1]
- assert result == '0', 'Reclass is broken' \
- '\n {}'.format(reclass)
+ assert result == '0', 'Reclass is broken:\n{}'.format(reclass)
@pytest.mark.smoke
@@ -43,5 +44,7 @@
' reclass-salt --top output'
reclass_nodes = sorted(json.loads(reclass_nodes.strip("}")).keys())
salt_nodes = sorted([x for xs in json.loads(salt).values() for x in xs])
- assert salt_nodes == reclass_nodes, 'Mismatch between registered salt nodes (left) ' \
- 'and defined reclass nodes (right)'
+ assert salt_nodes == reclass_nodes, (
+ "Mismatch between registered salt nodes (left) and node defined in "
+ "reclass (right)."
+ )
diff --git a/test_set/cvp-sanity/tests/test_services.py b/test_set/cvp-sanity/tests/test_services.py
index 0267af6..a014d85 100644
--- a/test_set/cvp-sanity/tests/test_services.py
+++ b/test_set/cvp-sanity/tests/test_services.py
@@ -17,7 +17,8 @@
Inconsistent services will be checked with another test case
"""
exclude_services = utils.get_configuration().get("skipped_services", [])
- services_by_nodes = local_salt_client.cmd(tgt="L@"+','.join(nodes_in_group),
+ group, nodes = nodes_in_group
+ services_by_nodes = local_salt_client.cmd(tgt="L@"+','.join(nodes),
fun='service.get_all',
expr_form='compound')
@@ -30,8 +31,8 @@
cluster_domain = local_salt_client.pillar_get(
param='_param:cluster_domain') or '.local'
gtw01 += '.' + cluster_domain
- if gtw01 in nodes_in_group:
- octavia = local_salt_client.cmd(tgt="L@" + ','.join(nodes_in_group),
+ if gtw01 in nodes:
+ octavia = local_salt_client.cmd(tgt="L@" + ','.join(nodes),
fun='pillar.get',
param='octavia:manager:enabled',
expr_form='compound')
@@ -47,7 +48,7 @@
for node in services_by_nodes:
if not services_by_nodes[node]:
# TODO: do not skip node
- logging.info("Node {} is skipped".format (node))
+ logging.info("Node {} is skipped".format(node))
continue
nodes.append(node)
all_services.update(services_by_nodes[node])
@@ -72,9 +73,10 @@
report.sort()
report.insert(0, srv)
pkts_data.append(report)
- assert len(pkts_data) == 0, \
- "Several problems found: {0}".format(
- json.dumps(pkts_data, indent=4))
+ assert len(pkts_data) == 0, (
+ "Non-uniform services are running on '{}' group of nodes:\n{}".format(
+ group, json.dumps(pkts_data, indent=4))
+ )
# TODO : remake this test to make workable https://mirantis.jira.com/browse/PROD-25958
diff --git a/test_set/cvp-sanity/tests/test_single_vip.py b/test_set/cvp-sanity/tests/test_single_vip.py
index cee92ec..f0e7d68 100644
--- a/test_set/cvp-sanity/tests/test_single_vip.py
+++ b/test_set/cvp-sanity/tests/test_single_vip.py
@@ -32,5 +32,7 @@
no_vip[group] = 'No vip found'
else:
no_vip[group] = nodes_list
- assert len(no_vip) < 1, "Some groups of nodes have problem with vip " \
- "\n{}".format(json.dumps(no_vip, indent=4))
+ assert len(no_vip) < 1, (
+ "The following group(s) of nodes have problem with vip:\n{}".format(
+ json.dumps(no_vip, indent=4))
+ )
diff --git a/test_set/cvp-sanity/tests/test_stacklight.py b/test_set/cvp-sanity/tests/test_stacklight.py
index 7340951..03b5a42 100644
--- a/test_set/cvp-sanity/tests/test_stacklight.py
+++ b/test_set/cvp-sanity/tests/test_stacklight.py
@@ -18,26 +18,23 @@
proxies = {"http": None, "https": None}
IP = salt_output
- assert requests.get('{0}://{1}:9200/'.format(proto, IP),
- proxies=proxies, verify=False).status_code == 200, \
- 'Cannot check elasticsearch url on {}.'.format(IP)
- resp = requests.get('{0}://{1}:9200/_cat/health'.format(proto, IP),
- proxies=proxies, verify=False).content
- assert resp.split()[3] == 'green', \
- 'elasticsearch status is not good {}'.format(
- json.dumps(resp, indent=4))
- assert resp.split()[4] == '3', \
- 'elasticsearch status is not good {}'.format(
- json.dumps(resp, indent=4))
- assert resp.split()[5] == '3', \
- 'elasticsearch status is not good {}'.format(
- json.dumps(resp, indent=4))
- assert resp.split()[10] == '0', \
- 'elasticsearch status is not good {}'.format(
- json.dumps(resp, indent=4))
- assert resp.split()[13] == '100.0%', \
- 'elasticsearch status is not good {}'.format(
- json.dumps(resp, indent=4))
+ response = requests.get(
+ '{0}://{1}:9200/'.format(proto, IP),
+ proxies=proxies,
+ verify=False)
+ assert response.status_code == 200, (
+ "Issues with accessing elasticsearch on {}.".format(IP))
+ response = requests.get(
+ '{0}://{1}:9200/_cat/health'.format(proto, IP),
+ proxies=proxies,
+ verify=False).content
+ msg = "elasticsearch is not healthy:\n{}".format(
+ json.dumps(response, indent=4))
+ assert response.split()[3] == 'green', msg
+ assert response.split()[4] == '3', msg
+ assert response.split()[5] == '3', msg
+ assert response.split()[10] == '0', msg
+ assert response.split()[13] == '100.0%', msg
@pytest.mark.sl_dup
@@ -52,15 +49,18 @@
param='haproxy:proxy:listen:kibana:binds:ssl:enabled')
proto = "https" if ssl else "http"
- resp = requests.get('{0}://{1}:5601/api/status'.format(proto, IP),
- proxies=proxies, verify=False).content
- body = json.loads(resp)
- assert body['status']['overall']['state'] == "green", \
- "Kibana status is not expected: {}".format(
- body['status']['overall'])
+ response = requests.get(
+ '{0}://{1}:5601/api/status'.format(proto, IP),
+ proxies=proxies,
+ verify=False).content
+ body = json.loads(response)
+ assert body['status']['overall']['state'] == "green", (
+ "Kibana overall status is not 'green':\n{}".format(
+ body['status']['overall'])
+ )
for i in body['status']['statuses']:
- assert i['state'] == "green", \
- "Kibana statuses are unexpected: {}".format(i)
+ assert i['state'] == "green", (
+ "Kibana statuses are unexpected:\n{}".format(i))
@pytest.mark.smoke
@@ -89,10 +89,12 @@
'{0}://{1}:9200/log-{2}/_search?pretty'.format(proto, IP, today),
proxies=proxies,
headers=headers,
- verify = False,
+ verify=False,
data=data)
- assert 200 == response.status_code, 'Unexpected code {}'.format(
- response.text)
+ assert response.status_code == 200, (
+ 'Issues with accessing elasticsearch on {}:\n{}'.format(
+ IP, response.text)
+ )
resp = json.loads(response.text)
cluster_domain = local_salt_client.pillar_get(param='_param:cluster_domain')
monitored_nodes = []
@@ -104,10 +106,11 @@
for node in all_nodes:
if node not in monitored_nodes:
missing_nodes.append(node)
- assert len(missing_nodes) == 0, \
- 'Not all nodes are in Elasticsearch. Found {0} keys, ' \
- 'expected {1}. Missing nodes: \n{2}'. \
- format(len(monitored_nodes), len(all_nodes), missing_nodes)
+ assert len(missing_nodes) == 0, (
+ "Not all nodes are in Elasticsearch. Expected {}, but found {} keys.\n"
+ "Missing nodes:\n{}".format(
+ len(monitored_nodes), len(all_nodes), missing_nodes)
+ )
@pytest.mark.sl_dup
@@ -130,9 +133,10 @@
if line[line.find('/') - 1] != line[line.find('/') + 1] \
and 'replicated' in line:
wrong_items.append(line)
- assert len(wrong_items) == 0, \
- '''Some monitoring services doesn't have expected number of replicas:
- {}'''.format(json.dumps(wrong_items, indent=4))
+ assert len(wrong_items) == 0, (
+ "Some monitoring services don't have the expected number of "
+ "replicas:\n{}".format(json.dumps(wrong_items, indent=4))
+ )
@pytest.mark.smoke
@@ -152,8 +156,8 @@
result = nodes_info[nodes_info.keys()[0]].replace('</td>', '').replace(
'<td><i class="icon-chevron-down"></i> <b>', '').replace('</b>', '')
- assert result == '', 'AlertManager page has some alerts! {}'.format(
- json.dumps(result), indent=4)
+ assert result == '', 'AlertManager page has some alerts!\n{}'.format(
+ json.dumps(result, indent=4))
@pytest.mark.sl_dup
@@ -166,8 +170,8 @@
expr_form='compound')
if not salt_output:
- pytest.skip("docker:swarm:role:master or prometheus:server \
- pillars are not found on this environment.")
+ pytest.skip("docker:swarm:role:master or prometheus:server pillars "
+ "are not found on this environment.")
result = {}
# for old reclass models, docker:swarm:role:master can return
@@ -185,9 +189,10 @@
if line.split()[4 + shift] == 'Running' \
or line.split()[4 + shift] == 'Ready':
result[line.split()[1 + shift]] = 'OK'
- assert 'NOT OK' not in result.values(), \
- '''Some containers are in incorrect state:
- {}'''.format(json.dumps(result, indent=4))
+ assert 'NOT OK' not in result.values(), (
+ "Some containers have incorrect state:\n{}".format(
+ json.dumps(result, indent=4))
+ )
@pytest.mark.sl_dup
@@ -200,14 +205,16 @@
expr_form='pillar',)
if not salt_output:
- pytest.skip("Telegraf or telegraf:agent \
- pillar are not found on this environment.")
+ pytest.skip("Telegraf or telegraf:agent pillars are not found on "
+ "this environment.")
result = [{node: status} for node, status
in salt_output.items()
if status is False]
- assert result == [], 'Telegraf service is not running ' \
- 'on following nodes: {}'.format(result)
+ assert result == [], (
+ "Telegraf service is not running on the following nodes:\n{}".format(
+ result)
+ )
@pytest.mark.sl_dup
@@ -221,5 +228,7 @@
result = [{node: status} for node, status
in salt_output.items()
if status is False]
- assert result == [], 'Fluentd check failed: td-agent service is not ' \
- 'running on following nodes:'.format(result)
+ assert result == [], (
+ "Fluentd check failed - td-agent service is not running on the "
+ "following nodes:\n{}".format(result)
+ )
diff --git a/test_set/cvp-sanity/tests/test_ui_addresses.py b/test_set/cvp-sanity/tests/test_ui_addresses.py
index d8e6a32..5e6ccdb 100644
--- a/test_set/cvp-sanity/tests/test_ui_addresses.py
+++ b/test_set/cvp-sanity/tests/test_ui_addresses.py
@@ -16,9 +16,8 @@
param='curl -k {0}://{1}/auth/login/ 2>&1 | \
grep Login'.format(proto, IP),
expr_form='pillar')
- assert len(result) != 0, \
- 'Horizon login page is not reachable on {} from ctl nodes'.format(
- IP[0])
+ assert len(result) != 0, (
+ 'Horizon login page is not reachable on {} from ctl nodes.'.format(IP))
@pytest.mark.smoke
@@ -31,11 +30,12 @@
url = "{}://{}:{}/v3".format(proto, IP, port)
result = local_salt_client.cmd(
tgt=ctl_nodes_pillar,
- param='curl -k {}/ 2>&1 | \
- grep stable'.format(url),
+ param='curl -k {}/ 2>&1 | grep stable'.format(url),
expr_form='pillar')
- assert len(result[result.keys()[0]]) != 0, \
- 'Public Openstack url is not reachable on {} from ctl nodes'.format(url)
+ assert len(result[result.keys()[0]]) != 0, (
+ 'Public OpenStack URL is not reachable on {} from ctl '
+ 'nodes'.format(url)
+ )
@pytest.mark.sl_dup
@@ -52,12 +52,12 @@
url = "{}://{}:{}".format(proto, IP, port)
result = local_salt_client.cmd(
tgt=ctl_nodes_pillar,
- param='curl -k {}/app/kibana 2>&1 | \
- grep loading'.format(url),
+ param='curl -k {}/app/kibana 2>&1 | grep loading'.format(url),
expr_form='pillar')
- assert len(result[result.keys()[0]]) != 0, \
- 'Internal Kibana login page is not reachable on {} ' \
- 'from ctl nodes'.format(url)
+ assert len(result[result.keys()[0]]) != 0, (
+ 'Internal Kibana login page is not reachable on {} from ctl '
+ 'nodes'.format(url)
+ )
@pytest.mark.smoke
@@ -70,12 +70,12 @@
url = "{}://{}:{}".format(proto, IP, port)
result = local_salt_client.cmd(
tgt=ctl_nodes_pillar,
- param='curl -k {}/app/kibana 2>&1 | \
- grep loading'.format(url),
+ param='curl -k {}/app/kibana 2>&1 | grep loading'.format(url),
expr_form='pillar')
- assert len(result[result.keys()[0]]) != 0, \
- 'Public Kibana login page is not reachable on {} ' \
- 'from ctl nodes'.format(url)
+ assert len(result[result.keys()[0]]) != 0, (
+ 'Public Kibana login page is not reachable on {} from ctl '
+ 'nodes'.format(url)
+ )
@pytest.mark.sl_dup
@@ -89,12 +89,12 @@
url = "{}://{}:{}".format(protocol, IP, port)
result = local_salt_client.cmd(
tgt=ctl_nodes_pillar,
- param='curl {}/graph 2>&1 | \
- grep Prometheus'.format(url),
+ param='curl {}/graph 2>&1 | grep Prometheus'.format(url),
expr_form='pillar')
- assert len(result[result.keys()[0]]) != 0, \
- 'Internal Prometheus page is not reachable on {} ' \
- 'from ctl nodes'.format(url)
+ assert len(result[result.keys()[0]]) != 0, (
+ 'Internal Prometheus page is not reachable on {} from ctl '
+ 'nodes'.format(url)
+ )
@pytest.mark.smoke
@@ -107,12 +107,12 @@
url = "{}://{}:{}".format(proto, IP, port)
result = local_salt_client.cmd(
tgt=ctl_nodes_pillar,
- param='curl -k {}/graph 2>&1 | \
- grep Prometheus'.format(url),
+ param='curl -k {}/graph 2>&1 | grep Prometheus'.format(url),
expr_form='pillar')
- assert len(result[result.keys()[0]]) != 0, \
- 'Public Prometheus page is not reachable on {} ' \
- 'from ctl nodes'.format(url)
+ assert len(result[result.keys()[0]]) != 0, (
+ 'Public Prometheus page is not reachable on {} from ctl '
+ 'nodes'.format(url)
+ )
@pytest.mark.sl_dup
@@ -128,9 +128,10 @@
tgt=ctl_nodes_pillar,
param='curl -s {}/ | grep Alertmanager'.format(url),
expr_form='pillar')
- assert len(result[result.keys()[0]]) != 0, \
- 'Internal AlertManager page is not reachable on {} ' \
- 'from ctl nodes'.format(url)
+ assert len(result[result.keys()[0]]) != 0, (
+ 'Internal AlertManager page is not reachable on {} from ctl '
+ 'nodes'.format(url)
+ )
@pytest.mark.smoke
@@ -145,9 +146,10 @@
tgt=ctl_nodes_pillar,
param='curl -k -s {}/ | grep Alertmanager'.format(url),
expr_form='pillar')
- assert len(result[result.keys()[0]]) != 0, \
- 'Public AlertManager page is not reachable on {} ' \
+ assert len(result[result.keys()[0]]) != 0, (
+ 'Public AlertManager page is not reachable on {} '
'from ctl nodes'.format(url)
+ )
@pytest.mark.sl_dup
@@ -163,9 +165,10 @@
tgt=ctl_nodes_pillar,
param='curl {}/login 2>&1 | grep Grafana'.format(url),
expr_form='pillar')
- assert len(result[result.keys()[0]]) != 0, \
- 'Internal Grafana page is not reachable on {} ' \
+ assert len(result[result.keys()[0]]) != 0, (
+ 'Internal Grafana page is not reachable on {} '
'from ctl nodes'.format(url)
+ )
@pytest.mark.smoke
@@ -180,8 +183,9 @@
tgt=ctl_nodes_pillar,
param='curl -k {}/login 2>&1 | grep Grafana'.format(url),
expr_form='pillar')
- assert len(result[result.keys()[0]]) != 0, \
+ assert len(result[result.keys()[0]]) != 0, (
'Public Grafana page is not reachable on {} from ctl nodes'.format(url)
+ )
@pytest.mark.sl_dup
@@ -195,11 +199,12 @@
url = "{}://{}:{}".format(protocol, IP, port)
result = local_salt_client.cmd(
tgt=ctl_nodes_pillar,
- param='curl {}/ 2>&1 | \
- grep Alerta'.format(url),
+ param='curl {}/ 2>&1 | grep Alerta'.format(url),
expr_form='pillar')
- assert len(result[result.keys()[0]]) != 0, \
- 'Internal Alerta page is not reachable on {} from ctl nodes'.format(url)
+ assert len(result[result.keys()[0]]) != 0, (
+ 'Internal Alerta page is not reachable on {} from '
+ 'ctl nodes'.format(url)
+ )
@pytest.mark.smoke
@@ -212,11 +217,10 @@
url = "{}://{}:{}".format(proto, IP, port)
result = local_salt_client.cmd(
tgt=ctl_nodes_pillar,
- param='curl -k {}/ 2>&1 | \
- grep Alerta'.format(url),
+ param='curl -k {}/ 2>&1 | grep Alerta'.format(url),
expr_form='pillar')
- assert len(result[result.keys()[0]]) != 0, \
- 'Public Alerta page is not reachable on {} from ctl nodes'.format(url)
+ assert len(result[result.keys()[0]]) != 0, (
+ 'Public Alerta page is not reachable on {} from ctl nodes'.format(url))
@pytest.mark.smoke
@@ -230,11 +234,11 @@
url = "{}://{}:{}".format(proto, IP, port)
result = local_salt_client.cmd(
tgt=ctl_nodes_pillar,
- param='curl -k {}/ 2>&1 | \
- grep Authentication'.format(url),
+ param='curl -k {}/ 2>&1 | grep Authentication'.format(url),
expr_form='pillar')
- assert len(result[result.keys()[0]]) != 0, \
+ assert len(result[result.keys()[0]]) != 0, (
'Public Jenkins page is not reachable on {} from ctl nodes'.format(url)
+ )
@pytest.mark.smoke
@@ -248,8 +252,7 @@
url = "{}://{}:{}".format(proto, IP, port)
result = local_salt_client.cmd(
tgt=ctl_nodes_pillar,
- param='curl -k {}/ 2>&1 | \
- grep "Gerrit Code Review"'.format(url),
+ param='curl -k {}/ 2>&1 | grep "Gerrit Code Review"'.format(url),
expr_form='pillar')
- assert len(result[result.keys()[0]]) != 0, \
- 'Public Gerrit page is not reachable on {} from ctl nodes'.format(url)
\ No newline at end of file
+ assert len(result[result.keys()[0]]) != 0, (
+ 'Public Gerrit page is not reachable on {} from ctl nodes'.format(url))