Added tests to check kernel parameters
Additional changes:
* removed kdt-related tests
Change-Id: Ifee71c075411bce7ea2421fa2766383cb27b0330
Related-Task: #PROD-31892(PROD:31892)
(cherry picked from commit b9635f997c7d711eca7d008461a7c6ce28efd82a)
diff --git a/test_set/cvp-sanity/tests/test_drivetrain.py b/test_set/cvp-sanity/tests/test_drivetrain.py
index 82445e1..3c2344b 100644
--- a/test_set/cvp-sanity/tests/test_drivetrain.py
+++ b/test_set/cvp-sanity/tests/test_drivetrain.py
@@ -393,76 +393,3 @@
assert job_result == 'SUCCESS', \
'''Test job '{0}' build was not successful or timeout is too small
'''.format(jenkins_test_job)
-
-
-@pytest.mark.smoke
-# ??
-def test_kdt_all_pods_are_available(local_salt_client, check_kdt):
- """
- # Run kubectl get pods -n drivetrain on kdt-nodes to get status for each pod
- # Check that each pod has fulfilled status in the READY column
-
- """
- pods_statuses_output = local_salt_client.cmd_any(
- tgt='L@'+','.join(check_kdt),
- param='kubectl get pods -n drivetrain | awk {\'print $1"; "$2\'} | column -t',
- expr_form='compound')
-
- assert pods_statuses_output != "/bin/sh: 1: kubectl: not found", \
- "Nodes {} don't have kubectl".format(check_kdt)
- # Convert string to list and remove first row with column names
- pods_statuses = pods_statuses_output.split('\n')
- pods_statuses = pods_statuses[1:]
-
- report_with_errors = ""
- for pod_status in pods_statuses:
- pod, status = pod_status.split('; ')
- actual_replica, expected_replica = status.split('/')
-
- if actual_replica.strip() != expected_replica.strip():
- report_with_errors += "Pod [{pod}] doesn't have all containers. Expected {expected} containers, actual {actual}\n".format(
- pod=pod,
- expected=expected_replica,
- actual=actual_replica
- )
- assert report_with_errors == "", \
- "\n{sep}{kubectl_output}{sep} \n\n {report} ".format(
- sep="\n" + "-"*20 + "\n",
- kubectl_output=pods_statuses_output,
- report=report_with_errors
- )
-
-@pytest.mark.smoke
-# ??
-def test_kfg_all_pods_are_available(local_salt_client, check_kfg):
- """
- # Run kubectl get pods -n drivetrain on cfg node to get status for each pod
- # Check that each pod has fulfilled status in the READY column
-
- """
- # TODO collapse similar tests into one to check pods and add new fixture
- pods_statuses_output = local_salt_client.cmd_any(
- tgt='L@' + ','.join(check_kfg),
- param='kubectl get pods -n drivetrain | awk {\'print $1"; "$2\'} | column -t',
- expr_form='compound')
- # Convert string to list and remove first row with column names
- pods_statuses = pods_statuses_output.split('\n')
- pods_statuses = pods_statuses[1:]
-
- report_with_errors = ""
- for pod_status in pods_statuses:
- pod, status = pod_status.split('; ')
- actual_replica, expected_replica = status.split('/')
-
- if actual_replica.strip() == expected_replica.strip():
- report_with_errors += "Pod [{pod}] doesn't have all containers. Expected {expected} containers, actual {actual}\n".format(
- pod=pod,
- expected=expected_replica,
- actual=actual_replica
- )
- assert report_with_errors != "", \
- "\n{sep}{kubectl_output}{sep} \n\n {report} ".format(
- sep="\n" + "-" * 20 + "\n",
- kubectl_output=pods_statuses_output,
- report=report_with_errors
- )
diff --git a/test_set/cvp-sanity/tests/test_kernel_settings.py b/test_set/cvp-sanity/tests/test_kernel_settings.py
new file mode 100644
index 0000000..210c5c4
--- /dev/null
+++ b/test_set/cvp-sanity/tests/test_kernel_settings.py
@@ -0,0 +1,68 @@
+import json
+
+
+def test_sysctl_variables(local_salt_client, nodes_in_group):
+ """
+ # Request kernel setting from linux:system:kernel:sysctl
+ # Request the same setting from sysctl utility on the node
+ # Compare that value in sysctl equals to the same value in pillars
+
+ """
+ def normalize_value(value_in_string):
+ """
+        Converts to int if value_in_string is parsable as an int.
+        Replaces \t with a single space if value_in_string is a string.
+
+ :param value_in_string:
+ :return:
+ """
+ if '\t' in value_in_string:
+ return value_in_string.replace('\t', ' ')
+
+ try:
+ return int(value_in_string)
+ except ValueError:
+ pass
+
+ return value_in_string
+
+ issues = dict()
+ expected_kernel_params_by_nodes = local_salt_client.cmd(
+ tgt="L@"+','.join(nodes_in_group),
+ fun='pillar.get',
+ param="linux:system:kernel:sysctl",
+ expr_form='compound'
+ )
+
+ # Gather all params names from pillars and request their availability
+    # To get only the specified values from the system, request them in the following format:
+ # 'sysctl param1 param2 param3 param4'
+
+ for node in expected_kernel_params_by_nodes.keys():
+ actual_kernel_params_for_node = local_salt_client.cmd(
+ tgt=node,
+ fun='cmd.run',
+ param="sysctl {}".format(" ".join(expected_kernel_params_by_nodes[node].keys())),
+ expr_form='compound'
+ )
+        # Convert the sysctl output string to dict format,
+        # e.g. it turns
+ # "vm.watermark_scale_factor = 10\nvm.zone_reclaim_mode = 0"
+ # to
+ # {
+ # "vm.zone_reclaim_mode": "0",
+ # "vm.watermark_scale_factor": "10"
+ # }
+
+ values = {param.split(' = ')[0]: normalize_value(param.split(' = ')[-1])
+ for param in actual_kernel_params_for_node[node].split('\n')}
+
+ differences = [ "Parameter '{}' is not set === Expected '{}' === Got in sysctl '{}'".format(key, expected_kernel_params_by_nodes[node].get(key), actual)
+ for key, actual in values.items()
+ if expected_kernel_params_by_nodes[node].get(key) != actual ]
+ if differences.__len__() > 0:
+ issues[node] = differences
+
+ assert issues.__len__() == 0, json.dumps(issues, indent=4)
+
+
diff --git a/test_set/cvp-sanity/utils/__init__.py b/test_set/cvp-sanity/utils/__init__.py
index 44b163f..2ee6704 100644
--- a/test_set/cvp-sanity/utils/__init__.py
+++ b/test_set/cvp-sanity/utils/__init__.py
@@ -106,6 +106,10 @@
"""
This method is for fetching pillars only.
Returns value for pillar, False (if no such pillar) or if fail_if_empty=True - exception
+    :param tgt: string, target where the salt command will be executed
+    :param param: additional parameter for the salt command
+    :param expr_form: salt targeting expression form (e.g. 'compound')
+    :param fail_if_empty: if True, raise an exception when the pillar is empty
"""
response = self.cmd(tgt=tgt, fun='pillar.get', param=param, expr_form=expr_form)
for node in response.keys():