Updated ping command to work with MCC/MOS

  - updated Pinger class with inherited structure for Salt and Kube
  - implemented DaemonSet handling in KubeApi interface
  - implemented put-textfile and series of ConfigMap methods in KubeApi
  - updated Pinger to use multiple --cidr commands at once
  - updated Summary section to be more informative and human readable

Change-Id: Iac18a619d0bb9a36a286a07f38aeba8f99a454ca
Related-PROD: PROD-36603
diff --git a/cfg_checker/nodes.py b/cfg_checker/nodes.py
index cf2bdf7..d87d829 100644
--- a/cfg_checker/nodes.py
+++ b/cfg_checker/nodes.py
@@ -953,7 +953,7 @@
         _sh.kill()
         return _target_path
 
-    def prepare_daemonset(self, template_filename, config_map=None):
+    def prepare_daemonset(self, template_filename):
         # load template
         _yaml_file = os.path.join(pkg_dir, 'templates', template_filename)
         logger_cli.debug("... loading template '{}'".format(_yaml_file))
@@ -1043,6 +1043,27 @@
         logger_cli.error("Timed out waiting for Daemonset to be ready")
         return False
 
+    def exec_on_target_pod(self, pod_name, script_filename, args=None):
+        """
+        Run script from configmap on target pod assuming it is present
+        """
+        _arguments = args if args else []
+        _cmd = [
+            "python3",
+            os.path.join(
+                "/",
+                self.env_config.kube_scripts_folder,
+                script_filename
+            )
+        ] + _arguments
+        _result = self.kube.exec_on_target_pod(
+            _cmd,
+            pod_name,
+            self._namespace,
+            strict=True
+        )
+        return _result
+
     def execute_script_on_daemon_set(self, ds, script_filename, args=None):
         """
         Query daemonset for pods and execute script on all of them
@@ -1060,12 +1081,7 @@
                 )
             ]
 
-        # get all pod names
-        logger_cli.debug("... extracting pod names from daemonset")
-        _pods = self.kube.CoreV1.list_namespaced_pod(
-            namespace=ds.metadata.namespace,
-            label_selector='name={}'.format(ds.metadata.name)
-        )
+        _pods = self.kube.get_pods_for_daemonset(ds)
         # Create map for threads: [[node_name, ns, pod_name, cmd]...]
         logger_cli.debug(
             "... runnning script on {} pods using {} threads at a time".format(
@@ -1137,3 +1153,40 @@
             ))
             _r = None
         return _r
+
+    def get_pod_name_in_daemonset_by_node(self, nodename, daemonset):
+        _podname = None
+        _pods = self.kube.get_pods_for_daemonset(daemonset)
+        for item in _pods.items:
+            if item.spec.node_name == nodename:
+                _podname = item.metadata.name
+
+        return _podname
+
+    def prepare_json_in_pod(self, podname, namespace, targets, filename):
+        # Prepare the target json file on the given pod
+        _target_path = os.path.join(
+            "/",
+            "tmp",
+            filename
+        )
+        # checking the folder is probably not needed as the daemonset
+        # links the configmap there on creation
+        # _folder = os.path.join(
+        #     self.env_config.kube_node_homepath,
+        #     self.env_config.kube_scripts_folder
+        # )
+        # prepare data
+        buffer = json.dumps(targets, indent=2).encode('utf-8')
+
+        # write data to pod using fancy websocket function
+        self.kube.put_string_buffer_to_pod_as_textfile(
+            podname,
+            namespace,
+            buffer,
+            _target_path
+        )
+
+        # TODO: Exception handling
+
+        return _target_path