cfg-checker ceph bench module alpha version

 - Ceph benchmark report (beta)
 - Updated result-time selection: results are now reported based on their start time
 - New methods for listing
 - Cleanup-only mode
 - Unified results processing
 - Additional ceph info gather
 - Experimental barchart graph example

Fixes:
 - Kube API client recreated each time for stability (HTTP/WebSocket specifics)
 - args naming fixes

Change-Id: Id541f789a00ab4ee827603c5b6f7f07899aaa7c5
diff --git a/cfg_checker/nodes.py b/cfg_checker/nodes.py
index 49284ca..a673842 100644
--- a/cfg_checker/nodes.py
+++ b/cfg_checker/nodes.py
@@ -1085,12 +1085,44 @@
         self,
         ds,
         cmd,
-        args=None,
+        _args=None,
         is_script=False
     ):
         """
         Query daemonset for pods and execute script on all of them
         """
+        _results = self.exec_cmd_on_pods(
+            self.kube.get_pods_for_daemonset(ds),
+            cmd,
+            _args=_args,
+            is_script=is_script
+        )
+        # Update results
+        _ds_results = {}
+        for _n, _, _v in _results:
+            _ds_results[_n] = _v
+        return _ds_results
+
+    def exec_on_labeled_pods_and_ns(self, label_str, cmd, _args=None, ns=None):
+        if not ns:
+            ns = self._namespace
+        _results = self.exec_cmd_on_pods(
+            self.kube.list_pods(ns, label_str=label_str),
+            cmd,
+            _args=_args
+        )
+        _pod_results = {}
+        for _, _p, _v in _results:
+            _pod_results[_p] = _v
+        return _pod_results
+
+    def exec_cmd_on_pods(
+        self,
+        pod_list,
+        cmd,
+        _args=None,
+        is_script=False
+    ):
         def _kube_exec_on_pod(plist):
             return [
                 plist[1],  # node
@@ -1105,16 +1137,15 @@
                 )
             ]
 
-        _pods = self.kube.get_pods_for_daemonset(ds)
         # Create map for threads: [[node_name, ns, pod_name, cmd]...]
         logger_cli.debug(
             "... runnning script on {} pods using {} threads at a time".format(
-                len(_pods.items),
+                len(pod_list.items),
                 self.env_config.threads
             )
         )
         _plist = []
-        _arguments = args if args else ""
+        _arguments = _args if _args else ""
         if is_script:
             _cmd = [
                 "python3",
@@ -1133,7 +1164,7 @@
                 _arguments = cmd
             else:
                 _cmd = cmd
-        for item in _pods.items:
+        for item in pod_list.items:
             _plist.append(
                 [
                     self,
@@ -1147,7 +1178,7 @@
 
         # map func and cmd
         pool = Pool(self.env_config.threads)
-        _results = {}
+        _results = []
         self.not_responded = []
         # create result list
         _progress = Progress(len(_plist))
@@ -1157,7 +1188,7 @@
             if not ii[1][1]:
                 self.not_responded.append(ii[1][0])
             else:
-                _results[ii[1][0]] = ii[1][2]
+                _results.append(ii[1])
             _progress.write_progress(ii[0])
 
         _progress.end()
@@ -1375,3 +1406,17 @@
             return _t.status.phase
         else:
             return None
+
+    def list_resource_names_by_type_and_ns(self, typ, ns="qa-space"):
+        if typ == "pod":
+            _items = self.kube.list_pods(ns)
+        elif typ == "svc":
+            _items = self.kube.list_svc(ns)
+        elif typ == "pvc":
+            _items = self.kube.list_pvc(ns)
+        elif typ == "pv":
+            _items = self.kube.list_pv()
+        else:
+            logger_cli.error("ERROR: '{}' is not supported yet".format(typ))
+            return None
+        return [[i.metadata.namespace, i.metadata.name] for i in _items.items]