large commit: sensor deploy/stop can now reuse already-open connections (connected_config), print replaced with logging, etc
diff --git a/sensors/deploy_sensors.py b/sensors/deploy_sensors.py
index b2dc3f1..e0428d9 100644
--- a/sensors/deploy_sensors.py
+++ b/sensors/deploy_sensors.py
@@ -1,33 +1,50 @@
 import time
 import json
 import os.path
+import logging
 
 from concurrent.futures import ThreadPoolExecutor, wait
 
 from disk_perf_test_tool.ssh_utils import connect, copy_paths
 
+logger = logging.getLogger('io-perf-tool')
+
 
 def wait_all_ok(futures):
     return all(future.result() for future in futures)
 
 
-def deploy_and_start_sensors(monitor_uri, config, remote_path='/tmp/sensors'):
+def deploy_and_start_sensors(monitor_uri, config,
+                             remote_path='/tmp/sensors',
+                             connected_config=None):
     paths = {os.path.dirname(__file__): remote_path}
     with ThreadPoolExecutor(max_workers=32) as executor:
         futures = []
 
-        for uri, config in config.items():
+        if connected_config is not None:
+            assert config is None
+            node_iter = connected_config
+        else:
+            node_iter = config.items()
+
+        for uri_or_conn, config in node_iter:
             futures.append(executor.submit(deploy_and_start_sensor,
-                                           paths, uri, monitor_uri,
+                                           paths, uri_or_conn,
+                                           monitor_uri,
                                            config, remote_path))
 
         if not wait_all_ok(futures):
             raise RuntimeError("Sensor deployment fails on some nodes")
 
 
-def deploy_and_start_sensor(paths, uri, monitor_uri, config, remote_path):
+def deploy_and_start_sensor(paths, uri_or_conn, monitor_uri, config,
+                            remote_path):
     try:
-        conn = connect(uri)
+        if isinstance(uri_or_conn, basestring):
+            conn = connect(uri_or_conn)
+        else:
+            conn = uri_or_conn
+
         copy_paths(conn, paths)
         sftp = conn.open_sftp()
 
@@ -41,17 +58,23 @@
         cmd = cmd_templ.format(main_remote_path,
                                monitor_uri,
                                config_remote_path)
-        print "Executing", cmd
         conn.exec_command(cmd)
         sftp.close()
-        conn.close()
+
+        if isinstance(uri_or_conn, basestring):
+            conn.close()
     except:
+        logger.exception("Error deploying sensors on {0}".format(uri_or_conn))
         return False
     return True
 
 
-def stop_and_remove_sensor(uri, remote_path):
-    conn = connect(uri)
+def stop_and_remove_sensor(uri_or_conn, remote_path):
+    if isinstance(uri_or_conn, basestring):
+        conn = connect(uri_or_conn)
+    else:
+        conn = uri_or_conn
+
     main_remote_path = os.path.join(remote_path, "main.py")
 
     cmd_templ = "python {0} -d stop"
@@ -62,15 +85,23 @@
 
     conn.exec_command("rm -rf {0}".format(remote_path))
 
-    conn.close()
+    if isinstance(uri_or_conn, basestring):
+        conn.close()
 
 
-def stop_and_remove_sensors(config, remote_path='/tmp/sensors'):
+def stop_and_remove_sensors(config, remote_path='/tmp/sensors',
+                            connected_config=None):
     with ThreadPoolExecutor(max_workers=32) as executor:
         futures = []
 
-        for uri, config in config.items():
+        if connected_config is not None:
+            assert config is None
+            conf_iter = connected_config
+        else:
+            conf_iter = config.items()
+
+        for uri_or_conn, config in conf_iter:
             futures.append(executor.submit(stop_and_remove_sensor,
-                                           uri, remote_path))
+                                           uri_or_conn, remote_path))
 
         wait(futures)
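
Usage sketch for the new connected_config path (illustrative only: the monitor uri, node uris, sensor config dict, and the sensors.deploy_sensors import path are assumptions, not taken from this commit):

    # hypothetical caller that opens the ssh connections once and reuses
    # them for both deploy and cleanup; config must be None in that case
    # (both functions assert this when connected_config is passed)
    from disk_perf_test_tool.ssh_utils import connect
    from sensors.deploy_sensors import (deploy_and_start_sensors,
                                        stop_and_remove_sensors)

    monitor_uri = "udp://master:5699"        # assumed monitor endpoint
    node_uris = ["root@node1", "root@node2"] # assumed node uris
    sensor_cfg = {"io": {}, "cpu": {}}       # assumed per-node sensor config

    conns = [connect(uri) for uri in node_uris]
    connected = [(conn, sensor_cfg) for conn in conns]

    deploy_and_start_sensors(monitor_uri, None, connected_config=connected)
    # ... run the workload ...
    stop_and_remove_sensors(None, connected_config=connected)

    for conn in conns:
        conn.close()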