Add two-script test runner and pgbench tests

Move result formatting out of run_test.py into formatters.py, add a
generic TwoScriptTest (prepare script + run script executed over ssh)
with a PgBenchTest subclass, and rework hl_tests/postgres so run.sh
prints "<clients> <transactions>:<tps>" lines that format_pgbench_stat
aggregates into a per-load-level mean and deviation.

diff --git a/formatters.py b/formatters.py
new file mode 100644
index 0000000..9d30c55
--- /dev/null
+++ b/formatters.py
@@ -0,0 +1,70 @@
+import itertools
+import json
+import math
+
+
+def get_formatter(test_type):
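+    """Return the result-formatting function for the given test type."""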
+    if test_type == "io":
+        return format_io_stat
+    elif test_type == "pgbench":
+        return format_pgbench_stat
+    else:
+        raise Exception("Cannot get formatter for type %s" % test_type)
+
+
+def format_io_stat(res):
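+    """Format fio/iozone results as JSON: {label: (bw_mean, bw_dev)}."""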
+    if len(res) != 0:
+        bw_mean = 0.0
+        for measurement in res:
+            bw_mean += measurement["bw_mean"]
+
+        bw_mean /= len(res)
+
+        it = ((bw_mean - measurement["bw_mean"]) ** 2 for measurement in res)
+        bw_dev = sum(it) ** 0.5
+
+        meta = res[0]['__meta__']
+
+        sync = meta['sync']
+        direct = meta['direct_io']
+
+        if sync and direct:
+            ss = "d+"
+        elif sync:
+            ss = "s"
+        elif direct:
+            ss = "d"
+        else:
+            ss = "a"
+
+        key = "{0} {1} {2} {3}k".format(meta['action'], ss,
+                                        meta['concurence'],
+                                        meta['blocksize'])
+
+        data = json.dumps({key: (int(bw_mean), int(bw_dev))})
+
+        return data
+
+
+def format_pgbench_stat(res):
+    """
+    Receives results in format:
+    "<num_clients> <num_transactions>: <tps>
+     <num_clients> <num_transactions>: <tps>
+     ....
+    "
+    """
+    if res:
+        data = {}
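+        # itertools.groupby only merges adjacent entries, so results for
+        # the same "<clients> <transactions>" key must arrive grouped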
+        grouped_res = itertools.groupby(res, lambda x: x[0])
+        for key, group in grouped_res:
+            results = list(group)
+            sum_res = sum([r[1] for r in results])
+            mean = sum_res / len(results)
+            sum_sq = sum([(r[1] - mean) ** 2 for r in results])
+            dev = math.sqrt(sum_sq / max(len(results) - 1, 1))
+            data[key] = (mean, dev)
+        return json.dumps(data)
diff --git a/hl_tests/postgres/prepare.sh b/hl_tests/postgres/prepare.sh
old mode 100644
new mode 100755
index 4f81e0d..e7ca3bc
--- a/hl_tests/postgres/prepare.sh
+++ b/hl_tests/postgres/prepare.sh
@@ -1,12 +1,10 @@
 #!/bin/bash
-
-# install postgres
-apt-get update
-apt-get install postgresql postgresql-contrib
-
-# check if postrges cluster created
+set -e
 
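+# install postgres and create the default 9.3 cluster only on first run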
 if [ ! -d /etc/postgresql ]; then
+    apt-get update
+    apt-get install -y postgresql postgresql-contrib
     err=$(pg_createcluster 9.3 main --start 2>&1  /dev/null )
     if [ $? -ne 0 ]; then
         echo "There was an error while creating cluster"
@@ -15,6 +13,8 @@
 fi
 
 sed -i 's/^local\s\+all\s\+all\s\+peer/local all all trust/g' /etc/postgresql/9.3/main/pg_hba.conf
-echo "listen_address = '*'" >> /etc/postgresql/9.3/main/postgresql.conf
+sed -i "s/#listen_addresses = 'localhost'/listen_addresses = '*'/g" /etc/postgresql/9.3/main/postgresql.conf
 
+service postgresql restart
 
+exit 0
diff --git a/hl_tests/postgres/run.sh b/hl_tests/postgres/run.sh
old mode 100644
new mode 100755
index aaab8f5..132ed97
--- a/hl_tests/postgres/run.sh
+++ b/hl_tests/postgres/run.sh
@@ -1,29 +1,28 @@
 #!/bin/bash
+set -e
 
-CLIENTS=${CLIENTS:-"16 32 48 64 80 96"}
-TRANSACTINOS_PER_CLIENT=${TRANSACTINOS_PER_CLIENT:-"100 200 300 400 500 600 700 800 900 1000"}
-RESULTS_FILE=${RESULTS_FILE:-"results.json"}
+CLIENTS=${CLIENTS:-"4 8"}
+TRANSACTIONS_PER_CLIENT=${TRANSACTIONS_PER_CLIENT:-"1 2"}
 
 
-createdb -O postgres pgbench
-pgbench -i -U postgres pgbench
+sudo -u postgres createdb -O postgres pgbench
+sudo -u postgres pgbench -i -U postgres pgbench
 
-echo "{" > $RESULTS_FILE
 
 for num_clients in $CLIENTS; do
-    for trans_per_cl in $TRANSACTINOS_PER_CLIENT; do
+    for trans_per_cl in $TRANSACTIONS_PER_CLIENT; do
         tps_all=''
         for i in 1 2 3 4 5 6 7 8 9 10; do
-            tps=$(pgbench -c $num_clients -n -t $trans_per_cl -j 4 -r -U postgres pgbench |
-            grep "(excluding connections establishing)" | awk {'print $3'})
-            tps_all="$tps_all\n$tps"
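+            # print "<clients> <transactions>:<tps>"; these lines are
+            # parsed by TwoScriptTest.parse_results on the host side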
+            echo -n "$num_clients $trans_per_cl:"
+            sudo -u postgres pgbench -c $num_clients -n -t $trans_per_cl -j 4 -r -U postgres pgbench |
+            grep "(excluding connections establishing)" | awk '{print $3}'
         done
-        # calculate average and deviation
-        echo "$num_clients $trans_per_cl: " >> $RESULTS_FILE
-        echo -e $tps_all | awk  '{ col=1; array[NR]=$col; sum+=$col; print "col="$col,"sum="sum} END {for(x=1;x<=NR;x++){sumsq+=((array[x]-(sum/NR))^2);} print "[" sum/NR "," sqrt(sumsq/(NR-1)) "], " }' >> $RESULTS_FILE
     done
 done
 
-echo "}" >> $RESULTS_FILE
+sudo -u postgres dropdb pgbench
 
+exit 0
 
diff --git a/itest.py b/itest.py
index 018a11b..d63a9b4 100644
--- a/itest.py
+++ b/itest.py
@@ -4,7 +4,6 @@
 import os.path
 import logging
 
-
 from io_scenario import io
 from ssh_copy_directory import copy_paths
 from utils import run_over_ssh
@@ -31,6 +30,75 @@
         pass
 
 
+class TwoScriptTest(IPerfTest):
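+    """Runs a pair of scripts on the remote node: a prepare script
+    (pre_run) and a load script (run) whose stdout is parsed into
+    "<key>:<value>" pairs and reported via on_result_cb."""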
+    def __init__(self, opts, testtool, on_result_cb, keep_tmp_files):
+        super(TwoScriptTest, self).__init__(on_result_cb)
+        self.opts = opts
+        self.pre_run_script = None
+        self.run_script = None
+        self.tmp_dir = "/tmp/"
+        self.set_run_script()
+        self.set_pre_run_script()
+
+    def set_run_script(self):
+        self.run_script = self.opts.run_script
+
+    def set_pre_run_script(self):
+        self.pre_run_script = self.opts.pre_run_script
+
+    def get_remote_for_script(self, script):
+        return os.path.join(self.tmp_dir, script.rpartition('/')[2])
+
+    def copy_script(self, conn, src):
+        remote_path = self.get_remote_for_script(src)
+        copy_paths(conn, {src: remote_path})
+        return remote_path
+
+    def pre_run(self, conn):
+        remote_script = self.copy_script(conn, self.pre_run_script)
+        cmd = remote_script
+        code, out, err = run_over_ssh(conn, cmd)
+        if code != 0:
+            raise Exception("Pre run failed. %s" % err)
+
+    def run(self, conn):
+        remote_script = self.copy_script(conn, self.run_script)
+        cmd = remote_script
+        code, out, err = run_over_ssh(conn, cmd)
+        self.on_result(code, out, err, cmd)
+
+    def parse_results(self, out):
+        for line in out.split("\n"):
+            key, separator, value = line.partition(":")
+            if key and value:
+                self.on_result_cb((key, float(value)))
+
+    def on_result(self, code, out, err, cmd):
+        if 0 == code:
+            try:
+                self.parse_results(out)
+            except Exception as exc:
+                msg_templ = "Error during postprocessing results: {0!r}"
+                raise RuntimeError(msg_templ.format(exc.message))
+        else:
+            templ = "Command {0!r} failed with code {1}. Error output is:\n{2}"
+            logger.error(templ.format(cmd, code, err))
+
+
+class PgBenchTest(TwoScriptTest):
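+    """Uses hl_tests/postgres: prepare.sh sets up PostgreSQL, run.sh
+    prints "<clients> <transactions>:<tps>" lines for each pgbench run."""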
+
+    def set_run_script(self):
+        self.run_script = "hl_tests/postgres/run.sh"
+
+    def set_pre_run_script(self):
+        self.pre_run_script = "hl_tests/postgres/prepare.sh"
+
+
 def run_test_iter(obj, conn):
     yield obj.pre_run(conn)
     res = obj.run(conn)
diff --git a/run_test.py b/run_test.py
index b1d5a59..601ccbe 100644
--- a/run_test.py
+++ b/run_test.py
@@ -17,8 +17,10 @@
 import io_scenario
 from utils import log_error
 from rest_api import add_test
-from itest import IOPerfTest, run_test_iter
+from itest import IOPerfTest, run_test_iter, PgBenchTest
 from starts_vms import nova_connect, create_vms_mt, clear_all
+from formatters import get_formatter
+
 
 try:
     import rally_runner
@@ -38,6 +40,13 @@
 ch.setFormatter(formatter)
 
 
+tool_type_mapper = {
+    "iozone": IOPerfTest,
+    "fio": IOPerfTest,
+    "pgbench": PgBenchTest,
+}
+
+
 def run_io_test(tool,
                 script_args,
                 test_runner,
@@ -48,10 +57,11 @@
     path = 'iozone' if 'iozone' == tool else 'fio'
     src_testtool_path = os.path.join(files_dir, path)
 
-    obj = IOPerfTest(script_args,
-                     src_testtool_path,
-                     None,
-                     keep_temp_files)
+    obj_cls = tool_type_mapper[tool]
+    obj = obj_cls(script_args,
+                  src_testtool_path,
+                  None,
+                  keep_temp_files)
 
     return test_runner(obj)
 
@@ -83,7 +93,6 @@
         res = (self.proc.stdin,
                FileWrapper(self.proc.stdout, self),
                self.proc.stderr)
-
         return res
 
     def recv_exit_status(self):
@@ -129,21 +138,25 @@
         description="Run disk io performance test")
 
     parser.add_argument("tool_type", help="test tool type",
-                        choices=['iozone', 'fio'])
+                        choices=['iozone', 'fio', 'pgbench'])
 
     parser.add_argument("-l", dest='extra_logs',
                         action='store_true', default=False,
                         help="print some extra log info")
 
-    parser.add_argument("-o", "--io-opts", dest='io_opts',
-                        help="cmd line options for io.py")
+    parser.add_argument("-o", "--test-opts", dest='opts',
+                        help="cmd line options for test")
 
-    parser.add_argument("-f", "--io-opts-file", dest='io_opts_file',
+    parser.add_argument("-f", "--test-opts-file", dest='opts_file',
                         type=argparse.FileType('r'), default=None,
-                        help="file with cmd line options for io.py")
+                        help="file with cmd line options for test")
+    #
+    # parser.add_argument("-t", "--test-directory", help="directory with test",
+    #                     dest="test_directory", required=True)
 
-    parser.add_argument("-t", "--test-directory", help="directory with test",
-                        dest="test_directory", required=True)
+    parser.add_argument("-t", "--test", help="test to run",
+                        dest="test_directory", required=True,
+                        choices=['io', 'pgbench', 'two_scripts'])
 
     parser.add_argument("--max-preparation-time", default=300,
                         type=int, dest="max_preparation_time")
@@ -179,81 +192,47 @@
     return parser.parse_args(argv)
 
 
-def format_measurements_stat(res):
-    if len(res) != 0:
-        bw_mean = 0.0
-        for measurement in res:
-            bw_mean += measurement["bw_mean"]
-
-        bw_mean /= len(res)
-
-        it = ((bw_mean - measurement["bw_mean"]) ** 2 for measurement in res)
-        bw_dev = sum(it) ** 0.5
-
-        meta = res[0]['__meta__']
-
-        sync = meta['sync']
-        direct = meta['direct_io']
-
-        if sync and direct:
-            ss = "d+"
-        elif sync:
-            ss = "s"
-        elif direct:
-            ss = "d"
-        else:
-            ss = "a"
-
-        key = "{0} {1} {2} {3}k".format(meta['action'], ss,
-                                        meta['concurence'],
-                                        meta['blocksize'])
-
-        data = json.dumps({key: (int(bw_mean), int(bw_dev))})
-
-        return data
-
-
-def get_io_opts(io_opts_file, io_opts):
-    if io_opts_file is not None and io_opts is not None:
-        print "Options --io-opts-file and --io-opts can't be " + \
+def get_opts(opts_file, test_opts):
+    if opts_file is not None and test_opts is not None:
+        print "Options --opts-file and --opts can't be " + \
             "provided same time"
         exit(1)
 
-    if io_opts_file is None and io_opts is None:
-        print "Either --io-opts-file or --io-opts should " + \
+    if opts_file is None and test_opts is None:
+        print "Either --opts-file or --opts should " + \
             "be provided"
         exit(1)
 
-    if io_opts_file is not None:
-        io_opts = []
+    if opts_file is not None:
+        opts = []
 
-        opt_lines = io_opts_file.readlines()
+        opt_lines = opts_file.readlines()
         opt_lines = [i for i in opt_lines if i != "" and not i.startswith("#")]
 
         for opt_line in opt_lines:
             if opt_line.strip() != "":
-                io_opts.append([opt.strip()
-                               for opt in opt_line.strip().split(" ")
-                               if opt.strip() != ""])
+                opts.append([opt.strip()
+                             for opt in opt_line.strip().split(" ")
+                             if opt.strip() != ""])
     else:
-        io_opts = [[opt.strip()
-                   for opt in io_opts.split(" ")
-                   if opt.strip() != ""]]
+        opts = [[opt.strip()
+                 for opt in test_opts.split(" ")
+                 if opt.strip() != ""]]
 
-    if len(io_opts) == 0:
-        print "Can't found parameters for io. Check" + \
-            "--io-opts-file or --io-opts options"
+    if len(opts) == 0:
+        print "Can't found parameters for tests. Check" + \
+            "--opts-file or --opts options"
         exit(1)
 
-    return io_opts
+    return opts
 
 
-def format_result(res):
+def format_result(res, formatter):
     data = "\n{0}\n".format("=" * 80)
     data += pprint.pformat(res) + "\n"
     data += "{0}\n".format("=" * 80)
     templ = "{0}\n\n====> {1}\n\n{2}\n\n"
-    return templ.format(data, format_measurements_stat(res), "=" * 80)
+    return templ.format(data, formatter(res), "=" * 80)
 
 
 @contextlib.contextmanager
@@ -307,11 +286,11 @@
         logger.setLevel(logging.DEBUG)
         ch.setLevel(logging.DEBUG)
 
-    io_opts = get_io_opts(opts.io_opts_file, opts.io_opts)
+    test_opts = get_opts(opts.opts_file, opts.opts)
 
     if opts.runner == "rally":
         logger.debug("Use rally runner")
-        for script_args in io_opts:
+        for script_args in test_opts:
 
             cmd_line = " ".join(script_args)
             logger.debug("Run test with {0!r} params".format(cmd_line))
@@ -326,12 +305,12 @@
                               script_args,
                               runner,
                               opts.keep_temp_files)
-            logger.debug(format_result(res))
+            logger.debug(format_result(res, get_formatter(opts.tool_type)))
 
     elif opts.runner == "local":
         logger.debug("Run on local computer")
         try:
-            for script_args in io_opts:
+            for script_args in test_opts:
                 cmd_line = " ".join(script_args)
                 logger.debug("Run test with {0!r} params".format(cmd_line))
                 runner = get_local_runner(opts.keep_temp_files)
@@ -339,7 +318,7 @@
                                   script_args,
                                   runner,
                                   opts.keep_temp_files)
-                logger.debug(format_result(res))
+                logger.debug(format_result(res, get_formatter(opts.tool_type)))
         except:
             traceback.print_exc()
             return 1
@@ -364,7 +343,7 @@
             return 1
 
         try:
-            for script_args in io_opts:
+            for script_args in test_opts:
                 cmd_line = " ".join(script_args)
                 logger.debug("Run test with {0!r} params".format(cmd_line))
                 latest_start_time = opts.max_preparation_time + time.time()
@@ -375,7 +354,7 @@
                                   script_args,
                                   runner,
                                   opts.keep_temp_files)
-                logger.debug(format_result(res))
+                logger.debug(format_result(res, get_formatter(opts.tool_type)))
 
         except:
             traceback.print_exc()
@@ -386,7 +365,7 @@
                 logger.debug("Clearing")
 
     if opts.data_server_url:
-        result = json.loads(format_measurements_stat(res))
+        result = json.loads(get_formatter(opts.tool_type)(res))
         result['name'] = opts.build_name
         add_test(opts.build_name, result, opts.data_server_url)