single-node OS tests added
diff --git a/io_scenario/io.py b/io_scenario/io.py
index bd7a325..2ad8f19 100644
--- a/io_scenario/io.py
+++ b/io_scenario/io.py
@@ -215,6 +215,7 @@
 
     # no retest
     cmd.append('-+n')
+
     raw_res = subprocess.check_output(cmd)
 
     try:
@@ -317,6 +318,7 @@
                 "--filename=%s" % tmpname,
                 "--size={0}k".format(benchmark.size),
                 "--numjobs={0}".format(benchmark.concurence),
+                "--runtime=60",
                 "--output-format=json",
                 "--sync=" + ('1' if benchmark.sync else '0')]
 
@@ -346,8 +348,11 @@
     res = {}
 
     # 'bw_dev bw_mean bw_max bw_min'.split()
-    for field in ["bw_mean"]:
+    for field in ["bw_mean", "iops"]:
         res[field] = raw_result[field]
+    res["lat"] = raw_result["lat"]["mean"]
+    res["clat"] = raw_result["clat"]["mean"]
+    res["slat"] = raw_result["slat"]["mean"]
 
     return res, cmd_line
 
@@ -488,9 +493,6 @@
 
     parser.add_argument("--preparation-results", default="{}")
 
-    parser.add_argument("--with-sensors", default="",
-                        dest="with_sensors")
-
     return parser.parse_args(argv)
 
 
@@ -570,7 +572,7 @@
                              " options should be provided, not both")
 
         if argv_obj.test_file is not None:
-            preparation_results['all_files'] = argv_obj.test_file
+            preparation_results['all_files'] = [argv_obj.test_file]
 
         autoremove = False
         if 'all_files' not in preparation_results:
@@ -592,37 +594,19 @@
                 if dt > 0:
                     time.sleep(dt)
 
-            if argv_obj.with_sensors != "":
-                oq = Queue.Queue()
-                iq = Queue.Queue()
-                argv = (argv_obj.with_sensors, oq, iq)
-                th = threading.Thread(None, sensor_thread, None, argv)
-                th.daemon = True
-                th.start()
-
             res, cmd = run_benchmark(argv_obj.type,
                                      benchmark=benchmark,
                                      binary_path=binary_path,
                                      timeout=argv_obj.timeout,
                                      **preparation_results)
-            if argv_obj.with_sensors != "":
-                oq.put(None)
-                th.join()
-                stats = []
-
-                while not iq.empty():
-                    stats.append(iq.get())
-            else:
-                stats = None
 
             res['__meta__'] = benchmark.__dict__.copy()
             res['__meta__']['cmdline'] = cmd
 
-            if stats is not None:
-                res['__meta__']['sensor_data'] = stats
+            import pprint
+            sys.stdout.write(pprint.pformat(res))
 
-            sys.stdout.write(json.dumps(res))
-
+            # sys.stdout.write(json.dumps(res))
             if not argv_obj.prepare_only:
                 sys.stdout.write("\n")
 
diff --git a/io_scenario/io.yaml b/io_scenario/io.yaml
deleted file mode 100644
index deb4b99..0000000
--- a/io_scenario/io.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-VMTasks.boot_runcommand_delete:
-    -
-        args:
-            flavor:
-                name: "ceph.512"
-            image:
-                name: "ubuntu"
-
-            floating_network: "net04_ext"
-            force_delete: false
-            script: "io.py"
-            interpreter: "/usr/bin/env python2"
-            username: "ubuntu"
-
-        runner:
-            type: "constant"
-            times: 3
-            concurrency: 3
-
-        context:
-            users:
-                tenants: 1
-                users_per_tenant: 1
-            network: {}
\ No newline at end of file
diff --git a/io_scenario/libs/ubuntu_14_04_x64/_psutil_linux.so b/io_scenario/libs/ubuntu_14_04_x64/_psutil_linux.so
deleted file mode 100755
index b5f9713..0000000
--- a/io_scenario/libs/ubuntu_14_04_x64/_psutil_linux.so
+++ /dev/null
Binary files differ
diff --git a/io_scenario/libs/ubuntu_14_04_x64/_psutil_posix.so b/io_scenario/libs/ubuntu_14_04_x64/_psutil_posix.so
deleted file mode 100755
index 520ab67..0000000
--- a/io_scenario/libs/ubuntu_14_04_x64/_psutil_posix.so
+++ /dev/null
Binary files differ
diff --git a/io_scenario/libs/ubuntu_14_04_x64/psutil/__init__.py b/io_scenario/libs/ubuntu_14_04_x64/psutil/__init__.py
deleted file mode 100644
index 322cc21..0000000
--- a/io_scenario/libs/ubuntu_14_04_x64/psutil/__init__.py
+++ /dev/null
@@ -1,2015 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""psutil is a cross-platform library for retrieving information on
-running processes and system utilization (CPU, memory, disks, network)
-in Python.
-"""
-
-from __future__ import division
-
-__author__ = "Giampaolo Rodola'"
-__version__ = "2.2.1"
-version_info = tuple([int(num) for num in __version__.split('.')])
-
-__all__ = [
-    # exceptions
-    "Error", "NoSuchProcess", "AccessDenied", "TimeoutExpired",
-    # constants
-    "version_info", "__version__",
-    "STATUS_RUNNING", "STATUS_IDLE", "STATUS_SLEEPING", "STATUS_DISK_SLEEP",
-    "STATUS_STOPPED", "STATUS_TRACING_STOP", "STATUS_ZOMBIE", "STATUS_DEAD",
-    "STATUS_WAKING", "STATUS_LOCKED", "STATUS_WAITING", "STATUS_LOCKED",
-    "CONN_ESTABLISHED", "CONN_SYN_SENT", "CONN_SYN_RECV", "CONN_FIN_WAIT1",
-    "CONN_FIN_WAIT2", "CONN_TIME_WAIT", "CONN_CLOSE", "CONN_CLOSE_WAIT",
-    "CONN_LAST_ACK", "CONN_LISTEN", "CONN_CLOSING", "CONN_NONE",
-    # classes
-    "Process", "Popen",
-    # functions
-    "pid_exists", "pids", "process_iter", "wait_procs",             # proc
-    "virtual_memory", "swap_memory",                                # memory
-    "cpu_times", "cpu_percent", "cpu_times_percent", "cpu_count",   # cpu
-    "net_io_counters", "net_connections",                           # network
-    "disk_io_counters", "disk_partitions", "disk_usage",            # disk
-    "users", "boot_time",                                           # others
-]
-
-import collections
-import errno
-import functools
-import os
-import signal
-import subprocess
-import sys
-import time
-import warnings
-try:
-    import pwd
-except ImportError:
-    pwd = None
-
-from psutil._common import memoize
-from psutil._compat import callable, long
-from psutil._compat import PY3 as _PY3
-from psutil._common import (deprecated_method as _deprecated_method,
-                            deprecated as _deprecated,
-                            sdiskio as _nt_sys_diskio,
-                            snetio as _nt_sys_netio)
-
-from psutil._common import (STATUS_RUNNING,  # NOQA
-                            STATUS_SLEEPING,
-                            STATUS_DISK_SLEEP,
-                            STATUS_STOPPED,
-                            STATUS_TRACING_STOP,
-                            STATUS_ZOMBIE,
-                            STATUS_DEAD,
-                            STATUS_WAKING,
-                            STATUS_LOCKED,
-                            STATUS_IDLE,  # bsd
-                            STATUS_WAITING,  # bsd
-                            STATUS_LOCKED)  # bsd
-
-from psutil._common import (CONN_ESTABLISHED,
-                            CONN_SYN_SENT,
-                            CONN_SYN_RECV,
-                            CONN_FIN_WAIT1,
-                            CONN_FIN_WAIT2,
-                            CONN_TIME_WAIT,
-                            CONN_CLOSE,
-                            CONN_CLOSE_WAIT,
-                            CONN_LAST_ACK,
-                            CONN_LISTEN,
-                            CONN_CLOSING,
-                            CONN_NONE)
-
-if sys.platform.startswith("linux"):
-    import psutil._pslinux as _psplatform
-    from psutil._pslinux import (phymem_buffers,  # NOQA
-                                 cached_phymem)
-
-    from psutil._pslinux import (IOPRIO_CLASS_NONE,  # NOQA
-                                 IOPRIO_CLASS_RT,
-                                 IOPRIO_CLASS_BE,
-                                 IOPRIO_CLASS_IDLE)
-    # Linux >= 2.6.36
-    if _psplatform.HAS_PRLIMIT:
-        from _psutil_linux import (RLIM_INFINITY,  # NOQA
-                                   RLIMIT_AS,
-                                   RLIMIT_CORE,
-                                   RLIMIT_CPU,
-                                   RLIMIT_DATA,
-                                   RLIMIT_FSIZE,
-                                   RLIMIT_LOCKS,
-                                   RLIMIT_MEMLOCK,
-                                   RLIMIT_NOFILE,
-                                   RLIMIT_NPROC,
-                                   RLIMIT_RSS,
-                                   RLIMIT_STACK)
-        # Kinda ugly but considerably faster than using hasattr() and
-        # setattr() against the module object (we are at import time:
-        # speed matters).
-        import _psutil_linux
-        try:
-            RLIMIT_MSGQUEUE = _psutil_linux.RLIMIT_MSGQUEUE
-        except AttributeError:
-            pass
-        try:
-            RLIMIT_NICE = _psutil_linux.RLIMIT_NICE
-        except AttributeError:
-            pass
-        try:
-            RLIMIT_RTPRIO = _psutil_linux.RLIMIT_RTPRIO
-        except AttributeError:
-            pass
-        try:
-            RLIMIT_RTTIME = _psutil_linux.RLIMIT_RTTIME
-        except AttributeError:
-            pass
-        try:
-            RLIMIT_SIGPENDING = _psutil_linux.RLIMIT_SIGPENDING
-        except AttributeError:
-            pass
-        del _psutil_linux
-
-elif sys.platform.startswith("win32"):
-    import psutil._pswindows as _psplatform
-    from _psutil_windows import (ABOVE_NORMAL_PRIORITY_CLASS,  # NOQA
-                                 BELOW_NORMAL_PRIORITY_CLASS,
-                                 HIGH_PRIORITY_CLASS,
-                                 IDLE_PRIORITY_CLASS,
-                                 NORMAL_PRIORITY_CLASS,
-                                 REALTIME_PRIORITY_CLASS)
-    from psutil._pswindows import CONN_DELETE_TCB  # NOQA
-
-elif sys.platform.startswith("darwin"):
-    import psutil._psosx as _psplatform
-
-elif sys.platform.startswith("freebsd"):
-    import psutil._psbsd as _psplatform
-
-elif sys.platform.startswith("sunos"):
-    import psutil._pssunos as _psplatform
-    from psutil._pssunos import (CONN_IDLE,  # NOQA
-                                 CONN_BOUND)
-
-else:
-    raise NotImplementedError('platform %s is not supported' % sys.platform)
-
-__all__.extend(_psplatform.__extra__all__)
-
-
-_TOTAL_PHYMEM = None
-_POSIX = os.name == 'posix'
-_WINDOWS = os.name == 'nt'
-_timer = getattr(time, 'monotonic', time.time)
-
-
-# Sanity check in case the user messed up with psutil installation
-# or did something weird with sys.path. In this case we might end
-# up importing a python module using a C extension module which
-# was compiled for a different version of psutil.
-# We want to prevent that by failing sooner rather than later.
-# See: https://github.com/giampaolo/psutil/issues/564
-if (int(__version__.replace('.', '')) !=
-        getattr(_psplatform.cext, 'version', None)):
-    msg = "version conflict: %r C extension module was built for another " \
-          "version of psutil (different than %s)" % (_psplatform.cext.__file__,
-                                                     __version__)
-    raise ImportError(msg)
-
-
-# =====================================================================
-# --- exceptions
-# =====================================================================
-
-class Error(Exception):
-    """Base exception class. All other psutil exceptions inherit
-    from this one.
-    """
-
-
-class NoSuchProcess(Error):
-    """Exception raised when a process with a certain PID doesn't
-    or no longer exists (zombie).
-    """
-
-    def __init__(self, pid, name=None, msg=None):
-        Error.__init__(self)
-        self.pid = pid
-        self.name = name
-        self.msg = msg
-        if msg is None:
-            if name:
-                details = "(pid=%s, name=%s)" % (self.pid, repr(self.name))
-            else:
-                details = "(pid=%s)" % self.pid
-            self.msg = "process no longer exists " + details
-
-    def __str__(self):
-        return self.msg
-
-
-class AccessDenied(Error):
-    """Exception raised when permission to perform an action is denied."""
-
-    def __init__(self, pid=None, name=None, msg=None):
-        Error.__init__(self)
-        self.pid = pid
-        self.name = name
-        self.msg = msg
-        if msg is None:
-            if (pid is not None) and (name is not None):
-                self.msg = "(pid=%s, name=%s)" % (pid, repr(name))
-            elif (pid is not None):
-                self.msg = "(pid=%s)" % self.pid
-            else:
-                self.msg = ""
-
-    def __str__(self):
-        return self.msg
-
-
-class TimeoutExpired(Error):
-    """Raised on Process.wait(timeout) if timeout expires and process
-    is still alive.
-    """
-
-    def __init__(self, seconds, pid=None, name=None):
-        Error.__init__(self)
-        self.seconds = seconds
-        self.pid = pid
-        self.name = name
-        self.msg = "timeout after %s seconds" % seconds
-        if (pid is not None) and (name is not None):
-            self.msg += " (pid=%s, name=%s)" % (pid, repr(name))
-        elif (pid is not None):
-            self.msg += " (pid=%s)" % self.pid
-
-    def __str__(self):
-        return self.msg
-
-# push exception classes into platform specific module namespace
-_psplatform.NoSuchProcess = NoSuchProcess
-_psplatform.AccessDenied = AccessDenied
-_psplatform.TimeoutExpired = TimeoutExpired
-
-
-# =====================================================================
-# --- Process class
-# =====================================================================
-
-def _assert_pid_not_reused(fun):
-    """Decorator which raises NoSuchProcess in case a process is no
-    longer running or its PID has been reused.
-    """
-    @functools.wraps(fun)
-    def wrapper(self, *args, **kwargs):
-        if not self.is_running():
-            raise NoSuchProcess(self.pid, self._name)
-        return fun(self, *args, **kwargs)
-    return wrapper
-
-
-class Process(object):
-    """Represents an OS process with the given PID.
-    If PID is omitted current process PID (os.getpid()) is used.
-    Raise NoSuchProcess if PID does not exist.
-
-    Note that most of the methods of this class do not make sure
-    the PID of the process being queried has been reused over time.
-    That means you might end up retrieving an information referring
-    to another process in case the original one this instance
-    refers to is gone in the meantime.
-
-    The only exceptions for which process identity is pre-emptively
-    checked and guaranteed are:
-
-     - parent()
-     - children()
-     - nice() (set)
-     - ionice() (set)
-     - rlimit() (set)
-     - cpu_affinity (set)
-     - suspend()
-     - resume()
-     - send_signal()
-     - terminate()
-     - kill()
-
-    To prevent this problem for all other methods you can:
-      - use is_running() before querying the process
-      - if you're continuously iterating over a set of Process
-        instances use process_iter() which pre-emptively checks
-        process identity for every yielded instance
-    """
-
-    def __init__(self, pid=None):
-        self._init(pid)
-
-    def _init(self, pid, _ignore_nsp=False):
-        if pid is None:
-            pid = os.getpid()
-        else:
-            if not _PY3 and not isinstance(pid, (int, long)):
-                raise TypeError('pid must be an integer (got %r)' % pid)
-            if pid < 0:
-                raise ValueError('pid must be a positive integer (got %s)'
-                                 % pid)
-        self._pid = pid
-        self._name = None
-        self._exe = None
-        self._create_time = None
-        self._gone = False
-        self._hash = None
-        # used for caching on Windows only (on POSIX ppid may change)
-        self._ppid = None
-        # platform-specific modules define an _psplatform.Process
-        # implementation class
-        self._proc = _psplatform.Process(pid)
-        self._last_sys_cpu_times = None
-        self._last_proc_cpu_times = None
-        # cache creation time for later use in is_running() method
-        try:
-            self.create_time()
-        except AccessDenied:
-            # we should never get here as AFAIK we're able to get
-            # process creation time on all platforms even as a
-            # limited user
-            pass
-        except NoSuchProcess:
-            if not _ignore_nsp:
-                msg = 'no process found with pid %s' % pid
-                raise NoSuchProcess(pid, None, msg)
-            else:
-                self._gone = True
-        # This pair is supposed to indentify a Process instance
-        # univocally over time (the PID alone is not enough as
-        # it might refer to a process whose PID has been reused).
-        # This will be used later in __eq__() and is_running().
-        self._ident = (self.pid, self._create_time)
-
-    def __str__(self):
-        try:
-            pid = self.pid
-            name = repr(self.name())
-        except NoSuchProcess:
-            details = "(pid=%s (terminated))" % self.pid
-        except AccessDenied:
-            details = "(pid=%s)" % (self.pid)
-        else:
-            details = "(pid=%s, name=%s)" % (pid, name)
-        return "%s.%s%s" % (self.__class__.__module__,
-                            self.__class__.__name__, details)
-
-    def __repr__(self):
-        return "<%s at %s>" % (self.__str__(), id(self))
-
-    def __eq__(self, other):
-        # Test for equality with another Process object based
-        # on PID and creation time.
-        if not isinstance(other, Process):
-            return NotImplemented
-        return self._ident == other._ident
-
-    def __ne__(self, other):
-        return not self == other
-
-    def __hash__(self):
-        if self._hash is None:
-            self._hash = hash(self._ident)
-        return self._hash
-
-    # --- utility methods
-
-    def as_dict(self, attrs=None, ad_value=None):
-        """Utility method returning process information as a
-        hashable dictionary.
-
-        If 'attrs' is specified it must be a list of strings
-        reflecting available Process class' attribute names
-        (e.g. ['cpu_times', 'name']) else all public (read
-        only) attributes are assumed.
-
-        'ad_value' is the value which gets assigned in case
-        AccessDenied  exception is raised when retrieving that
-        particular process information.
-        """
-        excluded_names = set(
-            ['send_signal', 'suspend', 'resume', 'terminate', 'kill', 'wait',
-             'is_running', 'as_dict', 'parent', 'children', 'rlimit'])
-        retdict = dict()
-        ls = set(attrs or [x for x in dir(self) if not x.startswith('get')])
-        for name in ls:
-            if name.startswith('_'):
-                continue
-            if name.startswith('set_'):
-                continue
-            if name.startswith('get_'):
-                msg = "%s() is deprecated; use %s() instead" % (name, name[4:])
-                warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
-                name = name[4:]
-                if name in ls:
-                    continue
-            if name == 'getcwd':
-                msg = "getcwd() is deprecated; use cwd() instead"
-                warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
-                name = 'cwd'
-                if name in ls:
-                    continue
-
-            if name in excluded_names:
-                continue
-            try:
-                attr = getattr(self, name)
-                if callable(attr):
-                    ret = attr()
-                else:
-                    ret = attr
-            except AccessDenied:
-                ret = ad_value
-            except NotImplementedError:
-                # in case of not implemented functionality (may happen
-                # on old or exotic systems) we want to crash only if
-                # the user explicitly asked for that particular attr
-                if attrs:
-                    raise
-                continue
-            retdict[name] = ret
-        return retdict
-
-    def parent(self):
-        """Return the parent process as a Process object pre-emptively
-        checking whether PID has been reused.
-        If no parent is known return None.
-        """
-        ppid = self.ppid()
-        if ppid is not None:
-            try:
-                parent = Process(ppid)
-                if parent.create_time() <= self.create_time():
-                    return parent
-                # ...else ppid has been reused by another process
-            except NoSuchProcess:
-                pass
-
-    def is_running(self):
-        """Return whether this process is running.
-        It also checks if PID has been reused by another process in
-        which case return False.
-        """
-        if self._gone:
-            return False
-        try:
-            # Checking if PID is alive is not enough as the PID might
-            # have been reused by another process: we also want to
-            # check process identity.
-            # Process identity / uniqueness over time is greanted by
-            # (PID + creation time) and that is verified in __eq__.
-            return self == Process(self.pid)
-        except NoSuchProcess:
-            self._gone = True
-            return False
-
-    # --- actual API
-
-    @property
-    def pid(self):
-        """The process PID."""
-        return self._pid
-
-    def ppid(self):
-        """The process parent PID.
-        On Windows the return value is cached after first call.
-        """
-        # On POSIX we don't want to cache the ppid as it may unexpectedly
-        # change to 1 (init) in case this process turns into a zombie:
-        # https://github.com/giampaolo/psutil/issues/321
-        # http://stackoverflow.com/questions/356722/
-
-        # XXX should we check creation time here rather than in
-        # Process.parent()?
-        if _POSIX:
-            return self._proc.ppid()
-        else:
-            if self._ppid is None:
-                self._ppid = self._proc.ppid()
-            return self._ppid
-
-    def name(self):
-        """The process name. The return value is cached after first call."""
-        if self._name is None:
-            name = self._proc.name()
-            if _POSIX and len(name) >= 15:
-                # On UNIX the name gets truncated to the first 15 characters.
-                # If it matches the first part of the cmdline we return that
-                # one instead because it's usually more explicative.
-                # Examples are "gnome-keyring-d" vs. "gnome-keyring-daemon".
-                try:
-                    cmdline = self.cmdline()
-                except AccessDenied:
-                    pass
-                else:
-                    if cmdline:
-                        extended_name = os.path.basename(cmdline[0])
-                        if extended_name.startswith(name):
-                            name = extended_name
-            self._proc._name = name
-            self._name = name
-        return self._name
-
-    def exe(self):
-        """The process executable as an absolute path.
-        May also be an empty string.
-        The return value is cached after first call.
-        """
-        def guess_it(fallback):
-            # try to guess exe from cmdline[0] in absence of a native
-            # exe representation
-            cmdline = self.cmdline()
-            if cmdline and hasattr(os, 'access') and hasattr(os, 'X_OK'):
-                exe = cmdline[0]  # the possible exe
-                # Attempt to guess only in case of an absolute path.
-                # It is not safe otherwise as the process might have
-                # changed cwd.
-                if (os.path.isabs(exe)
-                        and os.path.isfile(exe)
-                        and os.access(exe, os.X_OK)):
-                    return exe
-            if isinstance(fallback, AccessDenied):
-                raise fallback
-            return fallback
-
-        if self._exe is None:
-            try:
-                exe = self._proc.exe()
-            except AccessDenied as err:
-                return guess_it(fallback=err)
-            else:
-                if not exe:
-                    # underlying implementation can legitimately return an
-                    # empty string; if that's the case we don't want to
-                    # raise AD while guessing from the cmdline
-                    try:
-                        exe = guess_it(fallback=exe)
-                    except AccessDenied:
-                        pass
-                self._exe = exe
-        return self._exe
-
-    def cmdline(self):
-        """The command line this process has been called with."""
-        return self._proc.cmdline()
-
-    def status(self):
-        """The process current status as a STATUS_* constant."""
-        return self._proc.status()
-
-    def username(self):
-        """The name of the user that owns the process.
-        On UNIX this is calculated by using *real* process uid.
-        """
-        if _POSIX:
-            if pwd is None:
-                # might happen if python was installed from sources
-                raise ImportError(
-                    "requires pwd module shipped with standard python")
-            real_uid = self.uids().real
-            try:
-                return pwd.getpwuid(real_uid).pw_name
-            except KeyError:
-                # the uid can't be resolved by the system
-                return str(real_uid)
-        else:
-            return self._proc.username()
-
-    def create_time(self):
-        """The process creation time as a floating point number
-        expressed in seconds since the epoch, in UTC.
-        The return value is cached after first call.
-        """
-        if self._create_time is None:
-            self._create_time = self._proc.create_time()
-        return self._create_time
-
-    def cwd(self):
-        """Process current working directory as an absolute path."""
-        return self._proc.cwd()
-
-    def nice(self, value=None):
-        """Get or set process niceness (priority)."""
-        if value is None:
-            return self._proc.nice_get()
-        else:
-            if not self.is_running():
-                raise NoSuchProcess(self.pid, self._name)
-            self._proc.nice_set(value)
-
-    if _POSIX:
-
-        def uids(self):
-            """Return process UIDs as a (real, effective, saved)
-            namedtuple.
-            """
-            return self._proc.uids()
-
-        def gids(self):
-            """Return process GIDs as a (real, effective, saved)
-            namedtuple.
-            """
-            return self._proc.gids()
-
-        def terminal(self):
-            """The terminal associated with this process, if any,
-            else None.
-            """
-            return self._proc.terminal()
-
-        def num_fds(self):
-            """Return the number of file descriptors opened by this
-            process (POSIX only).
-            """
-            return self._proc.num_fds()
-
-    # Linux, BSD and Windows only
-    if hasattr(_psplatform.Process, "io_counters"):
-
-        def io_counters(self):
-            """Return process I/O statistics as a
-            (read_count, write_count, read_bytes, write_bytes)
-            namedtuple.
-            Those are the number of read/write calls performed and the
-            amount of bytes read and written by the process.
-            """
-            return self._proc.io_counters()
-
-    # Linux and Windows >= Vista only
-    if hasattr(_psplatform.Process, "ionice_get"):
-
-        def ionice(self, ioclass=None, value=None):
-            """Get or set process I/O niceness (priority).
-
-            On Linux 'ioclass' is one of the IOPRIO_CLASS_* constants.
-            'value' is a number which goes from 0 to 7. The higher the
-            value, the lower the I/O priority of the process.
-
-            On Windows only 'ioclass' is used and it can be set to 2
-            (normal), 1 (low) or 0 (very low).
-
-            Available on Linux and Windows > Vista only.
-            """
-            if ioclass is None:
-                if value is not None:
-                    raise ValueError("'ioclass' must be specified")
-                return self._proc.ionice_get()
-            else:
-                return self._proc.ionice_set(ioclass, value)
-
-    # Linux only
-    if hasattr(_psplatform.Process, "rlimit"):
-
-        def rlimit(self, resource, limits=None):
-            """Get or set process resource limits as a (soft, hard)
-            tuple.
-
-            'resource' is one of the RLIMIT_* constants.
-            'limits' is supposed to be a (soft, hard)  tuple.
-
-            See "man prlimit" for further info.
-            Available on Linux only.
-            """
-            if limits is None:
-                return self._proc.rlimit(resource)
-            else:
-                return self._proc.rlimit(resource, limits)
-
-    # Windows, Linux and BSD only
-    if hasattr(_psplatform.Process, "cpu_affinity_get"):
-
-        def cpu_affinity(self, cpus=None):
-            """Get or set process CPU affinity.
-            If specified 'cpus' must be a list of CPUs for which you
-            want to set the affinity (e.g. [0, 1]).
-            (Windows, Linux and BSD only).
-            """
-            if cpus is None:
-                return self._proc.cpu_affinity_get()
-            else:
-                self._proc.cpu_affinity_set(cpus)
-
-    if _WINDOWS:
-
-        def num_handles(self):
-            """Return the number of handles opened by this process
-            (Windows only).
-            """
-            return self._proc.num_handles()
-
-    def num_ctx_switches(self):
-        """Return the number of voluntary and involuntary context
-        switches performed by this process.
-        """
-        return self._proc.num_ctx_switches()
-
-    def num_threads(self):
-        """Return the number of threads used by this process."""
-        return self._proc.num_threads()
-
-    def threads(self):
-        """Return threads opened by process as a list of
-        (id, user_time, system_time) namedtuples representing
-        thread id and thread CPU times (user/system).
-        """
-        return self._proc.threads()
-
-    @_assert_pid_not_reused
-    def children(self, recursive=False):
-        """Return the children of this process as a list of Process
-        instances, pre-emptively checking whether PID has been reused.
-        If recursive is True return all the parent descendants.
-
-        Example (A == this process):
-
-         A ─┐
-            │
-            ├─ B (child) ─┐
-            │             └─ X (grandchild) ─┐
-            │                                └─ Y (great grandchild)
-            ├─ C (child)
-            └─ D (child)
-
-        >>> import psutil
-        >>> p = psutil.Process()
-        >>> p.children()
-        B, C, D
-        >>> p.children(recursive=True)
-        B, X, Y, C, D
-
-        Note that in the example above if process X disappears
-        process Y won't be listed as the reference to process A
-        is lost.
-        """
-        if hasattr(_psplatform, 'ppid_map'):
-            # Windows only: obtain a {pid:ppid, ...} dict for all running
-            # processes in one shot (faster).
-            ppid_map = _psplatform.ppid_map()
-        else:
-            ppid_map = None
-
-        ret = []
-        if not recursive:
-            if ppid_map is None:
-                # 'slow' version, common to all platforms except Windows
-                for p in process_iter():
-                    try:
-                        if p.ppid() == self.pid:
-                            # if child happens to be older than its parent
-                            # (self) it means child's PID has been reused
-                            if self.create_time() <= p.create_time():
-                                ret.append(p)
-                    except NoSuchProcess:
-                        pass
-            else:
-                # Windows only (faster)
-                for pid, ppid in ppid_map.items():
-                    if ppid == self.pid:
-                        try:
-                            child = Process(pid)
-                            # if child happens to be older than its parent
-                            # (self) it means child's PID has been reused
-                            if self.create_time() <= child.create_time():
-                                ret.append(child)
-                        except NoSuchProcess:
-                            pass
-        else:
-            # construct a dict where 'values' are all the processes
-            # having 'key' as their parent
-            table = collections.defaultdict(list)
-            if ppid_map is None:
-                for p in process_iter():
-                    try:
-                        table[p.ppid()].append(p)
-                    except NoSuchProcess:
-                        pass
-            else:
-                for pid, ppid in ppid_map.items():
-                    try:
-                        p = Process(pid)
-                        table[ppid].append(p)
-                    except NoSuchProcess:
-                        pass
-            # At this point we have a mapping table where table[self.pid]
-            # are the current process' children.
-            # Below, we look for all descendants recursively, similarly
-            # to a recursive function call.
-            checkpids = [self.pid]
-            for pid in checkpids:
-                for child in table[pid]:
-                    try:
-                        # if child happens to be older than its parent
-                        # (self) it means child's PID has been reused
-                        intime = self.create_time() <= child.create_time()
-                    except NoSuchProcess:
-                        pass
-                    else:
-                        if intime:
-                            ret.append(child)
-                            if child.pid not in checkpids:
-                                checkpids.append(child.pid)
-        return ret
-
-    def cpu_percent(self, interval=None):
-        """Return a float representing the current process CPU
-        utilization as a percentage.
-
-        When interval is 0.0 or None (default) compares process times
-        to system CPU times elapsed since last call, returning
-        immediately (non-blocking). That means that the first time
-        this is called it will return a meaningful 0.0 value.
-
-        When interval is > 0.0 compares process times to system CPU
-        times elapsed before and after the interval (blocking).
-
-        In this case is recommended for accuracy that this function
-        be called with at least 0.1 seconds between calls.
-
-        Examples:
-
-          >>> import psutil
-          >>> p = psutil.Process(os.getpid())
-          >>> # blocking
-          >>> p.cpu_percent(interval=1)
-          2.0
-          >>> # non-blocking (percentage since last call)
-          >>> p.cpu_percent(interval=None)
-          2.9
-          >>>
-        """
-        blocking = interval is not None and interval > 0.0
-        num_cpus = cpu_count()
-        if _POSIX:
-            timer = lambda: _timer() * num_cpus
-        else:
-            timer = lambda: sum(cpu_times())
-        if blocking:
-            st1 = timer()
-            pt1 = self._proc.cpu_times()
-            time.sleep(interval)
-            st2 = timer()
-            pt2 = self._proc.cpu_times()
-        else:
-            st1 = self._last_sys_cpu_times
-            pt1 = self._last_proc_cpu_times
-            st2 = timer()
-            pt2 = self._proc.cpu_times()
-            if st1 is None or pt1 is None:
-                self._last_sys_cpu_times = st2
-                self._last_proc_cpu_times = pt2
-                return 0.0
-
-        delta_proc = (pt2.user - pt1.user) + (pt2.system - pt1.system)
-        delta_time = st2 - st1
-        # reset values for next call in case of interval == None
-        self._last_sys_cpu_times = st2
-        self._last_proc_cpu_times = pt2
-
-        try:
-            # The utilization split between all CPUs.
-            # Note: a percentage > 100 is legitimate as it can result
-            # from a process with multiple threads running on different
-            # CPU cores, see:
-            # http://stackoverflow.com/questions/1032357
-            # https://github.com/giampaolo/psutil/issues/474
-            overall_percent = ((delta_proc / delta_time) * 100) * num_cpus
-        except ZeroDivisionError:
-            # interval was too low
-            return 0.0
-        else:
-            return round(overall_percent, 1)
-
-    def cpu_times(self):
-        """Return a (user, system) namedtuple representing  the
-        accumulated process time, in seconds.
-        This is the same as os.times() but per-process.
-        """
-        return self._proc.cpu_times()
-
-    def memory_info(self):
-        """Return a tuple representing RSS (Resident Set Size) and VMS
-        (Virtual Memory Size) in bytes.
-
-        On UNIX RSS and VMS are the same values shown by 'ps'.
-
-        On Windows RSS and VMS refer to "Mem Usage" and "VM Size"
-        columns of taskmgr.exe.
-        """
-        return self._proc.memory_info()
-
-    def memory_info_ex(self):
-        """Return a namedtuple with variable fields depending on the
-        platform representing extended memory information about
-        this process. All numbers are expressed in bytes.
-        """
-        return self._proc.memory_info_ex()
-
-    def memory_percent(self):
-        """Compare physical system memory to process resident memory
-        (RSS) and calculate process memory utilization as a percentage.
-        """
-        rss = self._proc.memory_info()[0]
-        # use cached value if available
-        total_phymem = _TOTAL_PHYMEM or virtual_memory().total
-        try:
-            return (rss / float(total_phymem)) * 100
-        except ZeroDivisionError:
-            return 0.0
-
-    def memory_maps(self, grouped=True):
-        """Return process' mapped memory regions as a list of nameduples
-        whose fields are variable depending on the platform.
-
-        If 'grouped' is True the mapped regions with the same 'path'
-        are grouped together and the different memory fields are summed.
-
-        If 'grouped' is False every mapped region is shown as a single
-        entity and the namedtuple will also include the mapped region's
-        address space ('addr') and permission set ('perms').
-        """
-        it = self._proc.memory_maps()
-        if grouped:
-            d = {}
-            for tupl in it:
-                path = tupl[2]
-                nums = tupl[3:]
-                try:
-                    d[path] = map(lambda x, y: x + y, d[path], nums)
-                except KeyError:
-                    d[path] = nums
-            nt = _psplatform.pmmap_grouped
-            return [nt(path, *d[path]) for path in d]  # NOQA
-        else:
-            nt = _psplatform.pmmap_ext
-            return [nt(*x) for x in it]
-
-    def open_files(self):
-        """Return files opened by process as a list of
-        (path, fd) namedtuples including the absolute file name
-        and file descriptor number.
-        """
-        return self._proc.open_files()
-
-    def connections(self, kind='inet'):
-        """Return connections opened by process as a list of
-        (fd, family, type, laddr, raddr, status) namedtuples.
-        The 'kind' parameter filters for connections that match the
-        following criteria:
-
-        Kind Value      Connections using
-        inet            IPv4 and IPv6
-        inet4           IPv4
-        inet6           IPv6
-        tcp             TCP
-        tcp4            TCP over IPv4
-        tcp6            TCP over IPv6
-        udp             UDP
-        udp4            UDP over IPv4
-        udp6            UDP over IPv6
-        unix            UNIX socket (both UDP and TCP protocols)
-        all             the sum of all the possible families and protocols
-        """
-        return self._proc.connections(kind)
-
-    if _POSIX:
-        def _send_signal(self, sig):
-            # XXX: according to "man 2 kill" PID 0 has a special
-            # meaning as it refers to <<every process in the process
-            # group of the calling process>>, so should we prevent
-            # it here?
-            try:
-                os.kill(self.pid, sig)
-            except OSError as err:
-                if err.errno == errno.ESRCH:
-                    self._gone = True
-                    raise NoSuchProcess(self.pid, self._name)
-                if err.errno == errno.EPERM:
-                    raise AccessDenied(self.pid, self._name)
-                raise
-
-    @_assert_pid_not_reused
-    def send_signal(self, sig):
-        """Send a signal to process pre-emptively checking whether
-        PID has been reused (see signal module constants) .
-        On Windows only SIGTERM is valid and is treated as an alias
-        for kill().
-        """
-        if _POSIX:
-            self._send_signal(sig)
-        else:
-            if sig == signal.SIGTERM:
-                self._proc.kill()
-            else:
-                raise ValueError("only SIGTERM is supported on Windows")
-
-    @_assert_pid_not_reused
-    def suspend(self):
-        """Suspend process execution with SIGSTOP pre-emptively checking
-        whether PID has been reused.
-        On Windows this has the effect ot suspending all process threads.
-        """
-        if _POSIX:
-            self._send_signal(signal.SIGSTOP)
-        else:
-            self._proc.suspend()
-
-    @_assert_pid_not_reused
-    def resume(self):
-        """Resume process execution with SIGCONT pre-emptively checking
-        whether PID has been reused.
-        On Windows this has the effect of resuming all process threads.
-        """
-        if _POSIX:
-            self._send_signal(signal.SIGCONT)
-        else:
-            self._proc.resume()
-
-    @_assert_pid_not_reused
-    def terminate(self):
-        """Terminate the process with SIGTERM pre-emptively checking
-        whether PID has been reused.
-        On Windows this is an alias for kill().
-        """
-        if _POSIX:
-            self._send_signal(signal.SIGTERM)
-        else:
-            self._proc.kill()
-
-    @_assert_pid_not_reused
-    def kill(self):
-        """Kill the current process with SIGKILL pre-emptively checking
-        whether PID has been reused.
-        """
-        if _POSIX:
-            self._send_signal(signal.SIGKILL)
-        else:
-            self._proc.kill()
-
-    def wait(self, timeout=None):
-        """Wait for process to terminate and, if process is a children
-        of os.getpid(), also return its exit code, else None.
-
-        If the process is already terminated immediately return None
-        instead of raising NoSuchProcess.
-
-        If timeout (in seconds) is specified and process is still alive
-        raise TimeoutExpired.
-
-        To wait for multiple Process(es) use psutil.wait_procs().
-        """
-        if timeout is not None and not timeout >= 0:
-            raise ValueError("timeout must be a positive integer")
-        return self._proc.wait(timeout)
-
-    # --- deprecated APIs
-
-    _locals = set(locals())
-
-    @_deprecated_method(replacement='children')
-    def get_children(self):
-        pass
-
-    @_deprecated_method(replacement='connections')
-    def get_connections(self):
-        pass
-
-    if "cpu_affinity" in _locals:
-        @_deprecated_method(replacement='cpu_affinity')
-        def get_cpu_affinity(self):
-            pass
-
-        @_deprecated_method(replacement='cpu_affinity')
-        def set_cpu_affinity(self, cpus):
-            pass
-
-    @_deprecated_method(replacement='cpu_percent')
-    def get_cpu_percent(self):
-        pass
-
-    @_deprecated_method(replacement='cpu_times')
-    def get_cpu_times(self):
-        pass
-
-    @_deprecated_method(replacement='cwd')
-    def getcwd(self):
-        pass
-
-    @_deprecated_method(replacement='memory_info_ex')
-    def get_ext_memory_info(self):
-        pass
-
-    if "io_counters" in _locals:
-        @_deprecated_method(replacement='io_counters')
-        def get_io_counters(self):
-            pass
-
-    if "ionice" in _locals:
-        @_deprecated_method(replacement='ionice')
-        def get_ionice(self):
-            pass
-
-        @_deprecated_method(replacement='ionice')
-        def set_ionice(self, ioclass, value=None):
-            pass
-
-    @_deprecated_method(replacement='memory_info')
-    def get_memory_info(self):
-        pass
-
-    @_deprecated_method(replacement='memory_maps')
-    def get_memory_maps(self):
-        pass
-
-    @_deprecated_method(replacement='memory_percent')
-    def get_memory_percent(self):
-        pass
-
-    @_deprecated_method(replacement='nice')
-    def get_nice(self):
-        pass
-
-    @_deprecated_method(replacement='num_ctx_switches')
-    def get_num_ctx_switches(self):
-        pass
-
-    if 'num_fds' in _locals:
-        @_deprecated_method(replacement='num_fds')
-        def get_num_fds(self):
-            pass
-
-    if 'num_handles' in _locals:
-        @_deprecated_method(replacement='num_handles')
-        def get_num_handles(self):
-            pass
-
-    @_deprecated_method(replacement='num_threads')
-    def get_num_threads(self):
-        pass
-
-    @_deprecated_method(replacement='open_files')
-    def get_open_files(self):
-        pass
-
-    if "rlimit" in _locals:
-        @_deprecated_method(replacement='rlimit')
-        def get_rlimit(self):
-            pass
-
-        @_deprecated_method(replacement='rlimit')
-        def set_rlimit(self, resource, limits):
-            pass
-
-    @_deprecated_method(replacement='threads')
-    def get_threads(self):
-        pass
-
-    @_deprecated_method(replacement='nice')
-    def set_nice(self, value):
-        pass
-
-    del _locals
-
-
-# =====================================================================
-# --- Popen class
-# =====================================================================
-
-class Popen(Process):
-    """A more convenient interface to stdlib subprocess module.
-    It starts a sub process and deals with it exactly as when using
-    subprocess.Popen class but in addition also provides all the
-    properties and methods of psutil.Process class as a unified
-    interface:
-
-      >>> import psutil
-      >>> from subprocess import PIPE
-      >>> p = psutil.Popen(["python", "-c", "print 'hi'"], stdout=PIPE)
-      >>> p.name()
-      'python'
-      >>> p.uids()
-      user(real=1000, effective=1000, saved=1000)
-      >>> p.username()
-      'giampaolo'
-      >>> p.communicate()
-      ('hi\n', None)
-      >>> p.terminate()
-      >>> p.wait(timeout=2)
-      0
-      >>>
-
-    For method names common to both classes such as kill(), terminate()
-    and wait(), psutil.Process implementation takes precedence.
-
-    Unlike subprocess.Popen this class pre-emptively checks wheter PID
-    has been reused on send_signal(), terminate() and kill() so that
-    you don't accidentally terminate another process, fixing
-    http://bugs.python.org/issue6973.
-
-    For a complete documentation refer to:
-    http://docs.python.org/library/subprocess.html
-    """
-
-    def __init__(self, *args, **kwargs):
-        # Explicitly avoid to raise NoSuchProcess in case the process
-        # spawned by subprocess.Popen terminates too quickly, see:
-        # https://github.com/giampaolo/psutil/issues/193
-        self.__subproc = subprocess.Popen(*args, **kwargs)
-        self._init(self.__subproc.pid, _ignore_nsp=True)
-
-    def __dir__(self):
-        return sorted(set(dir(Popen) + dir(subprocess.Popen)))
-
-    def __getattribute__(self, name):
-        try:
-            return object.__getattribute__(self, name)
-        except AttributeError:
-            try:
-                return object.__getattribute__(self.__subproc, name)
-            except AttributeError:
-                raise AttributeError("%s instance has no attribute '%s'"
-                                     % (self.__class__.__name__, name))
-
-    def wait(self, timeout=None):
-        if self.__subproc.returncode is not None:
-            return self.__subproc.returncode
-        ret = super(Popen, self).wait(timeout)
-        self.__subproc.returncode = ret
-        return ret
-
-
-# =====================================================================
-# --- system processes related functions
-# =====================================================================
-
-def pids():
-    """Return a list of current running PIDs."""
-    return _psplatform.pids()
-
-
-def pid_exists(pid):
-    """Return True if given PID exists in the current process list.
-    This is faster than doing "pid in psutil.pids()" and
-    should be preferred.
-    """
-    if pid < 0:
-        return False
-    elif pid == 0 and _POSIX:
-        # On POSIX we use os.kill() to determine PID existence.
-        # According to "man 2 kill" PID 0 has a special meaning
-        # though: it refers to <<every process in the process
-        # group of the calling process>> and that is not we want
-        # to do here.
-        return pid in pids()
-    else:
-        return _psplatform.pid_exists(pid)
-
-
-_pmap = {}
-
-
-def process_iter():
-    """Return a generator yielding a Process instance for all
-    running processes.
-
-    Every new Process instance is only created once and then cached
-    into an internal table which is updated every time this is used.
-
-    Cached Process instances are checked for identity so that you're
-    safe in case a PID has been reused by another process, in which
-    case the cached instance is updated.
-
-    The sorting order in which processes are yielded is based on
-    their PIDs.
-    """
-    def add(pid):
-        proc = Process(pid)
-        _pmap[proc.pid] = proc
-        return proc
-
-    def remove(pid):
-        _pmap.pop(pid, None)
-
-    a = set(pids())
-    b = set(_pmap.keys())
-    new_pids = a - b
-    gone_pids = b - a
-
-    for pid in gone_pids:
-        remove(pid)
-    for pid, proc in sorted(list(_pmap.items()) +
-                            list(dict.fromkeys(new_pids).items())):
-        try:
-            if proc is None:  # new process
-                yield add(pid)
-            else:
-                # use is_running() to check whether PID has been reused by
-                # another process in which case yield a new Process instance
-                if proc.is_running():
-                    yield proc
-                else:
-                    yield add(pid)
-        except NoSuchProcess:
-            remove(pid)
-        except AccessDenied:
-            # Process creation time can't be determined hence there's
-            # no way to tell whether the pid of the cached process
-            # has been reused. Just return the cached version.
-            yield proc
-
-
-def wait_procs(procs, timeout=None, callback=None):
-    """Convenience function which waits for a list of processes to
-    terminate.
-
-    Return a (gone, alive) tuple indicating which processes
-    are gone and which ones are still alive.
-
-    The gone ones will have a new 'returncode' attribute indicating
-    process exit status (may be None).
-
-    'callback' is a function which gets called every time a process
-    terminates (a Process instance is passed as callback argument).
-
-    Function will return as soon as all processes terminate or when
-    timeout occurs.
-
-    Typical use case is:
-
-     - send SIGTERM to a list of processes
-     - give them some time to terminate
-     - send SIGKILL to those ones which are still alive
-
-    Example:
-
-    >>> def on_terminate(proc):
-    ...     print("process {} terminated".format(proc))
-    ...
-    >>> for p in procs:
-    ...    p.terminate()
-    ...
-    >>> gone, alive = wait_procs(procs, timeout=3, callback=on_terminate)
-    >>> for p in alive:
-    ...     p.kill()
-    """
-    def check_gone(proc, timeout):
-        try:
-            returncode = proc.wait(timeout=timeout)
-        except TimeoutExpired:
-            pass
-        else:
-            if returncode is not None or not proc.is_running():
-                proc.returncode = returncode
-                gone.add(proc)
-                if callback is not None:
-                    callback(proc)
-
-    if timeout is not None and not timeout >= 0:
-        msg = "timeout must be a positive integer, got %s" % timeout
-        raise ValueError(msg)
-    gone = set()
-    alive = set(procs)
-    if callback is not None and not callable(callback):
-        raise TypeError("callback %r is not a callable" % callable)
-    if timeout is not None:
-        deadline = _timer() + timeout
-
-    while alive:
-        if timeout is not None and timeout <= 0:
-            break
-        for proc in alive:
-            # Make sure that every complete iteration (all processes)
-            # will last max 1 sec.
-            # We do this because we don't want to wait too long on a
-            # single process: in case it terminates too late other
-            # processes may disappear in the meantime and their PID
-            # reused.
-            max_timeout = 1.0 / len(alive)
-            if timeout is not None:
-                timeout = min((deadline - _timer()), max_timeout)
-                if timeout <= 0:
-                    break
-                check_gone(proc, timeout)
-            else:
-                check_gone(proc, max_timeout)
-        alive = alive - gone
-
-    if alive:
-        # Last attempt over processes survived so far.
-        # timeout == 0 won't make this function wait any further.
-        for proc in alive:
-            check_gone(proc, 0)
-        alive = alive - gone
-
-    return (list(gone), list(alive))
-
-
-# =====================================================================
-# --- CPU related functions
-# =====================================================================
-
-@memoize
-def cpu_count(logical=True):
-    """Return the number of logical CPUs in the system (same as
-    os.cpu_count() in Python 3.4).
-
-    If logical is False return the number of physical cores only
-    (hyper thread CPUs are excluded).
-
-    Return None if undetermined.
-
-    The return value is cached after first call.
-    If desired cache can be cleared like this:
-
-    >>> psutil.cpu_count.cache_clear()
-    """
-    if logical:
-        return _psplatform.cpu_count_logical()
-    else:
-        return _psplatform.cpu_count_physical()
-
-
-def cpu_times(percpu=False):
-    """Return system-wide CPU times as a namedtuple.
-    Every CPU time represents the seconds the CPU has spent in the given mode.
-    The namedtuple's fields availability varies depending on the platform:
-     - user
-     - system
-     - idle
-     - nice (UNIX)
-     - iowait (Linux)
-     - irq (Linux, FreeBSD)
-     - softirq (Linux)
-     - steal (Linux >= 2.6.11)
-     - guest (Linux >= 2.6.24)
-     - guest_nice (Linux >= 3.2.0)
-
-    When percpu is True return a list of nameduples for each CPU.
-    First element of the list refers to first CPU, second element
-    to second CPU and so on.
-    The order of the list is consistent across calls.
-    """
-    if not percpu:
-        return _psplatform.cpu_times()
-    else:
-        return _psplatform.per_cpu_times()
-
-
-_last_cpu_times = cpu_times()
-_last_per_cpu_times = cpu_times(percpu=True)
-
-
-def cpu_percent(interval=None, percpu=False):
-    """Return a float representing the current system-wide CPU
-    utilization as a percentage.
-
-    When interval is > 0.0 compares system CPU times elapsed before
-    and after the interval (blocking).
-
-    When interval is 0.0 or None compares system CPU times elapsed
-    since last call or module import, returning immediately (non
-    blocking). That means the first time this is called it will
-    return a meaningless 0.0 value which you should ignore.
-    In this case it is recommended, for accuracy, that this function
-    be called with at least 0.1 seconds between calls.
-
-    When percpu is True returns a list of floats representing the
-    utilization as a percentage for each CPU.
-    First element of the list refers to first CPU, second element
-    to second CPU and so on.
-    The order of the list is consistent across calls.
-
-    Examples:
-
-      >>> # blocking, system-wide
-      >>> psutil.cpu_percent(interval=1)
-      2.0
-      >>>
-      >>> # blocking, per-cpu
-      >>> psutil.cpu_percent(interval=1, percpu=True)
-      [2.0, 1.0]
-      >>>
-      >>> # non-blocking (percentage since last call)
-      >>> psutil.cpu_percent(interval=None)
-      2.9
-      >>>
-    """
-    global _last_cpu_times
-    global _last_per_cpu_times
-    blocking = interval is not None and interval > 0.0
-
-    def calculate(t1, t2):
-        t1_all = sum(t1)
-        t1_busy = t1_all - t1.idle
-
-        t2_all = sum(t2)
-        t2_busy = t2_all - t2.idle
-
-        # this usually indicates a float precision issue
-        if t2_busy <= t1_busy:
-            return 0.0
-
-        busy_delta = t2_busy - t1_busy
-        all_delta = t2_all - t1_all
-        busy_perc = (busy_delta / all_delta) * 100
-        return round(busy_perc, 1)
-
-    # system-wide usage
-    if not percpu:
-        if blocking:
-            t1 = cpu_times()
-            time.sleep(interval)
-        else:
-            t1 = _last_cpu_times
-        _last_cpu_times = cpu_times()
-        return calculate(t1, _last_cpu_times)
-    # per-cpu usage
-    else:
-        ret = []
-        if blocking:
-            tot1 = cpu_times(percpu=True)
-            time.sleep(interval)
-        else:
-            tot1 = _last_per_cpu_times
-        _last_per_cpu_times = cpu_times(percpu=True)
-        for t1, t2 in zip(tot1, _last_per_cpu_times):
-            ret.append(calculate(t1, t2))
-        return ret
-
-
-# Use separate global vars for cpu_times_percent() so that it's
-# independent from cpu_percent() and they can both be used within
-# the same program.
-_last_cpu_times_2 = _last_cpu_times
-_last_per_cpu_times_2 = _last_per_cpu_times
-
-
-def cpu_times_percent(interval=None, percpu=False):
-    """Same as cpu_percent() but provides utilization percentages
-    for each specific CPU time as is returned by cpu_times().
-    For instance, on Linux we'll get:
-
-      >>> cpu_times_percent()
-      cpupercent(user=4.8, nice=0.0, system=4.8, idle=90.5, iowait=0.0,
-                 irq=0.0, softirq=0.0, steal=0.0, guest=0.0, guest_nice=0.0)
-      >>>
-
-    interval and percpu arguments have the same meaning as in
-    cpu_percent().
-    """
-    global _last_cpu_times_2
-    global _last_per_cpu_times_2
-    blocking = interval is not None and interval > 0.0
-
-    def calculate(t1, t2):
-        nums = []
-        all_delta = sum(t2) - sum(t1)
-        for field in t1._fields:
-            field_delta = getattr(t2, field) - getattr(t1, field)
-            try:
-                field_perc = (100 * field_delta) / all_delta
-            except ZeroDivisionError:
-                field_perc = 0.0
-            field_perc = round(field_perc, 1)
-            if _WINDOWS:
-                # XXX
-                # Work around:
-                # https://github.com/giampaolo/psutil/issues/392
-                # CPU times are always supposed to increase over time
-                # or at least remain the same and that's because time
-                # cannot go backwards.
-                # Surprisingly sometimes this might not be the case on
-                # Windows where 'system' CPU time can be smaller
-                # compared to the previous call, resulting in corrupted
-                # percentages (< 0 or > 100).
-                # I really don't know what to do about that except
-                # forcing the value to 0 or 100.
-                if field_perc > 100.0:
-                    field_perc = 100.0
-                elif field_perc < 0.0:
-                    field_perc = 0.0
-            nums.append(field_perc)
-        return _psplatform.scputimes(*nums)
-
-    # system-wide usage
-    if not percpu:
-        if blocking:
-            t1 = cpu_times()
-            time.sleep(interval)
-        else:
-            t1 = _last_cpu_times_2
-        _last_cpu_times_2 = cpu_times()
-        return calculate(t1, _last_cpu_times_2)
-    # per-cpu usage
-    else:
-        ret = []
-        if blocking:
-            tot1 = cpu_times(percpu=True)
-            time.sleep(interval)
-        else:
-            tot1 = _last_per_cpu_times_2
-        _last_per_cpu_times_2 = cpu_times(percpu=True)
-        for t1, t2 in zip(tot1, _last_per_cpu_times_2):
-            ret.append(calculate(t1, t2))
-        return ret
-
-
-# =====================================================================
-# --- system memory related functions
-# =====================================================================
-
-def virtual_memory():
-    """Return statistics about system memory usage as a namedtuple
-    including the following fields, expressed in bytes:
-
-     - total:
-       total physical memory available.
-
-     - available:
-       the actual amount of available memory that can be given
-       instantly to processes that request more memory in bytes; this
-       is calculated by summing different memory values depending on
-       the platform (e.g. free + buffers + cached on Linux) and it is
-       supposed to be used to monitor actual memory usage in a cross
-       platform fashion.
-
-     - percent:
-       the percentage usage calculated as (total - available) / total * 100
-
-     - used:
-       memory used, calculated differently depending on the platform and
-       designed for informational purposes only:
-        OSX: active + inactive + wired
-        BSD: active + wired + cached
-        LINUX: total - free
-
-     - free:
-       memory not being used at all (zeroed) that is readily available;
-       note that this doesn't reflect the actual memory available
-       (use 'available' instead)
-
-    Platform-specific fields:
-
-     - active (UNIX):
-       memory currently in use or very recently used, and so it is in RAM.
-
-     - inactive (UNIX):
-       memory that is marked as not used.
-
-     - buffers (BSD, Linux):
-       cache for things like file system metadata.
-
-     - cached (BSD, OSX):
-       cache for various things.
-
-     - wired (OSX, BSD):
-       memory that is marked to always stay in RAM. It is never moved to disk.
-
-     - shared (BSD):
-       memory that may be simultaneously accessed by multiple processes.
-
-    The sum of 'used' and 'available' does not necessarily equal total.
-    On Windows 'available' and 'free' are the same.
-    """
-    global _TOTAL_PHYMEM
-    ret = _psplatform.virtual_memory()
-    # cached for later use in Process.memory_percent()
-    _TOTAL_PHYMEM = ret.total
-    return ret
-
-
-def swap_memory():
-    """Return system swap memory statistics as a namedtuple including
-    the following fields:
-
-     - total:   total swap memory in bytes
-     - used:    used swap memory in bytes
-     - free:    free swap memory in bytes
-     - percent: the percentage usage
-     - sin:     no. of bytes the system has swapped in from disk (cumulative)
-     - sout:    no. of bytes the system has swapped out from disk (cumulative)
-
-    'sin' and 'sout' on Windows are meaningless and always set to 0.
-    """
-    return _psplatform.swap_memory()
-
-
-# =====================================================================
-# --- disks/partitions related functions
-# =====================================================================
-
-def disk_usage(path):
-    """Return disk usage statistics about the given path as a namedtuple
-    including total, used and free space expressed in bytes plus the
-    percentage usage.
-    """
-    return _psplatform.disk_usage(path)
-
-
-def disk_partitions(all=False):
-    """Return mounted partitions as a list of
-    (device, mountpoint, fstype, opts) namedtuple.
-    'opts' field is a raw string separated by commas indicating mount
-    options which may vary depending on the platform.
-
-    If "all" parameter is False return physical devices only and ignore
-    all others.
-    """
-    return _psplatform.disk_partitions(all)
-
-
-def disk_io_counters(perdisk=False):
-    """Return system disk I/O statistics as a namedtuple including
-    the following fields:
-
-     - read_count:  number of reads
-     - write_count: number of writes
-     - read_bytes:  number of bytes read
-     - write_bytes: number of bytes written
-     - read_time:   time spent reading from disk (in milliseconds)
-     - write_time:  time spent writing to disk (in milliseconds)
-
-    If perdisk is True return the same information for every
-    physical disk installed on the system as a dictionary
-    with partition names as the keys and the namedtuple
-    described above as the values.
-
-    On recent Windows versions the 'diskperf -y' command may need to be
-    executed first, otherwise this function won't find any disk.
-    """
-    rawdict = _psplatform.disk_io_counters()
-    if not rawdict:
-        raise RuntimeError("couldn't find any physical disk")
-    if perdisk:
-        for disk, fields in rawdict.items():
-            rawdict[disk] = _nt_sys_diskio(*fields)
-        return rawdict
-    else:
-        return _nt_sys_diskio(*[sum(x) for x in zip(*rawdict.values())])
-
-
-# =====================================================================
-# --- network related functions
-# =====================================================================
-
-def net_io_counters(pernic=False):
-    """Return network I/O statistics as a namedtuple including
-    the following fields:
-
-     - bytes_sent:   number of bytes sent
-     - bytes_recv:   number of bytes received
-     - packets_sent: number of packets sent
-     - packets_recv: number of packets received
-     - errin:        total number of errors while receiving
-     - errout:       total number of errors while sending
-     - dropin:       total number of incoming packets which were dropped
-     - dropout:      total number of outgoing packets which were dropped
-                     (always 0 on OSX and BSD)
-
-    If pernic is True return the same information for every
-    network interface installed on the system as a dictionary
-    with network interface names as the keys and the namedtuple
-    described above as the values.
-    """
-    rawdict = _psplatform.net_io_counters()
-    if not rawdict:
-        raise RuntimeError("couldn't find any network interface")
-    if pernic:
-        for nic, fields in rawdict.items():
-            rawdict[nic] = _nt_sys_netio(*fields)
-        return rawdict
-    else:
-        return _nt_sys_netio(*[sum(x) for x in zip(*rawdict.values())])
-
-
-def net_connections(kind='inet'):
-    """Return system-wide connections as a list of
-    (fd, family, type, laddr, raddr, status, pid) namedtuples.
-    In case of limited privileges 'fd' and 'pid' may be set to -1
-    and None respectively.
-    The 'kind' parameter filters for connections that fit the
-    following criteria:
-
-    Kind Value      Connections using
-    inet            IPv4 and IPv6
-    inet4           IPv4
-    inet6           IPv6
-    tcp             TCP
-    tcp4            TCP over IPv4
-    tcp6            TCP over IPv6
-    udp             UDP
-    udp4            UDP over IPv4
-    udp6            UDP over IPv6
-    unix            UNIX socket (both UDP and TCP protocols)
-    all             the sum of all the possible families and protocols
-    """
-    return _psplatform.net_connections(kind)
-
-
-# =====================================================================
-# --- other system related functions
-# =====================================================================
-
-
-def boot_time():
-    """Return the system boot time expressed in seconds since the epoch.
-    This is also available as psutil.BOOT_TIME.
-    """
-    # Note: we are not caching this because it is subject to
-    # system clock updates.
-    return _psplatform.boot_time()
-
-
-def users():
-    """Return users currently connected on the system as a list of
-    namedtuples including the following fields.
-
-     - user: the name of the user
-     - terminal: the tty or pseudo-tty associated with the user, if any.
-     - host: the host name associated with the entry, if any.
-     - started: the creation time as a floating point number expressed in
-       seconds since the epoch.
-    """
-    return _psplatform.users()
-
-
-# =====================================================================
-# --- deprecated functions
-# =====================================================================
-
-@_deprecated(replacement="psutil.pids()")
-def get_pid_list():
-    return pids()
-
-
-@_deprecated(replacement="list(process_iter())")
-def get_process_list():
-    return list(process_iter())
-
-
-@_deprecated(replacement="psutil.users()")
-def get_users():
-    return users()
-
-
-@_deprecated(replacement="psutil.virtual_memory()")
-def phymem_usage():
-    """Return the amount of total, used and free physical memory
-    on the system in bytes plus the percentage usage.
-    Deprecated; use psutil.virtual_memory() instead.
-    """
-    return virtual_memory()
-
-
-@_deprecated(replacement="psutil.swap_memory()")
-def virtmem_usage():
-    return swap_memory()
-
-
-@_deprecated(replacement="psutil.phymem_usage().free")
-def avail_phymem():
-    return phymem_usage().free
-
-
-@_deprecated(replacement="psutil.phymem_usage().used")
-def used_phymem():
-    return phymem_usage().used
-
-
-@_deprecated(replacement="psutil.virtmem_usage().total")
-def total_virtmem():
-    return virtmem_usage().total
-
-
-@_deprecated(replacement="psutil.virtmem_usage().used")
-def used_virtmem():
-    return virtmem_usage().used
-
-
-@_deprecated(replacement="psutil.virtmem_usage().free")
-def avail_virtmem():
-    return virtmem_usage().free
-
-
-@_deprecated(replacement="psutil.net_io_counters()")
-def network_io_counters(pernic=False):
-    return net_io_counters(pernic)
-
-
-def test():
-    """List info of all currently running processes emulating ps aux
-    output.
-    """
-    import datetime
-
-    today_day = datetime.date.today()
-    templ = "%-10s %5s %4s %4s %7s %7s %-13s %5s %7s  %s"
-    attrs = ['pid', 'cpu_percent', 'memory_percent', 'name', 'cpu_times',
-             'create_time', 'memory_info']
-    if _POSIX:
-        attrs.append('uids')
-        attrs.append('terminal')
-    print(templ % ("USER", "PID", "%CPU", "%MEM", "VSZ", "RSS", "TTY",
-                   "START", "TIME", "COMMAND"))
-    for p in process_iter():
-        try:
-            pinfo = p.as_dict(attrs, ad_value='')
-        except NoSuchProcess:
-            pass
-        else:
-            if pinfo['create_time']:
-                ctime = datetime.datetime.fromtimestamp(pinfo['create_time'])
-                if ctime.date() == today_day:
-                    ctime = ctime.strftime("%H:%M")
-                else:
-                    ctime = ctime.strftime("%b%d")
-            else:
-                ctime = ''
-            cputime = time.strftime("%M:%S",
-                                    time.localtime(sum(pinfo['cpu_times'])))
-            try:
-                user = p.username()
-            except KeyError:
-                if _POSIX:
-                    if pinfo['uids']:
-                        user = str(pinfo['uids'].real)
-                    else:
-                        user = ''
-                else:
-                    raise
-            except Error:
-                user = ''
-            if _WINDOWS and '\\' in user:
-                user = user.split('\\')[1]
-            vms = pinfo['memory_info'] and \
-                int(pinfo['memory_info'].vms / 1024) or '?'
-            rss = pinfo['memory_info'] and \
-                int(pinfo['memory_info'].rss / 1024) or '?'
-            memp = pinfo['memory_percent'] and \
-                round(pinfo['memory_percent'], 1) or '?'
-            print(templ % (
-                user[:10],
-                pinfo['pid'],
-                pinfo['cpu_percent'],
-                memp,
-                vms,
-                rss,
-                pinfo.get('terminal', '') or '?',
-                ctime,
-                cputime,
-                pinfo['name'].strip() or '?'))
-
-
-def _replace_module():
-    """Dirty hack to replace the module object in order to access
-    deprecated module constants, see:
-    http://www.dr-josiah.com/2013/12/properties-on-python-modules.html
-    """
-    class ModuleWrapper(object):
-
-        def __repr__(self):
-            return repr(self._module)
-        __str__ = __repr__
-
-        @property
-        def NUM_CPUS(self):
-            msg = "NUM_CPUS constant is deprecated; use cpu_count() instead"
-            warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
-            return cpu_count()
-
-        @property
-        def BOOT_TIME(self):
-            msg = "BOOT_TIME constant is deprecated; use boot_time() instead"
-            warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
-            return boot_time()
-
-        @property
-        def TOTAL_PHYMEM(self):
-            msg = "TOTAL_PHYMEM constant is deprecated; " \
-                  "use virtual_memory().total instead"
-            warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
-            return virtual_memory().total
-
-    mod = ModuleWrapper()
-    mod.__dict__ = globals()
-    mod._module = sys.modules[__name__]
-    sys.modules[__name__] = mod
-
-
-_replace_module()
-del memoize, division, _replace_module
-if sys.version_info < (3, 0):
-    del num
-
-if __name__ == "__main__":
-    test()
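The deleted psutil/__init__.py above is the library's public system API (cpu_percent, virtual_memory, disk_io_counters, net_io_counters and friends). For reference, a minimal usage sketch of those calls against an installed psutil package (assuming psutil is available on the test node; printed values are illustrative):

    import psutil

    # The first non-blocking cpu_percent() call returns a meaningless 0.0,
    # so use a blocking interval for a one-shot reading.
    print(psutil.cpu_percent(interval=1))               # system-wide, e.g. 7.3
    print(psutil.cpu_percent(interval=1, percpu=True))  # one float per logical CPU

    vm = psutil.virtual_memory()
    print(vm.total, vm.available, vm.percent)           # bytes, bytes, percent used

    print(psutil.disk_io_counters(perdisk=True))        # dict keyed by disk name
    print(psutil.net_io_counters())                      # aggregated counters namedtuple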
diff --git a/io_scenario/libs/ubuntu_14_04_x64/psutil/_common.py b/io_scenario/libs/ubuntu_14_04_x64/psutil/_common.py
deleted file mode 100644
index 92d0fd0..0000000
--- a/io_scenario/libs/ubuntu_14_04_x64/psutil/_common.py
+++ /dev/null
@@ -1,256 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Common objects shared by all _ps* modules."""
-
-from __future__ import division
-import errno
-import functools
-import os
-import socket
-import stat
-import warnings
-try:
-    import threading
-except ImportError:
-    import dummy_threading as threading
-
-from collections import namedtuple
-from socket import AF_INET, SOCK_STREAM, SOCK_DGRAM
-
-# --- constants
-
-AF_INET6 = getattr(socket, 'AF_INET6', None)
-AF_UNIX = getattr(socket, 'AF_UNIX', None)
-
-STATUS_RUNNING = "running"
-STATUS_SLEEPING = "sleeping"
-STATUS_DISK_SLEEP = "disk-sleep"
-STATUS_STOPPED = "stopped"
-STATUS_TRACING_STOP = "tracing-stop"
-STATUS_ZOMBIE = "zombie"
-STATUS_DEAD = "dead"
-STATUS_WAKE_KILL = "wake-kill"
-STATUS_WAKING = "waking"
-STATUS_IDLE = "idle"  # BSD
-STATUS_LOCKED = "locked"  # BSD
-STATUS_WAITING = "waiting"  # BSD
-
-CONN_ESTABLISHED = "ESTABLISHED"
-CONN_SYN_SENT = "SYN_SENT"
-CONN_SYN_RECV = "SYN_RECV"
-CONN_FIN_WAIT1 = "FIN_WAIT1"
-CONN_FIN_WAIT2 = "FIN_WAIT2"
-CONN_TIME_WAIT = "TIME_WAIT"
-CONN_CLOSE = "CLOSE"
-CONN_CLOSE_WAIT = "CLOSE_WAIT"
-CONN_LAST_ACK = "LAST_ACK"
-CONN_LISTEN = "LISTEN"
-CONN_CLOSING = "CLOSING"
-CONN_NONE = "NONE"
-
-
-# --- functions
-
-def usage_percent(used, total, _round=None):
-    """Calculate percentage usage of 'used' against 'total'."""
-    try:
-        ret = (used / total) * 100
-    except ZeroDivisionError:
-        ret = 0
-    if _round is not None:
-        return round(ret, _round)
-    else:
-        return ret
-
-
-def memoize(fun):
-    """A simple memoize decorator for functions supporting (hashable)
-    positional arguments.
-    It also provides a cache_clear() function for clearing the cache:
-
-    >>> @memoize
-    ... def foo():
-    ...     return 1
-    ...
-    >>> foo()
-    1
-    >>> foo.cache_clear()
-    >>>
-    """
-    @functools.wraps(fun)
-    def wrapper(*args, **kwargs):
-        key = (args, frozenset(sorted(kwargs.items())))
-        lock.acquire()
-        try:
-            try:
-                return cache[key]
-            except KeyError:
-                ret = cache[key] = fun(*args, **kwargs)
-        finally:
-            lock.release()
-        return ret
-
-    def cache_clear():
-        """Clear cache."""
-        lock.acquire()
-        try:
-            cache.clear()
-        finally:
-            lock.release()
-
-    lock = threading.RLock()
-    cache = {}
-    wrapper.cache_clear = cache_clear
-    return wrapper
-
-
-# http://code.activestate.com/recipes/577819-deprecated-decorator/
-def deprecated(replacement=None):
-    """A decorator which can be used to mark functions as deprecated."""
-    def outer(fun):
-        msg = "psutil.%s is deprecated" % fun.__name__
-        if replacement is not None:
-            msg += "; use %s instead" % replacement
-        if fun.__doc__ is None:
-            fun.__doc__ = msg
-
-        @functools.wraps(fun)
-        def inner(*args, **kwargs):
-            warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
-            return fun(*args, **kwargs)
-
-        return inner
-    return outer
-
-
-def deprecated_method(replacement):
-    """A decorator which can be used to mark a method as deprecated
-    'replcement' is the method name which will be called instead.
-    """
-    def outer(fun):
-        msg = "%s() is deprecated; use %s() instead" % (
-            fun.__name__, replacement)
-        if fun.__doc__ is None:
-            fun.__doc__ = msg
-
-        @functools.wraps(fun)
-        def inner(self, *args, **kwargs):
-            warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
-            return getattr(self, replacement)(*args, **kwargs)
-        return inner
-    return outer
-
-
-def isfile_strict(path):
-    """Same as os.path.isfile() but does not swallow EACCES / EPERM
-    exceptions, see:
-    http://mail.python.org/pipermail/python-dev/2012-June/120787.html
-    """
-    try:
-        st = os.stat(path)
-    except OSError as err:
-        if err.errno in (errno.EPERM, errno.EACCES):
-            raise
-        return False
-    else:
-        return stat.S_ISREG(st.st_mode)
-
-
-# --- Process.connections() 'kind' parameter mapping
-
-conn_tmap = {
-    "all": ([AF_INET, AF_INET6, AF_UNIX], [SOCK_STREAM, SOCK_DGRAM]),
-    "tcp": ([AF_INET, AF_INET6], [SOCK_STREAM]),
-    "tcp4": ([AF_INET], [SOCK_STREAM]),
-    "udp": ([AF_INET, AF_INET6], [SOCK_DGRAM]),
-    "udp4": ([AF_INET], [SOCK_DGRAM]),
-    "inet": ([AF_INET, AF_INET6], [SOCK_STREAM, SOCK_DGRAM]),
-    "inet4": ([AF_INET], [SOCK_STREAM, SOCK_DGRAM]),
-    "inet6": ([AF_INET6], [SOCK_STREAM, SOCK_DGRAM]),
-}
-
-if AF_INET6 is not None:
-    conn_tmap.update({
-        "tcp6": ([AF_INET6], [SOCK_STREAM]),
-        "udp6": ([AF_INET6], [SOCK_DGRAM]),
-    })
-
-if AF_UNIX is not None:
-    conn_tmap.update({
-        "unix": ([AF_UNIX], [SOCK_STREAM, SOCK_DGRAM]),
-    })
-
-del AF_INET, AF_INET6, AF_UNIX, SOCK_STREAM, SOCK_DGRAM, socket
-
-
-# --- namedtuples for psutil.* system-related functions
-
-# psutil.swap_memory()
-sswap = namedtuple('sswap', ['total', 'used', 'free', 'percent', 'sin',
-                             'sout'])
-# psutil.disk_usage()
-sdiskusage = namedtuple('sdiskusage', ['total', 'used', 'free', 'percent'])
-# psutil.disk_io_counters()
-sdiskio = namedtuple('sdiskio', ['read_count', 'write_count',
-                                 'read_bytes', 'write_bytes',
-                                 'read_time', 'write_time'])
-# psutil.disk_partitions()
-sdiskpart = namedtuple('sdiskpart', ['device', 'mountpoint', 'fstype', 'opts'])
-# psutil.net_io_counters()
-snetio = namedtuple('snetio', ['bytes_sent', 'bytes_recv',
-                               'packets_sent', 'packets_recv',
-                               'errin', 'errout',
-                               'dropin', 'dropout'])
-# psutil.users()
-suser = namedtuple('suser', ['name', 'terminal', 'host', 'started'])
-# psutil.net_connections()
-sconn = namedtuple('sconn', ['fd', 'family', 'type', 'laddr', 'raddr',
-                             'status', 'pid'])
-
-
-# --- namedtuples for psutil.Process methods
-
-# psutil.Process.memory_info()
-pmem = namedtuple('pmem', ['rss', 'vms'])
-# psutil.Process.cpu_times()
-pcputimes = namedtuple('pcputimes', ['user', 'system'])
-# psutil.Process.open_files()
-popenfile = namedtuple('popenfile', ['path', 'fd'])
-# psutil.Process.threads()
-pthread = namedtuple('pthread', ['id', 'user_time', 'system_time'])
-# psutil.Process.uids()
-puids = namedtuple('puids', ['real', 'effective', 'saved'])
-# psutil.Process.gids()
-pgids = namedtuple('pgids', ['real', 'effective', 'saved'])
-# psutil.Process.io_counters()
-pio = namedtuple('pio', ['read_count', 'write_count',
-                         'read_bytes', 'write_bytes'])
-# psutil.Process.ionice()
-pionice = namedtuple('pionice', ['ioclass', 'value'])
-# psutil.Process.ctx_switches()
-pctxsw = namedtuple('pctxsw', ['voluntary', 'involuntary'])
-
-
-# --- misc
-
-# backward compatibility layer for Process.connections() ntuple
-class pconn(
-    namedtuple('pconn',
-               ['fd', 'family', 'type', 'laddr', 'raddr', 'status'])):
-    __slots__ = ()
-
-    @property
-    def local_address(self):
-        warnings.warn("'local_address' field is deprecated; use 'laddr'"
-                      "instead", category=DeprecationWarning, stacklevel=2)
-        return self.laddr
-
-    @property
-    def remote_address(self):
-        warnings.warn("'remote_address' field is deprecated; use 'raddr'"
-                      "instead", category=DeprecationWarning, stacklevel=2)
-        return self.raddr
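_common.py collects the helpers shared by the platform modules: usage_percent(), the memoize decorator and the deprecation decorators. A small sketch of memoize() and usage_percent() as defined above (assuming the module is still importable as psutil._common):

    from psutil._common import memoize, usage_percent

    @memoize
    def expensive(key):
        print("computing", key)   # printed only on the first call per key
        return key * 2

    expensive("a")                # computes and caches
    expensive("a")                # served from the cache
    expensive.cache_clear()       # empties the cache

    print(usage_percent(30, 120, _round=1))   # -> 25.0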
diff --git a/io_scenario/libs/ubuntu_14_04_x64/psutil/_compat.py b/io_scenario/libs/ubuntu_14_04_x64/psutil/_compat.py
deleted file mode 100644
index 84fd9ca..0000000
--- a/io_scenario/libs/ubuntu_14_04_x64/psutil/_compat.py
+++ /dev/null
@@ -1,203 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Module which provides compatibility with older Python versions."""
-
-__all__ = ["PY3", "int", "long", "xrange", "exec_", "callable", "lru_cache"]
-
-import collections
-import functools
-import sys
-try:
-    import __builtin__
-except ImportError:
-    import builtins as __builtin__  # py3
-
-PY3 = sys.version_info[0] == 3
-
-if PY3:
-    int = int
-    long = int
-    xrange = range
-    unicode = str
-    basestring = str
-    exec_ = getattr(__builtin__, "exec")
-else:
-    int = int
-    long = long
-    xrange = xrange
-    unicode = unicode
-    basestring = basestring
-
-    def exec_(code, globs=None, locs=None):
-        if globs is None:
-            frame = sys._getframe(1)
-            globs = frame.f_globals
-            if locs is None:
-                locs = frame.f_locals
-            del frame
-        elif locs is None:
-            locs = globs
-        exec("""exec code in globs, locs""")
-
-
-# removed in 3.0, reintroduced in 3.2
-try:
-    callable = callable
-except NameError:
-    def callable(obj):
-        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
-
-
-# --- stdlib additions
-
-
-# py 3.2 functools.lru_cache
-# Taken from: http://code.activestate.com/recipes/578078
-# Credit: Raymond Hettinger
-try:
-    from functools import lru_cache
-except ImportError:
-    try:
-        from threading import RLock
-    except ImportError:
-        from dummy_threading import RLock
-
-    _CacheInfo = collections.namedtuple(
-        "CacheInfo", ["hits", "misses", "maxsize", "currsize"])
-
-    class _HashedSeq(list):
-        __slots__ = 'hashvalue'
-
-        def __init__(self, tup, hash=hash):
-            self[:] = tup
-            self.hashvalue = hash(tup)
-
-        def __hash__(self):
-            return self.hashvalue
-
-    def _make_key(args, kwds, typed,
-                  kwd_mark=(object(), ),
-                  fasttypes=set((int, str, frozenset, type(None))),
-                  sorted=sorted, tuple=tuple, type=type, len=len):
-        key = args
-        if kwds:
-            sorted_items = sorted(kwds.items())
-            key += kwd_mark
-            for item in sorted_items:
-                key += item
-        if typed:
-            key += tuple(type(v) for v in args)
-            if kwds:
-                key += tuple(type(v) for k, v in sorted_items)
-        elif len(key) == 1 and type(key[0]) in fasttypes:
-            return key[0]
-        return _HashedSeq(key)
-
-    def lru_cache(maxsize=100, typed=False):
-        """Least-recently-used cache decorator, see:
-        http://docs.python.org/3/library/functools.html#functools.lru_cache
-        """
-        def decorating_function(user_function):
-            cache = dict()
-            stats = [0, 0]
-            HITS, MISSES = 0, 1
-            make_key = _make_key
-            cache_get = cache.get
-            _len = len
-            lock = RLock()
-            root = []
-            root[:] = [root, root, None, None]
-            nonlocal_root = [root]
-            PREV, NEXT, KEY, RESULT = 0, 1, 2, 3
-            if maxsize == 0:
-                def wrapper(*args, **kwds):
-                    result = user_function(*args, **kwds)
-                    stats[MISSES] += 1
-                    return result
-            elif maxsize is None:
-                def wrapper(*args, **kwds):
-                    key = make_key(args, kwds, typed)
-                    result = cache_get(key, root)
-                    if result is not root:
-                        stats[HITS] += 1
-                        return result
-                    result = user_function(*args, **kwds)
-                    cache[key] = result
-                    stats[MISSES] += 1
-                    return result
-            else:
-                def wrapper(*args, **kwds):
-                    if kwds or typed:
-                        key = make_key(args, kwds, typed)
-                    else:
-                        key = args
-                    lock.acquire()
-                    try:
-                        link = cache_get(key)
-                        if link is not None:
-                            root, = nonlocal_root
-                            link_prev, link_next, key, result = link
-                            link_prev[NEXT] = link_next
-                            link_next[PREV] = link_prev
-                            last = root[PREV]
-                            last[NEXT] = root[PREV] = link
-                            link[PREV] = last
-                            link[NEXT] = root
-                            stats[HITS] += 1
-                            return result
-                    finally:
-                        lock.release()
-                    result = user_function(*args, **kwds)
-                    lock.acquire()
-                    try:
-                        root, = nonlocal_root
-                        if key in cache:
-                            pass
-                        elif _len(cache) >= maxsize:
-                            oldroot = root
-                            oldroot[KEY] = key
-                            oldroot[RESULT] = result
-                            root = nonlocal_root[0] = oldroot[NEXT]
-                            oldkey = root[KEY]
-                            root[KEY] = root[RESULT] = None
-                            del cache[oldkey]
-                            cache[key] = oldroot
-                        else:
-                            last = root[PREV]
-                            link = [last, root, key, result]
-                            last[NEXT] = root[PREV] = cache[key] = link
-                        stats[MISSES] += 1
-                    finally:
-                        lock.release()
-                    return result
-
-            def cache_info():
-                """Report cache statistics"""
-                lock.acquire()
-                try:
-                    return _CacheInfo(stats[HITS], stats[MISSES], maxsize,
-                                      len(cache))
-                finally:
-                    lock.release()
-
-            def cache_clear():
-                """Clear the cache and cache statistics"""
-                lock.acquire()
-                try:
-                    cache.clear()
-                    root = nonlocal_root[0]
-                    root[:] = [root, root, None, None]
-                    stats[:] = [0, 0]
-                finally:
-                    lock.release()
-
-            wrapper.__wrapped__ = user_function
-            wrapper.cache_info = cache_info
-            wrapper.cache_clear = cache_clear
-            return functools.update_wrapper(wrapper, user_function)
-
-        return decorating_function
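_compat.py mostly exists to backport functools.lru_cache for Python < 3.2; the decorator behaves like the stdlib version. A short sketch (it works the same whether the stdlib implementation or the backport above is picked up):

    from psutil._compat import lru_cache

    @lru_cache(maxsize=128)
    def fib(n):
        return n if n < 2 else fib(n - 1) + fib(n - 2)

    print(fib(30))            # 832040
    print(fib.cache_info())   # CacheInfo(hits=..., misses=..., maxsize=128, currsize=...)
    fib.cache_clear()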
diff --git a/io_scenario/libs/ubuntu_14_04_x64/psutil/_psbsd.py b/io_scenario/libs/ubuntu_14_04_x64/psutil/_psbsd.py
deleted file mode 100644
index 87ae211..0000000
--- a/io_scenario/libs/ubuntu_14_04_x64/psutil/_psbsd.py
+++ /dev/null
@@ -1,410 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""FreeBSD platform implementation."""
-
-import errno
-import functools
-import os
-import sys
-from collections import namedtuple
-
-from psutil import _common
-from psutil import _psposix
-from psutil._common import conn_tmap, usage_percent
-import _psutil_bsd as cext
-import _psutil_posix
-
-
-__extra__all__ = []
-
-# --- constants
-
-PROC_STATUSES = {
-    cext.SSTOP: _common.STATUS_STOPPED,
-    cext.SSLEEP: _common.STATUS_SLEEPING,
-    cext.SRUN: _common.STATUS_RUNNING,
-    cext.SIDL: _common.STATUS_IDLE,
-    cext.SWAIT: _common.STATUS_WAITING,
-    cext.SLOCK: _common.STATUS_LOCKED,
-    cext.SZOMB: _common.STATUS_ZOMBIE,
-}
-
-TCP_STATUSES = {
-    cext.TCPS_ESTABLISHED: _common.CONN_ESTABLISHED,
-    cext.TCPS_SYN_SENT: _common.CONN_SYN_SENT,
-    cext.TCPS_SYN_RECEIVED: _common.CONN_SYN_RECV,
-    cext.TCPS_FIN_WAIT_1: _common.CONN_FIN_WAIT1,
-    cext.TCPS_FIN_WAIT_2: _common.CONN_FIN_WAIT2,
-    cext.TCPS_TIME_WAIT: _common.CONN_TIME_WAIT,
-    cext.TCPS_CLOSED: _common.CONN_CLOSE,
-    cext.TCPS_CLOSE_WAIT: _common.CONN_CLOSE_WAIT,
-    cext.TCPS_LAST_ACK: _common.CONN_LAST_ACK,
-    cext.TCPS_LISTEN: _common.CONN_LISTEN,
-    cext.TCPS_CLOSING: _common.CONN_CLOSING,
-    cext.PSUTIL_CONN_NONE: _common.CONN_NONE,
-}
-
-PAGESIZE = os.sysconf("SC_PAGE_SIZE")
-
-# extend base mem ntuple with BSD-specific memory metrics
-svmem = namedtuple(
-    'svmem', ['total', 'available', 'percent', 'used', 'free',
-              'active', 'inactive', 'buffers', 'cached', 'shared', 'wired'])
-scputimes = namedtuple(
-    'scputimes', ['user', 'nice', 'system', 'idle', 'irq'])
-pextmem = namedtuple('pextmem', ['rss', 'vms', 'text', 'data', 'stack'])
-pmmap_grouped = namedtuple(
-    'pmmap_grouped', 'path rss, private, ref_count, shadow_count')
-pmmap_ext = namedtuple(
-    'pmmap_ext', 'addr, perms path rss, private, ref_count, shadow_count')
-
-# set later from __init__.py
-NoSuchProcess = None
-AccessDenied = None
-TimeoutExpired = None
-
-
-def virtual_memory():
-    """System virtual memory as a namedtuple."""
-    mem = cext.virtual_mem()
-    total, free, active, inactive, wired, cached, buffers, shared = mem
-    avail = inactive + cached + free
-    used = active + wired + cached
-    percent = usage_percent((total - avail), total, _round=1)
-    return svmem(total, avail, percent, used, free,
-                 active, inactive, buffers, cached, shared, wired)
-
-
-def swap_memory():
-    """System swap memory as (total, used, free, sin, sout) namedtuple."""
-    total, used, free, sin, sout = [x * PAGESIZE for x in cext.swap_mem()]
-    percent = usage_percent(used, total, _round=1)
-    return _common.sswap(total, used, free, percent, sin, sout)
-
-
-def cpu_times():
-    """Return system per-CPU times as a namedtuple"""
-    user, nice, system, idle, irq = cext.cpu_times()
-    return scputimes(user, nice, system, idle, irq)
-
-
-if hasattr(cext, "per_cpu_times"):
-    def per_cpu_times():
-        """Return system CPU times as a namedtuple"""
-        ret = []
-        for cpu_t in cext.per_cpu_times():
-            user, nice, system, idle, irq = cpu_t
-            item = scputimes(user, nice, system, idle, irq)
-            ret.append(item)
-        return ret
-else:
-    # XXX
-    # Ok, this is very dirty.
-    # On FreeBSD < 8 we cannot gather per-cpu information, see:
-    # https://github.com/giampaolo/psutil/issues/226
-    # If num cpus > 1, on first call we return single cpu times to avoid a
-    # crash at psutil import time.
-    # Next calls will fail with NotImplementedError
-    def per_cpu_times():
-        if cpu_count_logical() == 1:
-            return [cpu_times()]
-        if per_cpu_times.__called__:
-            raise NotImplementedError("supported only starting from FreeBSD 8")
-        per_cpu_times.__called__ = True
-        return [cpu_times()]
-
-    per_cpu_times.__called__ = False
-
-
-def cpu_count_logical():
-    """Return the number of logical CPUs in the system."""
-    return cext.cpu_count_logical()
-
-
-def cpu_count_physical():
-    """Return the number of physical CPUs in the system."""
-    # From the C module we'll get an XML string similar to this:
-    # http://manpages.ubuntu.com/manpages/precise/man4/smp.4freebsd.html
-    # We may get None in case "sysctl kern.sched.topology_spec"
-    # is not supported on this BSD version, in which case we'll mimic
-    # os.cpu_count() and return None.
-    s = cext.cpu_count_phys()
-    if s is not None:
-        # get rid of padding chars appended at the end of the string
-        index = s.rfind("</groups>")
-        if index != -1:
-            s = s[:index + 9]
-            if sys.version_info >= (2, 5):
-                import xml.etree.ElementTree as ET
-                root = ET.fromstring(s)
-                return len(root.findall('group/children/group/cpu')) or None
-            else:
-                s = s[s.find('<children>'):]
-                return s.count("<cpu") or None
-
-
-def boot_time():
-    """The system boot time expressed in seconds since the epoch."""
-    return cext.boot_time()
-
-
-def disk_partitions(all=False):
-    retlist = []
-    partitions = cext.disk_partitions()
-    for partition in partitions:
-        device, mountpoint, fstype, opts = partition
-        if device == 'none':
-            device = ''
-        if not all:
-            if not os.path.isabs(device) or not os.path.exists(device):
-                continue
-        ntuple = _common.sdiskpart(device, mountpoint, fstype, opts)
-        retlist.append(ntuple)
-    return retlist
-
-
-def users():
-    retlist = []
-    rawlist = cext.users()
-    for item in rawlist:
-        user, tty, hostname, tstamp = item
-        if tty == '~':
-            continue  # reboot or shutdown
-        nt = _common.suser(user, tty or None, hostname, tstamp)
-        retlist.append(nt)
-    return retlist
-
-
-def net_connections(kind):
-    if kind not in _common.conn_tmap:
-        raise ValueError("invalid %r kind argument; choose between %s"
-                         % (kind, ', '.join([repr(x) for x in conn_tmap])))
-    families, types = conn_tmap[kind]
-    ret = []
-    rawlist = cext.net_connections()
-    for item in rawlist:
-        fd, fam, type, laddr, raddr, status, pid = item
-        # TODO: apply filter at C level
-        if fam in families and type in types:
-            status = TCP_STATUSES[status]
-            nt = _common.sconn(fd, fam, type, laddr, raddr, status, pid)
-            ret.append(nt)
-    return ret
-
-
-pids = cext.pids
-pid_exists = _psposix.pid_exists
-disk_usage = _psposix.disk_usage
-net_io_counters = cext.net_io_counters
-disk_io_counters = cext.disk_io_counters
-
-
-def wrap_exceptions(fun):
-    """Decorator which translates bare OSError exceptions into
-    NoSuchProcess and AccessDenied.
-    """
-    @functools.wraps(fun)
-    def wrapper(self, *args, **kwargs):
-        try:
-            return fun(self, *args, **kwargs)
-        except OSError as err:
-            # support for private module import
-            if NoSuchProcess is None or AccessDenied is None:
-                raise
-            if err.errno == errno.ESRCH:
-                raise NoSuchProcess(self.pid, self._name)
-            if err.errno in (errno.EPERM, errno.EACCES):
-                raise AccessDenied(self.pid, self._name)
-            raise
-    return wrapper
-
-
-class Process(object):
-    """Wrapper class around underlying C implementation."""
-
-    __slots__ = ["pid", "_name"]
-
-    def __init__(self, pid):
-        self.pid = pid
-        self._name = None
-
-    @wrap_exceptions
-    def name(self):
-        return cext.proc_name(self.pid)
-
-    @wrap_exceptions
-    def exe(self):
-        return cext.proc_exe(self.pid)
-
-    @wrap_exceptions
-    def cmdline(self):
-        return cext.proc_cmdline(self.pid)
-
-    @wrap_exceptions
-    def terminal(self):
-        tty_nr = cext.proc_tty_nr(self.pid)
-        tmap = _psposix._get_terminal_map()
-        try:
-            return tmap[tty_nr]
-        except KeyError:
-            return None
-
-    @wrap_exceptions
-    def ppid(self):
-        return cext.proc_ppid(self.pid)
-
-    @wrap_exceptions
-    def uids(self):
-        real, effective, saved = cext.proc_uids(self.pid)
-        return _common.puids(real, effective, saved)
-
-    @wrap_exceptions
-    def gids(self):
-        real, effective, saved = cext.proc_gids(self.pid)
-        return _common.pgids(real, effective, saved)
-
-    @wrap_exceptions
-    def cpu_times(self):
-        user, system = cext.proc_cpu_times(self.pid)
-        return _common.pcputimes(user, system)
-
-    @wrap_exceptions
-    def memory_info(self):
-        rss, vms = cext.proc_memory_info(self.pid)[:2]
-        return _common.pmem(rss, vms)
-
-    @wrap_exceptions
-    def memory_info_ex(self):
-        return pextmem(*cext.proc_memory_info(self.pid))
-
-    @wrap_exceptions
-    def create_time(self):
-        return cext.proc_create_time(self.pid)
-
-    @wrap_exceptions
-    def num_threads(self):
-        return cext.proc_num_threads(self.pid)
-
-    @wrap_exceptions
-    def num_ctx_switches(self):
-        return _common.pctxsw(*cext.proc_num_ctx_switches(self.pid))
-
-    @wrap_exceptions
-    def threads(self):
-        rawlist = cext.proc_threads(self.pid)
-        retlist = []
-        for thread_id, utime, stime in rawlist:
-            ntuple = _common.pthread(thread_id, utime, stime)
-            retlist.append(ntuple)
-        return retlist
-
-    @wrap_exceptions
-    def connections(self, kind='inet'):
-        if kind not in conn_tmap:
-            raise ValueError("invalid %r kind argument; choose between %s"
-                             % (kind, ', '.join([repr(x) for x in conn_tmap])))
-        families, types = conn_tmap[kind]
-        rawlist = cext.proc_connections(self.pid, families, types)
-        ret = []
-        for item in rawlist:
-            fd, fam, type, laddr, raddr, status = item
-            status = TCP_STATUSES[status]
-            nt = _common.pconn(fd, fam, type, laddr, raddr, status)
-            ret.append(nt)
-        return ret
-
-    @wrap_exceptions
-    def wait(self, timeout=None):
-        try:
-            return _psposix.wait_pid(self.pid, timeout)
-        except _psposix.TimeoutExpired:
-            # support for private module import
-            if TimeoutExpired is None:
-                raise
-            raise TimeoutExpired(timeout, self.pid, self._name)
-
-    @wrap_exceptions
-    def nice_get(self):
-        return _psutil_posix.getpriority(self.pid)
-
-    @wrap_exceptions
-    def nice_set(self, value):
-        return _psutil_posix.setpriority(self.pid, value)
-
-    @wrap_exceptions
-    def status(self):
-        code = cext.proc_status(self.pid)
-        if code in PROC_STATUSES:
-            return PROC_STATUSES[code]
-        # XXX is this legit? will we even ever get here?
-        return "?"
-
-    @wrap_exceptions
-    def io_counters(self):
-        rc, wc, rb, wb = cext.proc_io_counters(self.pid)
-        return _common.pio(rc, wc, rb, wb)
-
-    nt_mmap_grouped = namedtuple(
-        'mmap', 'path rss, private, ref_count, shadow_count')
-    nt_mmap_ext = namedtuple(
-        'mmap', 'addr, perms path rss, private, ref_count, shadow_count')
-
-    # FreeBSD < 8 does not support functions based on kinfo_getfile()
-    # and kinfo_getvmmap()
-    if hasattr(cext, 'proc_open_files'):
-
-        @wrap_exceptions
-        def open_files(self):
-            """Return files opened by process as a list of namedtuples."""
-            rawlist = cext.proc_open_files(self.pid)
-            return [_common.popenfile(path, fd) for path, fd in rawlist]
-
-        @wrap_exceptions
-        def cwd(self):
-            """Return process current working directory."""
-            # sometimes we get an empty string, in which case we turn
-            # it into None
-            return cext.proc_cwd(self.pid) or None
-
-        @wrap_exceptions
-        def memory_maps(self):
-            return cext.proc_memory_maps(self.pid)
-
-        @wrap_exceptions
-        def num_fds(self):
-            """Return the number of file descriptors opened by this process."""
-            return cext.proc_num_fds(self.pid)
-
-    else:
-        def _not_implemented(self):
-            raise NotImplementedError("supported only starting from FreeBSD 8")
-
-        open_files = _not_implemented
-        proc_cwd = _not_implemented
-        memory_maps = _not_implemented
-        num_fds = _not_implemented
-
-    @wrap_exceptions
-    def cpu_affinity_get(self):
-        return cext.proc_cpu_affinity_get(self.pid)
-
-    @wrap_exceptions
-    def cpu_affinity_set(self, cpus):
-        try:
-            cext.proc_cpu_affinity_set(self.pid, cpus)
-        except OSError as err:
-            # 'man cpuset_setaffinity' about EDEADLK:
-            # <<the call would leave a thread without a valid CPU to run
-            # on because the set does not overlap with the thread's
-            # anonymous mask>>
-            if err.errno in (errno.EINVAL, errno.EDEADLK):
-                allcpus = tuple(range(len(per_cpu_times())))
-                for cpu in cpus:
-                    if cpu not in allcpus:
-                        raise ValueError("invalid CPU #%i (choose between %s)"
-                                         % (cpu, allcpus))
-            raise
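The FreeBSD module wraps every Process method with wrap_exceptions so that a bare OSError coming from the C extension is re-raised as NoSuchProcess or AccessDenied. The same pattern in isolation, as a sketch with stand-in exception classes rather than psutil's real ones:

    import errno
    import functools

    class NoSuchProcess(Exception):
        pass

    class AccessDenied(Exception):
        pass

    def wrap_exceptions(fun):
        """Translate a bare OSError into process-level exceptions."""
        @functools.wraps(fun)
        def wrapper(self, *args, **kwargs):
            try:
                return fun(self, *args, **kwargs)
            except OSError as err:
                if err.errno == errno.ESRCH:                  # no such process
                    raise NoSuchProcess(self.pid)
                if err.errno in (errno.EPERM, errno.EACCES):  # insufficient rights
                    raise AccessDenied(self.pid)
                raise
        return wrapper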
diff --git a/io_scenario/libs/ubuntu_14_04_x64/psutil/_pslinux.py b/io_scenario/libs/ubuntu_14_04_x64/psutil/_pslinux.py
deleted file mode 100644
index 6bde29e..0000000
--- a/io_scenario/libs/ubuntu_14_04_x64/psutil/_pslinux.py
+++ /dev/null
@@ -1,1096 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Linux platform implementation."""
-
-from __future__ import division
-
-import base64
-import errno
-import functools
-import os
-import re
-import socket
-import struct
-import sys
-import warnings
-from collections import namedtuple, defaultdict
-
-from psutil import _common
-from psutil import _psposix
-from psutil._common import (isfile_strict, usage_percent, deprecated)
-from psutil._compat import PY3
-import _psutil_linux as cext
-import _psutil_posix
-
-
-__extra__all__ = [
-    # io prio constants
-    "IOPRIO_CLASS_NONE", "IOPRIO_CLASS_RT", "IOPRIO_CLASS_BE",
-    "IOPRIO_CLASS_IDLE",
-    # connection status constants
-    "CONN_ESTABLISHED", "CONN_SYN_SENT", "CONN_SYN_RECV", "CONN_FIN_WAIT1",
-    "CONN_FIN_WAIT2", "CONN_TIME_WAIT", "CONN_CLOSE", "CONN_CLOSE_WAIT",
-    "CONN_LAST_ACK", "CONN_LISTEN", "CONN_CLOSING",
-    # other
-    "phymem_buffers", "cached_phymem"]
-
-
-# --- constants
-
-HAS_PRLIMIT = hasattr(cext, "linux_prlimit")
-
-# RLIMIT_* constants, not guaranteed to be present on all kernels
-if HAS_PRLIMIT:
-    for name in dir(cext):
-        if name.startswith('RLIM'):
-            __extra__all__.append(name)
-
-# Number of clock ticks per second
-CLOCK_TICKS = os.sysconf("SC_CLK_TCK")
-PAGESIZE = os.sysconf("SC_PAGE_SIZE")
-BOOT_TIME = None  # set later
-DEFAULT_ENCODING = sys.getdefaultencoding()
-
-# ioprio_* constants http://linux.die.net/man/2/ioprio_get
-IOPRIO_CLASS_NONE = 0
-IOPRIO_CLASS_RT = 1
-IOPRIO_CLASS_BE = 2
-IOPRIO_CLASS_IDLE = 3
-
-# taken from /fs/proc/array.c
-PROC_STATUSES = {
-    "R": _common.STATUS_RUNNING,
-    "S": _common.STATUS_SLEEPING,
-    "D": _common.STATUS_DISK_SLEEP,
-    "T": _common.STATUS_STOPPED,
-    "t": _common.STATUS_TRACING_STOP,
-    "Z": _common.STATUS_ZOMBIE,
-    "X": _common.STATUS_DEAD,
-    "x": _common.STATUS_DEAD,
-    "K": _common.STATUS_WAKE_KILL,
-    "W": _common.STATUS_WAKING
-}
-
-# http://students.mimuw.edu.pl/lxr/source/include/net/tcp_states.h
-TCP_STATUSES = {
-    "01": _common.CONN_ESTABLISHED,
-    "02": _common.CONN_SYN_SENT,
-    "03": _common.CONN_SYN_RECV,
-    "04": _common.CONN_FIN_WAIT1,
-    "05": _common.CONN_FIN_WAIT2,
-    "06": _common.CONN_TIME_WAIT,
-    "07": _common.CONN_CLOSE,
-    "08": _common.CONN_CLOSE_WAIT,
-    "09": _common.CONN_LAST_ACK,
-    "0A": _common.CONN_LISTEN,
-    "0B": _common.CONN_CLOSING
-}
-
-# set later from __init__.py
-NoSuchProcess = None
-AccessDenied = None
-TimeoutExpired = None
-
-
-# --- named tuples
-
-def _get_cputimes_fields():
-    """Return a namedtuple of variable fields depending on the
-    CPU times available on this Linux kernel version which may be:
-    (user, nice, system, idle, iowait, irq, softirq, [steal, [guest,
-     [guest_nice]]])
-    """
-    with open('/proc/stat', 'rb') as f:
-        values = f.readline().split()[1:]
-    fields = ['user', 'nice', 'system', 'idle', 'iowait', 'irq', 'softirq']
-    vlen = len(values)
-    if vlen >= 8:
-        # Linux >= 2.6.11
-        fields.append('steal')
-    if vlen >= 9:
-        # Linux >= 2.6.24
-        fields.append('guest')
-    if vlen >= 10:
-        # Linux >= 3.2.0
-        fields.append('guest_nice')
-    return fields
-
-
-scputimes = namedtuple('scputimes', _get_cputimes_fields())
-
-svmem = namedtuple(
-    'svmem', ['total', 'available', 'percent', 'used', 'free',
-              'active', 'inactive', 'buffers', 'cached'])
-
-pextmem = namedtuple('pextmem', 'rss vms shared text lib data dirty')
-
-pmmap_grouped = namedtuple(
-    'pmmap_grouped', ['path', 'rss', 'size', 'pss', 'shared_clean',
-                      'shared_dirty', 'private_clean', 'private_dirty',
-                      'referenced', 'anonymous', 'swap'])
-
-pmmap_ext = namedtuple(
-    'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields))
-
-
-# --- system memory
-
-def virtual_memory():
-    total, free, buffers, shared, _, _ = cext.linux_sysinfo()
-    cached = active = inactive = None
-    with open('/proc/meminfo', 'rb') as f:
-        for line in f:
-            if line.startswith(b"Cached:"):
-                cached = int(line.split()[1]) * 1024
-            elif line.startswith(b"Active:"):
-                active = int(line.split()[1]) * 1024
-            elif line.startswith(b"Inactive:"):
-                inactive = int(line.split()[1]) * 1024
-            if (cached is not None
-                    and active is not None
-                    and inactive is not None):
-                break
-        else:
-            # we might get here when dealing with exotic Linux flavors, see:
-            # https://github.com/giampaolo/psutil/issues/313
-            msg = "'cached', 'active' and 'inactive' memory stats couldn't " \
-                  "be determined and were set to 0"
-            warnings.warn(msg, RuntimeWarning)
-            cached = active = inactive = 0
-    avail = free + buffers + cached
-    used = total - free
-    percent = usage_percent((total - avail), total, _round=1)
-    return svmem(total, avail, percent, used, free,
-                 active, inactive, buffers, cached)
-
-
-def swap_memory():
-    _, _, _, _, total, free = cext.linux_sysinfo()
-    used = total - free
-    percent = usage_percent(used, total, _round=1)
-    # get pgin/pgouts
-    with open("/proc/vmstat", "rb") as f:
-        sin = sout = None
-        for line in f:
-            # values are expressed in 4 kilobyte units; we want bytes instead
-            if line.startswith(b'pswpin'):
-                sin = int(line.split(b' ')[1]) * 4 * 1024
-            elif line.startswith(b'pswpout'):
-                sout = int(line.split(b' ')[1]) * 4 * 1024
-            if sin is not None and sout is not None:
-                break
-        else:
-            # we might get here when dealing with exotic Linux flavors, see:
-            # https://github.com/giampaolo/psutil/issues/313
-            msg = "'sin' and 'sout' swap memory stats couldn't " \
-                  "be determined and were set to 0"
-            warnings.warn(msg, RuntimeWarning)
-            sin = sout = 0
-    return _common.sswap(total, used, free, percent, sin, sout)
-
-
-@deprecated(replacement='psutil.virtual_memory().cached')
-def cached_phymem():
-    return virtual_memory().cached
-
-
-@deprecated(replacement='psutil.virtual_memory().buffers')
-def phymem_buffers():
-    return virtual_memory().buffers
-
-
-# --- CPUs
-
-def cpu_times():
-    """Return a named tuple representing the following system-wide
-    CPU times:
-    (user, nice, system, idle, iowait, irq, softirq [steal, [guest,
-     [guest_nice]]])
-    Last 3 fields may not be available on all Linux kernel versions.
-    """
-    with open('/proc/stat', 'rb') as f:
-        values = f.readline().split()
-    fields = values[1:len(scputimes._fields) + 1]
-    fields = [float(x) / CLOCK_TICKS for x in fields]
-    return scputimes(*fields)
-
-
-def per_cpu_times():
-    """Return a list of namedtuple representing the CPU times
-    for every CPU available on the system.
-    """
-    cpus = []
-    with open('/proc/stat', 'rb') as f:
-        # get rid of the first line which refers to system wide CPU stats
-        f.readline()
-        for line in f:
-            if line.startswith(b'cpu'):
-                values = line.split()
-                fields = values[1:len(scputimes._fields) + 1]
-                fields = [float(x) / CLOCK_TICKS for x in fields]
-                entry = scputimes(*fields)
-                cpus.append(entry)
-        return cpus
-
-
-def cpu_count_logical():
-    """Return the number of logical CPUs in the system."""
-    try:
-        return os.sysconf("SC_NPROCESSORS_ONLN")
-    except ValueError:
-        # as a second fallback we try to parse /proc/cpuinfo
-        num = 0
-        with open('/proc/cpuinfo', 'rb') as f:
-            for line in f:
-                if line.lower().startswith(b'processor'):
-                    num += 1
-
-        # unknown format (e.g. amrel/sparc architectures), see:
-        # https://github.com/giampaolo/psutil/issues/200
-        # try to parse /proc/stat as a last resort
-        if num == 0:
-            search = re.compile(r'cpu\d')
-            with open('/proc/stat', 'rt') as f:
-                for line in f:
-                    line = line.split(' ')[0]
-                    if search.match(line):
-                        num += 1
-
-        if num == 0:
-            # mimic os.cpu_count()
-            return None
-        return num
-
-
-def cpu_count_physical():
-    """Return the number of physical CPUs in the system."""
-    with open('/proc/cpuinfo', 'rb') as f:
-        found = set()
-        for line in f:
-            if line.lower().startswith(b'physical id'):
-                found.add(line.strip())
-    # mimic os.cpu_count()
-    return len(found) if found else None
-
-
-# --- other system functions
-
-def users():
-    """Return currently connected users as a list of namedtuples."""
-    retlist = []
-    rawlist = cext.users()
-    for item in rawlist:
-        user, tty, hostname, tstamp, user_process = item
-        # note: the underlying C function includes entries about
-        # system boot, run level and others.  We might want
-        # to use them in the future.
-        if not user_process:
-            continue
-        if hostname == ':0.0':
-            hostname = 'localhost'
-        nt = _common.suser(user, tty or None, hostname, tstamp)
-        retlist.append(nt)
-    return retlist
-
-
-def boot_time():
-    """Return the system boot time expressed in seconds since the epoch."""
-    global BOOT_TIME
-    with open('/proc/stat', 'rb') as f:
-        for line in f:
-            if line.startswith(b'btime'):
-                ret = float(line.strip().split()[1])
-                BOOT_TIME = ret
-                return ret
-        raise RuntimeError("line 'btime' not found")
-
-
-# --- processes
-
-def pids():
-    """Returns a list of PIDs currently running on the system."""
-    return [int(x) for x in os.listdir(b'/proc') if x.isdigit()]
-
-
-def pid_exists(pid):
-    """Check For the existence of a unix pid."""
-    return _psposix.pid_exists(pid)
-
-
-# --- network
-
-class Connections:
-    """A wrapper on top of /proc/net/* files, retrieving per-process
-    and system-wide open connections (TCP, UDP, UNIX) similarly to
-    "netstat -an".
-
-    Note: in case of UNIX sockets we're only able to determine the
-    local endpoint/path, not the one it's connected to.
-    According to [1] it would be possible, but not easy.
-
-    [1] http://serverfault.com/a/417946
-    """
-
-    def __init__(self):
-        tcp4 = ("tcp", socket.AF_INET, socket.SOCK_STREAM)
-        tcp6 = ("tcp6", socket.AF_INET6, socket.SOCK_STREAM)
-        udp4 = ("udp", socket.AF_INET, socket.SOCK_DGRAM)
-        udp6 = ("udp6", socket.AF_INET6, socket.SOCK_DGRAM)
-        unix = ("unix", socket.AF_UNIX, None)
-        self.tmap = {
-            "all": (tcp4, tcp6, udp4, udp6, unix),
-            "tcp": (tcp4, tcp6),
-            "tcp4": (tcp4,),
-            "tcp6": (tcp6,),
-            "udp": (udp4, udp6),
-            "udp4": (udp4,),
-            "udp6": (udp6,),
-            "unix": (unix,),
-            "inet": (tcp4, tcp6, udp4, udp6),
-            "inet4": (tcp4, udp4),
-            "inet6": (tcp6, udp6),
-        }
-
-    def get_proc_inodes(self, pid):
-        inodes = defaultdict(list)
-        for fd in os.listdir("/proc/%s/fd" % pid):
-            try:
-                inode = os.readlink("/proc/%s/fd/%s" % (pid, fd))
-            except OSError:
-                # TODO: need comment here
-                continue
-            else:
-                if inode.startswith('socket:['):
-                    # the process is using a socket
-                    inode = inode[8:][:-1]
-                    inodes[inode].append((pid, int(fd)))
-        return inodes
-
-    def get_all_inodes(self):
-        inodes = {}
-        for pid in pids():
-            try:
-                inodes.update(self.get_proc_inodes(pid))
-            except OSError as err:
-                # os.listdir() is gonna raise a lot of access denied
-                # exceptions in case of unprivileged user; that's fine
-                # as we'll just end up returning a connection with PID
-                # and fd set to None anyway.
-                # Both netstat -an and lsof do the same, so it's
-                # unlikely we can do any better.
-                # ENOENT just means a PID disappeared on us.
-                if err.errno not in (
-                        errno.ENOENT, errno.ESRCH, errno.EPERM, errno.EACCES):
-                    raise
-        return inodes
-
-    def decode_address(self, addr, family):
-        """Accept an "ip:port" address as displayed in /proc/net/*
-        and convert it into a human readable form, like:
-
-        "0500000A:0016" -> ("10.0.0.5", 22)
-        "0000000000000000FFFF00000100007F:9E49" -> ("::ffff:127.0.0.1", 40521)
-
-        The IP address portion is a four-byte hexadecimal number given in
-        host byte order; on little-endian hosts the least significant byte
-        is listed first, so we need to reverse the order of the bytes to
-        convert it to an IP address.
-        The port is represented as a two-byte hexadecimal number.
-
-        Reference:
-        http://linuxdevcenter.com/pub/a/linux/2000/11/16/LinuxAdmin.html
-        """
-        ip, port = addr.split(':')
-        port = int(port, 16)
-        # this usually refers to a local socket in listen mode with
-        # no end-points connected
-        if not port:
-            return ()
-        if PY3:
-            ip = ip.encode('ascii')
-        if family == socket.AF_INET:
-            # see: https://github.com/giampaolo/psutil/issues/201
-            if sys.byteorder == 'little':
-                ip = socket.inet_ntop(family, base64.b16decode(ip)[::-1])
-            else:
-                ip = socket.inet_ntop(family, base64.b16decode(ip))
-        else:  # IPv6
-            # old version - let's keep it, just in case...
-            # ip = ip.decode('hex')
-            # return socket.inet_ntop(socket.AF_INET6,
-            #          ''.join(ip[i:i+4][::-1] for i in xrange(0, 16, 4)))
-            ip = base64.b16decode(ip)
-            # see: https://github.com/giampaolo/psutil/issues/201
-            if sys.byteorder == 'little':
-                ip = socket.inet_ntop(
-                    socket.AF_INET6,
-                    struct.pack('>4I', *struct.unpack('<4I', ip)))
-            else:
-                ip = socket.inet_ntop(
-                    socket.AF_INET6,
-                    struct.pack('<4I', *struct.unpack('<4I', ip)))
-        return (ip, port)
-
-    def process_inet(self, file, family, type_, inodes, filter_pid=None):
-        """Parse /proc/net/tcp* and /proc/net/udp* files."""
-        if file.endswith('6') and not os.path.exists(file):
-            # IPv6 not supported
-            return
-        with open(file, 'rt') as f:
-            f.readline()  # skip the first line
-            for line in f:
-                _, laddr, raddr, status, _, _, _, _, _, inode = \
-                    line.split()[:10]
-                if inode in inodes:
-                    # # We assume inet sockets are unique, so we error
-                    # # out if there are multiple references to the
-                    # # same inode. We won't do this for UNIX sockets.
-                    # if len(inodes[inode]) > 1 and family != socket.AF_UNIX:
-                    #     raise ValueError("ambiguos inode with multiple "
-                    #                      "PIDs references")
-                    pid, fd = inodes[inode][0]
-                else:
-                    pid, fd = None, -1
-                if filter_pid is not None and filter_pid != pid:
-                    continue
-                else:
-                    if type_ == socket.SOCK_STREAM:
-                        status = TCP_STATUSES[status]
-                    else:
-                        status = _common.CONN_NONE
-                    laddr = self.decode_address(laddr, family)
-                    raddr = self.decode_address(raddr, family)
-                    yield (fd, family, type_, laddr, raddr, status, pid)
-
-    def process_unix(self, file, family, inodes, filter_pid=None):
-        """Parse /proc/net/unix files."""
-        with open(file, 'rt') as f:
-            f.readline()  # skip the first line
-            for line in f:
-                tokens = line.split()
-                _, _, _, _, type_, _, inode = tokens[0:7]
-                if inode in inodes:
-                    # With UNIX sockets we can have a single inode
-                    # referencing many file descriptors.
-                    pairs = inodes[inode]
-                else:
-                    pairs = [(None, -1)]
-                for pid, fd in pairs:
-                    if filter_pid is not None and filter_pid != pid:
-                        continue
-                    else:
-                        if len(tokens) == 8:
-                            path = tokens[-1]
-                        else:
-                            path = ""
-                        type_ = int(type_)
-                        raddr = None
-                        status = _common.CONN_NONE
-                        yield (fd, family, type_, path, raddr, status, pid)
-
-    def retrieve(self, kind, pid=None):
-        if kind not in self.tmap:
-            raise ValueError("invalid %r kind argument; choose between %s"
-                             % (kind, ', '.join([repr(x) for x in self.tmap])))
-        if pid is not None:
-            inodes = self.get_proc_inodes(pid)
-            if not inodes:
-                # no connections for this process
-                return []
-        else:
-            inodes = self.get_all_inodes()
-        ret = []
-        for f, family, type_ in self.tmap[kind]:
-            if family in (socket.AF_INET, socket.AF_INET6):
-                ls = self.process_inet(
-                    "/proc/net/%s" % f, family, type_, inodes, filter_pid=pid)
-            else:
-                ls = self.process_unix(
-                    "/proc/net/%s" % f, family, inodes, filter_pid=pid)
-            for fd, family, type_, laddr, raddr, status, bound_pid in ls:
-                if pid:
-                    conn = _common.pconn(fd, family, type_, laddr, raddr,
-                                         status)
-                else:
-                    conn = _common.sconn(fd, family, type_, laddr, raddr,
-                                         status, bound_pid)
-                ret.append(conn)
-        return ret
-
-
-_connections = Connections()
-
-
-def net_connections(kind='inet'):
-    """Return system-wide open connections."""
-    return _connections.retrieve(kind)
-
-
-def net_io_counters():
-    """Return network I/O statistics for every network interface
-    installed on the system as a dict of raw tuples.
-    """
-    with open("/proc/net/dev", "rt") as f:
-        lines = f.readlines()
-    retdict = {}
-    for line in lines[2:]:
-        colon = line.rfind(':')
-        assert colon > 0, repr(line)
-        name = line[:colon].strip()
-        fields = line[colon + 1:].strip().split()
-        bytes_recv = int(fields[0])
-        packets_recv = int(fields[1])
-        errin = int(fields[2])
-        dropin = int(fields[3])
-        bytes_sent = int(fields[8])
-        packets_sent = int(fields[9])
-        errout = int(fields[10])
-        dropout = int(fields[11])
-        retdict[name] = (bytes_sent, bytes_recv, packets_sent, packets_recv,
-                         errin, errout, dropin, dropout)
-    return retdict
-
-
-# --- disks
-
-def disk_io_counters():
-    """Return disk I/O statistics for every disk installed on the
-    system as a dict of raw tuples.
-    """
-    # man iostat states that sectors are equivalent to blocks and
-    # have a size of 512 bytes since 2.4 kernels. This value is
-    # needed to calculate the amount of disk I/O in bytes.
-    SECTOR_SIZE = 512
-
-    # determine partitions we want to look for
-    partitions = []
-    with open("/proc/partitions", "rt") as f:
-        lines = f.readlines()[2:]
-    for line in reversed(lines):
-        _, _, _, name = line.split()
-        if name[-1].isdigit():
-            # we're dealing with a partition (e.g. 'sda1'); 'sda' will
-            # also be around but we want to omit it
-            partitions.append(name)
-        else:
-            if not partitions or not partitions[-1].startswith(name):
-                # we're dealing with a disk entity for which no
-                # partitions have been defined (e.g. 'sda' but
-                # 'sda1' was not around), see:
-                # https://github.com/giampaolo/psutil/issues/338
-                partitions.append(name)
-    #
-    retdict = {}
-    with open("/proc/diskstats", "rt") as f:
-        lines = f.readlines()
-    for line in lines:
-        # http://www.mjmwired.net/kernel/Documentation/iostats.txt
-        fields = line.split()
-        if len(fields) > 7:
-            _, _, name, reads, _, rbytes, rtime, writes, _, wbytes, wtime = \
-                fields[:11]
-        else:
-            # from kernel 2.6.0 to 2.6.25
-            _, _, name, reads, rbytes, writes, wbytes = fields
-            rtime, wtime = 0, 0
-        if name in partitions:
-            rbytes = int(rbytes) * SECTOR_SIZE
-            wbytes = int(wbytes) * SECTOR_SIZE
-            reads = int(reads)
-            writes = int(writes)
-            rtime = int(rtime)
-            wtime = int(wtime)
-            retdict[name] = (reads, writes, rbytes, wbytes, rtime, wtime)
-    return retdict
-
-
-def disk_partitions(all=False):
-    """Return mounted disk partitions as a list of nameduples"""
-    phydevs = []
-    with open("/proc/filesystems", "r") as f:
-        for line in f:
-            if not line.startswith("nodev"):
-                phydevs.append(line.strip())
-
-    retlist = []
-    partitions = cext.disk_partitions()
-    for partition in partitions:
-        device, mountpoint, fstype, opts = partition
-        if device == 'none':
-            device = ''
-        if not all:
-            if device == '' or fstype not in phydevs:
-                continue
-        ntuple = _common.sdiskpart(device, mountpoint, fstype, opts)
-        retlist.append(ntuple)
-    return retlist
-
-
-disk_usage = _psposix.disk_usage
-
-
-# --- decorators
-
-def wrap_exceptions(fun):
-    """Decorator which translates bare OSError and IOError exceptions
-    into NoSuchProcess and AccessDenied.
-    """
-    @functools.wraps(fun)
-    def wrapper(self, *args, **kwargs):
-        try:
-            return fun(self, *args, **kwargs)
-        except EnvironmentError as err:
-            # support for private module import
-            if NoSuchProcess is None or AccessDenied is None:
-                raise
-            # ENOENT (no such file or directory) gets raised on open().
-            # ESRCH (no such process) can get raised on read() if
-            # process is gone in meantime.
-            if err.errno in (errno.ENOENT, errno.ESRCH):
-                raise NoSuchProcess(self.pid, self._name)
-            if err.errno in (errno.EPERM, errno.EACCES):
-                raise AccessDenied(self.pid, self._name)
-            raise
-    return wrapper
-
-
-class Process(object):
-    """Linux process implementation."""
-
-    __slots__ = ["pid", "_name"]
-
-    def __init__(self, pid):
-        self.pid = pid
-        self._name = None
-
-    @wrap_exceptions
-    def name(self):
-        fname = "/proc/%s/stat" % self.pid
-        kw = dict(encoding=DEFAULT_ENCODING) if PY3 else dict()
-        with open(fname, "rt", **kw) as f:
-            # XXX - gets changed later and probably needs refactoring
-            return f.read().split(' ')[1].replace('(', '').replace(')', '')
-
-    def exe(self):
-        try:
-            exe = os.readlink("/proc/%s/exe" % self.pid)
-        except (OSError, IOError) as err:
-            if err.errno in (errno.ENOENT, errno.ESRCH):
-                # no such file error; might be raised also if the
-                # path actually exists for system processes with
-                # low pids (about 0-20)
-                if os.path.lexists("/proc/%s" % self.pid):
-                    return ""
-                else:
-                    # ok, it is a process which has gone away
-                    raise NoSuchProcess(self.pid, self._name)
-            if err.errno in (errno.EPERM, errno.EACCES):
-                raise AccessDenied(self.pid, self._name)
-            raise
-
-        # readlink() might return paths containing null bytes ('\x00').
-        # Certain names have ' (deleted)' appended. Usually this is
-        # bogus as the file actually exists. Either way that's not
-        # important as we don't want to discriminate executables which
-        # have been deleted.
-        exe = exe.split('\x00')[0]
-        if exe.endswith(' (deleted)') and not os.path.exists(exe):
-            exe = exe[:-10]
-        return exe
-
-    @wrap_exceptions
-    def cmdline(self):
-        fname = "/proc/%s/cmdline" % self.pid
-        kw = dict(encoding=DEFAULT_ENCODING) if PY3 else dict()
-        with open(fname, "rt", **kw) as f:
-            return [x for x in f.read().split('\x00') if x]
-
-    @wrap_exceptions
-    def terminal(self):
-        tmap = _psposix._get_terminal_map()
-        with open("/proc/%s/stat" % self.pid, 'rb') as f:
-            tty_nr = int(f.read().split(b' ')[6])
-        try:
-            return tmap[tty_nr]
-        except KeyError:
-            return None
-
-    if os.path.exists('/proc/%s/io' % os.getpid()):
-        @wrap_exceptions
-        def io_counters(self):
-            fname = "/proc/%s/io" % self.pid
-            with open(fname, 'rb') as f:
-                rcount = wcount = rbytes = wbytes = None
-                for line in f:
-                    if rcount is None and line.startswith(b"syscr"):
-                        rcount = int(line.split()[1])
-                    elif wcount is None and line.startswith(b"syscw"):
-                        wcount = int(line.split()[1])
-                    elif rbytes is None and line.startswith(b"read_bytes"):
-                        rbytes = int(line.split()[1])
-                    elif wbytes is None and line.startswith(b"write_bytes"):
-                        wbytes = int(line.split()[1])
-                for x in (rcount, wcount, rbytes, wbytes):
-                    if x is None:
-                        raise NotImplementedError(
-                            "couldn't read all necessary info from %r" % fname)
-                return _common.pio(rcount, wcount, rbytes, wbytes)
-    else:
-        def io_counters(self):
-            raise NotImplementedError("couldn't find /proc/%s/io (kernel "
-                                      "too old?)" % self.pid)
-
-    @wrap_exceptions
-    def cpu_times(self):
-        with open("/proc/%s/stat" % self.pid, 'rb') as f:
-            st = f.read().strip()
-        # ignore the first two values ("pid (exe)")
-        st = st[st.find(b')') + 2:]
-        values = st.split(b' ')
-        utime = float(values[11]) / CLOCK_TICKS
-        stime = float(values[12]) / CLOCK_TICKS
-        return _common.pcputimes(utime, stime)
-
-    @wrap_exceptions
-    def wait(self, timeout=None):
-        try:
-            return _psposix.wait_pid(self.pid, timeout)
-        except _psposix.TimeoutExpired:
-            # support for private module import
-            if TimeoutExpired is None:
-                raise
-            raise TimeoutExpired(timeout, self.pid, self._name)
-
-    @wrap_exceptions
-    def create_time(self):
-        with open("/proc/%s/stat" % self.pid, 'rb') as f:
-            st = f.read().strip()
-        # ignore the first two values ("pid (exe)")
-        st = st[st.rfind(b')') + 2:]
-        values = st.split(b' ')
-        # According to the documentation, starttime is in field 21 and the
-        # unit is jiffies (clock ticks).
-        # We first divide it by CLOCK_TICKS and then add the boot time,
-        # yielding seconds since the epoch, in UTC.
-        # Also use cached value if available.
-        bt = BOOT_TIME or boot_time()
-        return (float(values[19]) / CLOCK_TICKS) + bt
-
-    @wrap_exceptions
-    def memory_info(self):
-        with open("/proc/%s/statm" % self.pid, 'rb') as f:
-            vms, rss = f.readline().split()[:2]
-            return _common.pmem(int(rss) * PAGESIZE,
-                                int(vms) * PAGESIZE)
-
-    @wrap_exceptions
-    def memory_info_ex(self):
-        #  ============================================================
-        # | FIELD  | DESCRIPTION                         | AKA  | TOP  |
-        #  ============================================================
-        # | rss    | resident set size                   |      | RES  |
-        # | vms    | total program size                  | size | VIRT |
-        # | shared | shared pages (from shared mappings) |      | SHR  |
-        # | text   | text ('code')                       | trs  | CODE |
-        # | lib    | library (unused in Linux 2.6)       | lrs  |      |
-        # | data   | data + stack                        | drs  | DATA |
-        # | dirty  | dirty pages (unused in Linux 2.6)   | dt   |      |
-        #  ============================================================
-        with open("/proc/%s/statm" % self.pid, "rb") as f:
-            vms, rss, shared, text, lib, data, dirty = \
-                [int(x) * PAGESIZE for x in f.readline().split()[:7]]
-        return pextmem(rss, vms, shared, text, lib, data, dirty)
-
-    if os.path.exists('/proc/%s/smaps' % os.getpid()):
-
-        @wrap_exceptions
-        def memory_maps(self):
-            """Return process's mapped memory regions as a list of nameduples.
-            Fields are explained in 'man proc'; here is an updated (Apr 2012)
-            version: http://goo.gl/fmebo
-            """
-            with open("/proc/%s/smaps" % self.pid, "rt") as f:
-                first_line = f.readline()
-                current_block = [first_line]
-
-                def get_blocks():
-                    data = {}
-                    for line in f:
-                        fields = line.split(None, 5)
-                        if not fields[0].endswith(':'):
-                            # new block section
-                            yield (current_block.pop(), data)
-                            current_block.append(line)
-                        else:
-                            try:
-                                data[fields[0]] = int(fields[1]) * 1024
-                            except ValueError:
-                                if fields[0].startswith('VmFlags:'):
-                                    # see issue #369
-                                    continue
-                                else:
-                                    raise ValueError("don't know how to inte"
-                                                     "rpret line %r" % line)
-                    yield (current_block.pop(), data)
-
-                ls = []
-                if first_line:  # smaps file can be empty
-                    for header, data in get_blocks():
-                        hfields = header.split(None, 5)
-                        try:
-                            addr, perms, offset, dev, inode, path = hfields
-                        except ValueError:
-                            addr, perms, offset, dev, inode, path = \
-                                hfields + ['']
-                        if not path:
-                            path = '[anon]'
-                        else:
-                            path = path.strip()
-                        ls.append((
-                            addr, perms, path,
-                            data['Rss:'],
-                            data.get('Size:', 0),
-                            data.get('Pss:', 0),
-                            data.get('Shared_Clean:', 0),
-                            data.get('Shared_Dirty:', 0),
-                            data.get('Private_Clean:', 0),
-                            data.get('Private_Dirty:', 0),
-                            data.get('Referenced:', 0),
-                            data.get('Anonymous:', 0),
-                            data.get('Swap:', 0)
-                        ))
-            return ls
-
-    else:
-        def memory_maps(self):
-            msg = "couldn't find /proc/%s/smaps; kernel < 2.6.14 or "  \
-                  "CONFIG_MMU kernel configuration option is not enabled" \
-                  % self.pid
-            raise NotImplementedError(msg)
-
-    @wrap_exceptions
-    def cwd(self):
-        # readlink() might return paths containing null bytes causing
-        # problems when used with other fs-related functions (os.*,
-        # open(), ...)
-        path = os.readlink("/proc/%s/cwd" % self.pid)
-        return path.replace('\x00', '')
-
-    @wrap_exceptions
-    def num_ctx_switches(self):
-        vol = unvol = None
-        with open("/proc/%s/status" % self.pid, "rb") as f:
-            for line in f:
-                if line.startswith(b"voluntary_ctxt_switches"):
-                    vol = int(line.split()[1])
-                elif line.startswith(b"nonvoluntary_ctxt_switches"):
-                    unvol = int(line.split()[1])
-                if vol is not None and unvol is not None:
-                    return _common.pctxsw(vol, unvol)
-            raise NotImplementedError(
-                "'voluntary_ctxt_switches' and 'nonvoluntary_ctxt_switches'"
-                "fields were not found in /proc/%s/status; the kernel is "
-                "probably older than 2.6.23" % self.pid)
-
-    @wrap_exceptions
-    def num_threads(self):
-        with open("/proc/%s/status" % self.pid, "rb") as f:
-            for line in f:
-                if line.startswith(b"Threads:"):
-                    return int(line.split()[1])
-            raise NotImplementedError("line not found")
-
-    @wrap_exceptions
-    def threads(self):
-        thread_ids = os.listdir("/proc/%s/task" % self.pid)
-        thread_ids.sort()
-        retlist = []
-        hit_enoent = False
-        for thread_id in thread_ids:
-            fname = "/proc/%s/task/%s/stat" % (self.pid, thread_id)
-            try:
-                with open(fname, 'rb') as f:
-                    st = f.read().strip()
-            except EnvironmentError as err:
-                if err.errno == errno.ENOENT:
-                    # no such file or directory; it means thread
-                    # disappeared on us
-                    hit_enoent = True
-                    continue
-                raise
-            # ignore the first two values ("pid (exe)")
-            st = st[st.find(b')') + 2:]
-            values = st.split(b' ')
-            utime = float(values[11]) / CLOCK_TICKS
-            stime = float(values[12]) / CLOCK_TICKS
-            ntuple = _common.pthread(int(thread_id), utime, stime)
-            retlist.append(ntuple)
-        if hit_enoent:
-            # raise NSP if the process disappeared on us
-            os.stat('/proc/%s' % self.pid)
-        return retlist
-
-    @wrap_exceptions
-    def nice_get(self):
-        # with open('/proc/%s/stat' % self.pid, 'r') as f:
-        #   data = f.read()
-        #   return int(data.split()[18])
-
-        # Use C implementation
-        return _psutil_posix.getpriority(self.pid)
-
-    @wrap_exceptions
-    def nice_set(self, value):
-        return _psutil_posix.setpriority(self.pid, value)
-
-    @wrap_exceptions
-    def cpu_affinity_get(self):
-        return cext.proc_cpu_affinity_get(self.pid)
-
-    @wrap_exceptions
-    def cpu_affinity_set(self, cpus):
-        try:
-            cext.proc_cpu_affinity_set(self.pid, cpus)
-        except OSError as err:
-            if err.errno == errno.EINVAL:
-                allcpus = tuple(range(len(per_cpu_times())))
-                for cpu in cpus:
-                    if cpu not in allcpus:
-                        raise ValueError("invalid CPU #%i (choose between %s)"
-                                         % (cpu, allcpus))
-            raise
-
-    # only starting from kernel 2.6.13
-    if hasattr(cext, "proc_ioprio_get"):
-
-        @wrap_exceptions
-        def ionice_get(self):
-            ioclass, value = cext.proc_ioprio_get(self.pid)
-            return _common.pionice(ioclass, value)
-
-        @wrap_exceptions
-        def ionice_set(self, ioclass, value):
-            if ioclass in (IOPRIO_CLASS_NONE, None):
-                if value:
-                    msg = "can't specify value with IOPRIO_CLASS_NONE"
-                    raise ValueError(msg)
-                ioclass = IOPRIO_CLASS_NONE
-                value = 0
-            if ioclass in (IOPRIO_CLASS_RT, IOPRIO_CLASS_BE):
-                if value is None:
-                    value = 4
-            elif ioclass == IOPRIO_CLASS_IDLE:
-                if value:
-                    msg = "can't specify value with IOPRIO_CLASS_IDLE"
-                    raise ValueError(msg)
-                value = 0
-            else:
-                value = 0
-            if not 0 <= value <= 8:
-                raise ValueError(
-                    "value argument range expected is between 0 and 8")
-            return cext.proc_ioprio_set(self.pid, ioclass, value)
-
-    if HAS_PRLIMIT:
-        @wrap_exceptions
-        def rlimit(self, resource, limits=None):
-            # if pid is 0 prlimit() applies to the calling process and
-            # we don't want that
-            if self.pid == 0:
-                raise ValueError("can't use prlimit() against PID 0 process")
-            if limits is None:
-                # get
-                return cext.linux_prlimit(self.pid, resource)
-            else:
-                # set
-                if len(limits) != 2:
-                    raise ValueError(
-                        "second argument must be a (soft, hard) tuple")
-                soft, hard = limits
-                cext.linux_prlimit(self.pid, resource, soft, hard)
-
-    @wrap_exceptions
-    def status(self):
-        with open("/proc/%s/status" % self.pid, 'rb') as f:
-            for line in f:
-                if line.startswith(b"State:"):
-                    letter = line.split()[1]
-                    if PY3:
-                        letter = letter.decode()
-                    # XXX is '?' legit? (we're not supposed to return
-                    # it anyway)
-                    return PROC_STATUSES.get(letter, '?')
-
-    @wrap_exceptions
-    def open_files(self):
-        retlist = []
-        files = os.listdir("/proc/%s/fd" % self.pid)
-        hit_enoent = False
-        for fd in files:
-            file = "/proc/%s/fd/%s" % (self.pid, fd)
-            try:
-                file = os.readlink(file)
-            except OSError as err:
-                # ENOENT == file which is gone in the meantime
-                if err.errno in (errno.ENOENT, errno.ESRCH):
-                    hit_enoent = True
-                    continue
-                elif err.errno == errno.EINVAL:
-                    # not a link
-                    continue
-                else:
-                    raise
-            else:
-                # If file is not an absolute path there's no way
-                # to tell whether it's a regular file or not,
-                # so we skip it. A regular file is always supposed
-                # to be absolutized though.
-                if file.startswith('/') and isfile_strict(file):
-                    ntuple = _common.popenfile(file, int(fd))
-                    retlist.append(ntuple)
-        if hit_enoent:
-            # raise NSP if the process disappeared on us
-            os.stat('/proc/%s' % self.pid)
-        return retlist
-
-    @wrap_exceptions
-    def connections(self, kind='inet'):
-        ret = _connections.retrieve(kind, self.pid)
-        # raise NSP if the process disappeared on us
-        os.stat('/proc/%s' % self.pid)
-        return ret
-
-    @wrap_exceptions
-    def num_fds(self):
-        return len(os.listdir("/proc/%s/fd" % self.pid))
-
-    @wrap_exceptions
-    def ppid(self):
-        with open("/proc/%s/status" % self.pid, 'rb') as f:
-            for line in f:
-                if line.startswith(b"PPid:"):
-                    # PPid: nnnn
-                    return int(line.split()[1])
-            raise NotImplementedError("line not found")
-
-    @wrap_exceptions
-    def uids(self):
-        with open("/proc/%s/status" % self.pid, 'rb') as f:
-            for line in f:
-                if line.startswith(b'Uid:'):
-                    _, real, effective, saved, fs = line.split()
-                    return _common.puids(int(real), int(effective), int(saved))
-            raise NotImplementedError("line not found")
-
-    @wrap_exceptions
-    def gids(self):
-        with open("/proc/%s/status" % self.pid, 'rb') as f:
-            for line in f:
-                if line.startswith(b'Gid:'):
-                    _, real, effective, saved, fs = line.split()
-                    return _common.pgids(int(real), int(effective), int(saved))
-            raise NotImplementedError("line not found")
diff --git a/io_scenario/libs/ubuntu_14_04_x64/psutil/_psosx.py b/io_scenario/libs/ubuntu_14_04_x64/psutil/_psosx.py
deleted file mode 100644
index c40ef1d..0000000
--- a/io_scenario/libs/ubuntu_14_04_x64/psutil/_psosx.py
+++ /dev/null
@@ -1,340 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""OSX platform implementation."""
-
-import errno
-import functools
-import os
-from collections import namedtuple
-
-from psutil import _common
-from psutil import _psposix
-from psutil._common import conn_tmap, usage_percent, isfile_strict
-import _psutil_osx as cext
-import _psutil_posix
-
-
-__extra__all__ = []
-
-# --- constants
-
-PAGESIZE = os.sysconf("SC_PAGE_SIZE")
-
-# http://students.mimuw.edu.pl/lxr/source/include/net/tcp_states.h
-TCP_STATUSES = {
-    cext.TCPS_ESTABLISHED: _common.CONN_ESTABLISHED,
-    cext.TCPS_SYN_SENT: _common.CONN_SYN_SENT,
-    cext.TCPS_SYN_RECEIVED: _common.CONN_SYN_RECV,
-    cext.TCPS_FIN_WAIT_1: _common.CONN_FIN_WAIT1,
-    cext.TCPS_FIN_WAIT_2: _common.CONN_FIN_WAIT2,
-    cext.TCPS_TIME_WAIT: _common.CONN_TIME_WAIT,
-    cext.TCPS_CLOSED: _common.CONN_CLOSE,
-    cext.TCPS_CLOSE_WAIT: _common.CONN_CLOSE_WAIT,
-    cext.TCPS_LAST_ACK: _common.CONN_LAST_ACK,
-    cext.TCPS_LISTEN: _common.CONN_LISTEN,
-    cext.TCPS_CLOSING: _common.CONN_CLOSING,
-    cext.PSUTIL_CONN_NONE: _common.CONN_NONE,
-}
-
-PROC_STATUSES = {
-    cext.SIDL: _common.STATUS_IDLE,
-    cext.SRUN: _common.STATUS_RUNNING,
-    cext.SSLEEP: _common.STATUS_SLEEPING,
-    cext.SSTOP: _common.STATUS_STOPPED,
-    cext.SZOMB: _common.STATUS_ZOMBIE,
-}
-
-scputimes = namedtuple('scputimes', ['user', 'nice', 'system', 'idle'])
-
-svmem = namedtuple(
-    'svmem', ['total', 'available', 'percent', 'used', 'free',
-              'active', 'inactive', 'wired'])
-
-pextmem = namedtuple('pextmem', ['rss', 'vms', 'pfaults', 'pageins'])
-
-pmmap_grouped = namedtuple(
-    'pmmap_grouped',
-    'path rss private swapped dirtied ref_count shadow_depth')
-
-pmmap_ext = namedtuple(
-    'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields))
-
-# set later from __init__.py
-NoSuchProcess = None
-AccessDenied = None
-TimeoutExpired = None
-
-
-# --- functions
-
-def virtual_memory():
-    """System virtual memory as a namedtuple."""
-    total, active, inactive, wired, free = cext.virtual_mem()
-    avail = inactive + free
-    used = active + inactive + wired
-    percent = usage_percent((total - avail), total, _round=1)
-    return svmem(total, avail, percent, used, free,
-                 active, inactive, wired)
-
-
-def swap_memory():
-    """Swap system memory as a (total, used, free, sin, sout) tuple."""
-    total, used, free, sin, sout = cext.swap_mem()
-    percent = usage_percent(used, total, _round=1)
-    return _common.sswap(total, used, free, percent, sin, sout)
-
-
-def cpu_times():
-    """Return system CPU times as a namedtuple."""
-    user, nice, system, idle = cext.cpu_times()
-    return scputimes(user, nice, system, idle)
-
-
-def per_cpu_times():
-    """Return system CPU times as a named tuple"""
-    ret = []
-    for cpu_t in cext.per_cpu_times():
-        user, nice, system, idle = cpu_t
-        item = scputimes(user, nice, system, idle)
-        ret.append(item)
-    return ret
-
-
-def cpu_count_logical():
-    """Return the number of logical CPUs in the system."""
-    return cext.cpu_count_logical()
-
-
-def cpu_count_physical():
-    """Return the number of physical CPUs in the system."""
-    return cext.cpu_count_phys()
-
-
-def boot_time():
-    """The system boot time expressed in seconds since the epoch."""
-    return cext.boot_time()
-
-
-def disk_partitions(all=False):
-    retlist = []
-    partitions = cext.disk_partitions()
-    for partition in partitions:
-        device, mountpoint, fstype, opts = partition
-        if device == 'none':
-            device = ''
-        if not all:
-            if not os.path.isabs(device) or not os.path.exists(device):
-                continue
-        ntuple = _common.sdiskpart(device, mountpoint, fstype, opts)
-        retlist.append(ntuple)
-    return retlist
-
-
-def users():
-    retlist = []
-    rawlist = cext.users()
-    for item in rawlist:
-        user, tty, hostname, tstamp = item
-        if tty == '~':
-            continue  # reboot or shutdown
-        if not tstamp:
-            continue
-        nt = _common.suser(user, tty or None, hostname or None, tstamp)
-        retlist.append(nt)
-    return retlist
-
-
-def net_connections(kind='inet'):
-    # Note: on OSX this will fail with AccessDenied unless
-    # the calling process is running as root.
-    ret = []
-    for pid in pids():
-        try:
-            cons = Process(pid).connections(kind)
-        except NoSuchProcess:
-            continue
-        else:
-            if cons:
-                for c in cons:
-                    c = list(c) + [pid]
-                    ret.append(_common.sconn(*c))
-    return ret
-
-
-pids = cext.pids
-pid_exists = _psposix.pid_exists
-disk_usage = _psposix.disk_usage
-net_io_counters = cext.net_io_counters
-disk_io_counters = cext.disk_io_counters
-
-
-def wrap_exceptions(fun):
-    """Decorator which translates bare OSError exceptions into
-    NoSuchProcess and AccessDenied.
-    """
-    @functools.wraps(fun)
-    def wrapper(self, *args, **kwargs):
-        try:
-            return fun(self, *args, **kwargs)
-        except OSError as err:
-            # support for private module import
-            if NoSuchProcess is None or AccessDenied is None:
-                raise
-            if err.errno == errno.ESRCH:
-                raise NoSuchProcess(self.pid, self._name)
-            if err.errno in (errno.EPERM, errno.EACCES):
-                raise AccessDenied(self.pid, self._name)
-            raise
-    return wrapper
-
-
-class Process(object):
-    """Wrapper class around underlying C implementation."""
-
-    __slots__ = ["pid", "_name"]
-
-    def __init__(self, pid):
-        self.pid = pid
-        self._name = None
-
-    @wrap_exceptions
-    def name(self):
-        return cext.proc_name(self.pid)
-
-    @wrap_exceptions
-    def exe(self):
-        return cext.proc_exe(self.pid)
-
-    @wrap_exceptions
-    def cmdline(self):
-        if not pid_exists(self.pid):
-            raise NoSuchProcess(self.pid, self._name)
-        return cext.proc_cmdline(self.pid)
-
-    @wrap_exceptions
-    def ppid(self):
-        return cext.proc_ppid(self.pid)
-
-    @wrap_exceptions
-    def cwd(self):
-        return cext.proc_cwd(self.pid)
-
-    @wrap_exceptions
-    def uids(self):
-        real, effective, saved = cext.proc_uids(self.pid)
-        return _common.puids(real, effective, saved)
-
-    @wrap_exceptions
-    def gids(self):
-        real, effective, saved = cext.proc_gids(self.pid)
-        return _common.pgids(real, effective, saved)
-
-    @wrap_exceptions
-    def terminal(self):
-        tty_nr = cext.proc_tty_nr(self.pid)
-        tmap = _psposix._get_terminal_map()
-        try:
-            return tmap[tty_nr]
-        except KeyError:
-            return None
-
-    @wrap_exceptions
-    def memory_info(self):
-        rss, vms = cext.proc_memory_info(self.pid)[:2]
-        return _common.pmem(rss, vms)
-
-    @wrap_exceptions
-    def memory_info_ex(self):
-        rss, vms, pfaults, pageins = cext.proc_memory_info(self.pid)
-        return pextmem(rss, vms, pfaults * PAGESIZE, pageins * PAGESIZE)
-
-    @wrap_exceptions
-    def cpu_times(self):
-        user, system = cext.proc_cpu_times(self.pid)
-        return _common.pcputimes(user, system)
-
-    @wrap_exceptions
-    def create_time(self):
-        return cext.proc_create_time(self.pid)
-
-    @wrap_exceptions
-    def num_ctx_switches(self):
-        return _common.pctxsw(*cext.proc_num_ctx_switches(self.pid))
-
-    @wrap_exceptions
-    def num_threads(self):
-        return cext.proc_num_threads(self.pid)
-
-    @wrap_exceptions
-    def open_files(self):
-        if self.pid == 0:
-            return []
-        files = []
-        rawlist = cext.proc_open_files(self.pid)
-        for path, fd in rawlist:
-            if isfile_strict(path):
-                ntuple = _common.popenfile(path, fd)
-                files.append(ntuple)
-        return files
-
-    @wrap_exceptions
-    def connections(self, kind='inet'):
-        if kind not in conn_tmap:
-            raise ValueError("invalid %r kind argument; choose between %s"
-                             % (kind, ', '.join([repr(x) for x in conn_tmap])))
-        families, types = conn_tmap[kind]
-        rawlist = cext.proc_connections(self.pid, families, types)
-        ret = []
-        for item in rawlist:
-            fd, fam, type, laddr, raddr, status = item
-            status = TCP_STATUSES[status]
-            nt = _common.pconn(fd, fam, type, laddr, raddr, status)
-            ret.append(nt)
-        return ret
-
-    @wrap_exceptions
-    def num_fds(self):
-        if self.pid == 0:
-            return 0
-        return cext.proc_num_fds(self.pid)
-
-    @wrap_exceptions
-    def wait(self, timeout=None):
-        try:
-            return _psposix.wait_pid(self.pid, timeout)
-        except _psposix.TimeoutExpired:
-            # support for private module import
-            if TimeoutExpired is None:
-                raise
-            raise TimeoutExpired(timeout, self.pid, self._name)
-
-    @wrap_exceptions
-    def nice_get(self):
-        return _psutil_posix.getpriority(self.pid)
-
-    @wrap_exceptions
-    def nice_set(self, value):
-        return _psutil_posix.setpriority(self.pid, value)
-
-    @wrap_exceptions
-    def status(self):
-        code = cext.proc_status(self.pid)
-        # XXX is '?' legit? (we're not supposed to return it anyway)
-        return PROC_STATUSES.get(code, '?')
-
-    @wrap_exceptions
-    def threads(self):
-        rawlist = cext.proc_threads(self.pid)
-        retlist = []
-        for thread_id, utime, stime in rawlist:
-            ntuple = _common.pthread(thread_id, utime, stime)
-            retlist.append(ntuple)
-        return retlist
-
-    @wrap_exceptions
-    def memory_maps(self):
-        return cext.proc_memory_maps(self.pid)
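
# A small worked example (not part of the deleted module) of the arithmetic
# used by the OSX virtual_memory() above; the byte counts are made up purely
# for illustration.
GiB = 1 << 30
total, active, inactive, wired, free = 8 * GiB, 3 * GiB, 1 * GiB, 2 * GiB, 2 * GiB

avail = inactive + free                        # memory that can be handed out
used = active + inactive + wired               # everything currently claimed
percent = round((total - avail) * 100.0 / total, 1)
print(avail, used, percent)                    # 3 GiB, 6 GiB, 62.5
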
diff --git a/io_scenario/libs/ubuntu_14_04_x64/psutil/_psposix.py b/io_scenario/libs/ubuntu_14_04_x64/psutil/_psposix.py
deleted file mode 100644
index 94db351..0000000
--- a/io_scenario/libs/ubuntu_14_04_x64/psutil/_psposix.py
+++ /dev/null
@@ -1,154 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Routines common to all posix systems."""
-
-import errno
-import glob
-import os
-import sys
-import time
-
-from psutil._common import sdiskusage, usage_percent, memoize
-from psutil._compat import PY3, unicode
-
-
-class TimeoutExpired(Exception):
-    pass
-
-
-def pid_exists(pid):
-    """Check whether pid exists in the current process table."""
-    if pid == 0:
-        # According to "man 2 kill" PID 0 has a special meaning:
-        # it refers to <<every process in the process group of the
-        # calling process>> so we don't want to go any further.
-        # If we get here it means this UNIX platform *does* have
-        # a process with id 0.
-        return True
-    try:
-        os.kill(pid, 0)
-    except OSError as err:
-        if err.errno == errno.ESRCH:
-            # ESRCH == No such process
-            return False
-        elif err.errno == errno.EPERM:
-            # EPERM clearly means there's a process to deny access to
-            return True
-        else:
-            # According to "man 2 kill" possible error values are
-            # (EINVAL, EPERM, ESRCH) therefore we should never get
-            # here. If we do let's be explicit in considering this
-            # an error.
-            raise err
-    else:
-        return True
-
-
-def wait_pid(pid, timeout=None):
-    """Wait for process with pid 'pid' to terminate and return its
-    exit status code as an integer.
-
-    If pid is not a child of os.getpid() (the current process), this just
-    waits until the process disappears and returns None.
-
-    If pid does not exist at all return None immediately.
-
-    Raise TimeoutExpired if the timeout expires.
-    """
-    def check_timeout(delay):
-        if timeout is not None:
-            if timer() >= stop_at:
-                raise TimeoutExpired()
-        time.sleep(delay)
-        return min(delay * 2, 0.04)
-
-    timer = getattr(time, 'monotonic', time.time)
-    if timeout is not None:
-        waitcall = lambda: os.waitpid(pid, os.WNOHANG)
-        stop_at = timer() + timeout
-    else:
-        waitcall = lambda: os.waitpid(pid, 0)
-
-    delay = 0.0001
-    while True:
-        try:
-            retpid, status = waitcall()
-        except OSError as err:
-            if err.errno == errno.EINTR:
-                delay = check_timeout(delay)
-                continue
-            elif err.errno == errno.ECHILD:
-                # This has two meanings:
-                # - pid is not a child of os.getpid() in which case
-                #   we keep polling until it's gone
-                # - pid never existed in the first place
-                # In both cases we'll eventually return None as we
-                # can't determine its exit status code.
-                while True:
-                    if pid_exists(pid):
-                        delay = check_timeout(delay)
-                    else:
-                        return
-            else:
-                raise
-        else:
-            if retpid == 0:
-                # WNOHANG was used, pid is still running
-                delay = check_timeout(delay)
-                continue
-            # process exited due to a signal; return the integer of
-            # that signal
-            if os.WIFSIGNALED(status):
-                return os.WTERMSIG(status)
-            # process exited using exit(2) system call; return the
-            # integer exit(2) system call has been called with
-            elif os.WIFEXITED(status):
-                return os.WEXITSTATUS(status)
-            else:
-                # should never happen
-                raise RuntimeError("unknown process exit status")
-
-
-def disk_usage(path):
-    """Return disk usage associated with path."""
-    try:
-        st = os.statvfs(path)
-    except UnicodeEncodeError:
-        if not PY3 and isinstance(path, unicode):
-            # this is a bug with os.statvfs() and unicode on
-            # Python 2, see:
-            # - https://github.com/giampaolo/psutil/issues/416
-            # - http://bugs.python.org/issue18695
-            try:
-                path = path.encode(sys.getfilesystemencoding())
-            except UnicodeEncodeError:
-                pass
-            st = os.statvfs(path)
-        else:
-            raise
-    free = (st.f_bavail * st.f_frsize)
-    total = (st.f_blocks * st.f_frsize)
-    used = (st.f_blocks - st.f_bfree) * st.f_frsize
-    percent = usage_percent(used, total, _round=1)
-    # NB: the percentage is about 5% lower than what is shown by df due to
-    # reserved blocks that we are currently not considering:
-    # http://goo.gl/sWGbH
-    return sdiskusage(total, used, free, percent)
-
-
-@memoize
-def _get_terminal_map():
-    ret = {}
-    ls = glob.glob('/dev/tty*') + glob.glob('/dev/pts/*')
-    for name in ls:
-        assert name not in ret
-        try:
-            ret[os.stat(name).st_rdev] = name
-        except OSError as err:
-            if err.errno != errno.ENOENT:
-                raise
-    return ret
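
# A hedged, self-contained sketch (not part of the deleted module) of the
# polling pattern wait_pid() above is built on: os.waitpid() with WNOHANG
# returns (0, 0) while the child is still alive, so the caller sleeps with
# an exponential backoff (capped at 0.04s) and retries.
import os
import time

pid = os.fork()
if pid == 0:            # child: linger briefly, then exit with status 7
    time.sleep(0.2)
    os._exit(7)

delay = 0.0001
while True:
    retpid, status = os.waitpid(pid, os.WNOHANG)
    if retpid != 0:     # child has terminated
        print(os.WEXITSTATUS(status))   # -> 7
        break
    time.sleep(delay)
    delay = min(delay * 2, 0.04)
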
diff --git a/io_scenario/libs/ubuntu_14_04_x64/psutil/_pssunos.py b/io_scenario/libs/ubuntu_14_04_x64/psutil/_pssunos.py
deleted file mode 100644
index 7b5385e..0000000
--- a/io_scenario/libs/ubuntu_14_04_x64/psutil/_pssunos.py
+++ /dev/null
@@ -1,527 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Sun OS Solaris platform implementation."""
-
-import errno
-import os
-import socket
-import subprocess
-import sys
-from collections import namedtuple
-
-from psutil import _common
-from psutil import _psposix
-from psutil._common import usage_percent, isfile_strict
-from psutil._compat import PY3
-import _psutil_posix
-import _psutil_sunos as cext
-
-
-__extra__all__ = ["CONN_IDLE", "CONN_BOUND"]
-
-PAGE_SIZE = os.sysconf('SC_PAGE_SIZE')
-
-CONN_IDLE = "IDLE"
-CONN_BOUND = "BOUND"
-
-PROC_STATUSES = {
-    cext.SSLEEP: _common.STATUS_SLEEPING,
-    cext.SRUN: _common.STATUS_RUNNING,
-    cext.SZOMB: _common.STATUS_ZOMBIE,
-    cext.SSTOP: _common.STATUS_STOPPED,
-    cext.SIDL: _common.STATUS_IDLE,
-    cext.SONPROC: _common.STATUS_RUNNING,  # same as run
-    cext.SWAIT: _common.STATUS_WAITING,
-}
-
-TCP_STATUSES = {
-    cext.TCPS_ESTABLISHED: _common.CONN_ESTABLISHED,
-    cext.TCPS_SYN_SENT: _common.CONN_SYN_SENT,
-    cext.TCPS_SYN_RCVD: _common.CONN_SYN_RECV,
-    cext.TCPS_FIN_WAIT_1: _common.CONN_FIN_WAIT1,
-    cext.TCPS_FIN_WAIT_2: _common.CONN_FIN_WAIT2,
-    cext.TCPS_TIME_WAIT: _common.CONN_TIME_WAIT,
-    cext.TCPS_CLOSED: _common.CONN_CLOSE,
-    cext.TCPS_CLOSE_WAIT: _common.CONN_CLOSE_WAIT,
-    cext.TCPS_LAST_ACK: _common.CONN_LAST_ACK,
-    cext.TCPS_LISTEN: _common.CONN_LISTEN,
-    cext.TCPS_CLOSING: _common.CONN_CLOSING,
-    cext.PSUTIL_CONN_NONE: _common.CONN_NONE,
-    cext.TCPS_IDLE: CONN_IDLE,  # sunos specific
-    cext.TCPS_BOUND: CONN_BOUND,  # sunos specific
-}
-
-scputimes = namedtuple('scputimes', ['user', 'system', 'idle', 'iowait'])
-svmem = namedtuple('svmem', ['total', 'available', 'percent', 'used', 'free'])
-pextmem = namedtuple('pextmem', ['rss', 'vms'])
-pmmap_grouped = namedtuple('pmmap_grouped', ['path', 'rss', 'anon', 'locked'])
-pmmap_ext = namedtuple(
-    'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields))
-
-# set later from __init__.py
-NoSuchProcess = None
-AccessDenied = None
-TimeoutExpired = None
-
-# --- functions
-
-disk_io_counters = cext.disk_io_counters
-net_io_counters = cext.net_io_counters
-disk_usage = _psposix.disk_usage
-
-
-def virtual_memory():
-    # we could have done this with kstat, but imho this is good enough
-    total = os.sysconf('SC_PHYS_PAGES') * PAGE_SIZE
-    # note: there's no difference on Solaris
-    free = avail = os.sysconf('SC_AVPHYS_PAGES') * PAGE_SIZE
-    used = total - free
-    percent = usage_percent(used, total, _round=1)
-    return svmem(total, avail, percent, used, free)
-
-
-def swap_memory():
-    sin, sout = cext.swap_mem()
-    # XXX
-    # we are supposed to get total/free by doing so:
-    # http://cvs.opensolaris.org/source/xref/onnv/onnv-gate/
-    #     usr/src/cmd/swap/swap.c
-    # ...nevertheless I can't manage to obtain the same numbers as the
-    # 'swap' cmdline utility, so let's parse its output (sigh!)
-    p = subprocess.Popen(['swap', '-l', '-k'], stdout=subprocess.PIPE)
-    stdout, stderr = p.communicate()
-    if PY3:
-        stdout = stdout.decode(sys.stdout.encoding)
-    if p.returncode != 0:
-        raise RuntimeError("'swap -l -k' failed (retcode=%s)" % p.returncode)
-
-    lines = stdout.strip().split('\n')[1:]
-    if not lines:
-        raise RuntimeError('no swap device(s) configured')
-    total = free = 0
-    for line in lines:
-        line = line.split()
-        t, f = line[-2:]
-        t = t.replace('K', '')
-        f = f.replace('K', '')
-        total += int(int(t) * 1024)
-        free += int(int(f) * 1024)
-    used = total - free
-    percent = usage_percent(used, total, _round=1)
-    return _common.sswap(total, used, free, percent,
-                         sin * PAGE_SIZE, sout * PAGE_SIZE)
-
-
-def pids():
-    """Returns a list of PIDs currently running on the system."""
-    return [int(x) for x in os.listdir('/proc') if x.isdigit()]
-
-
-def pid_exists(pid):
-    """Check for the existence of a unix pid."""
-    return _psposix.pid_exists(pid)
-
-
-def cpu_times():
-    """Return system-wide CPU times as a named tuple"""
-    ret = cext.per_cpu_times()
-    return scputimes(*[sum(x) for x in zip(*ret)])
-
-
-def per_cpu_times():
-    """Return system per-CPU times as a list of named tuples"""
-    ret = cext.per_cpu_times()
-    return [scputimes(*x) for x in ret]
-
-
-def cpu_count_logical():
-    """Return the number of logical CPUs in the system."""
-    try:
-        return os.sysconf("SC_NPROCESSORS_ONLN")
-    except ValueError:
-        # mimic os.cpu_count() behavior
-        return None
-
-
-def cpu_count_physical():
-    """Return the number of physical CPUs in the system."""
-    return cext.cpu_count_phys()
-
-
-def boot_time():
-    """The system boot time expressed in seconds since the epoch."""
-    return cext.boot_time()
-
-
-def users():
-    """Return currently connected users as a list of namedtuples."""
-    retlist = []
-    rawlist = cext.users()
-    localhost = (':0.0', ':0')
-    for item in rawlist:
-        user, tty, hostname, tstamp, user_process = item
-        # note: the underlying C function includes entries about
-        # system boot, run level and others.  We might want
-        # to use them in the future.
-        if not user_process:
-            continue
-        if hostname in localhost:
-            hostname = 'localhost'
-        nt = _common.suser(user, tty, hostname, tstamp)
-        retlist.append(nt)
-    return retlist
-
-
-def disk_partitions(all=False):
-    """Return system disk partitions."""
-    # TODO - the filtering logic should be checked more carefully so that
-    # it reflects 'df' as closely as possible
-    retlist = []
-    partitions = cext.disk_partitions()
-    for partition in partitions:
-        device, mountpoint, fstype, opts = partition
-        if device == 'none':
-            device = ''
-        if not all:
-            # Unlike, say, Linux, we don't have a list of
-            # common fs types so the best we can do, AFAIK, is to
-            # filter by filesystem having a total size > 0.
-            if not disk_usage(mountpoint).total:
-                continue
-        ntuple = _common.sdiskpart(device, mountpoint, fstype, opts)
-        retlist.append(ntuple)
-    return retlist
-
-
-def net_connections(kind, _pid=-1):
-    """Return socket connections.  If pid == -1 return system-wide
-    connections (as opposed to connections opened by one process only).
-    Only INET sockets are returned (UNIX are not).
-    """
-    cmap = _common.conn_tmap.copy()
-    if _pid == -1:
-        cmap.pop('unix', 0)
-    if kind not in cmap:
-        raise ValueError("invalid %r kind argument; choose between %s"
-                         % (kind, ', '.join([repr(x) for x in cmap])))
-    families, types = _common.conn_tmap[kind]
-    rawlist = cext.net_connections(_pid, families, types)
-    ret = []
-    for item in rawlist:
-        fd, fam, type_, laddr, raddr, status, pid = item
-        if fam not in families:
-            continue
-        if type_ not in types:
-            continue
-        status = TCP_STATUSES[status]
-        if _pid == -1:
-            nt = _common.sconn(fd, fam, type_, laddr, raddr, status, pid)
-        else:
-            nt = _common.pconn(fd, fam, type_, laddr, raddr, status)
-        ret.append(nt)
-    return ret
-
-
-def wrap_exceptions(fun):
-    """Call callable into a try/except clause and translate ENOENT,
-    EACCES and EPERM in NoSuchProcess or AccessDenied exceptions.
-    """
-    def wrapper(self, *args, **kwargs):
-        try:
-            return fun(self, *args, **kwargs)
-        except EnvironmentError as err:
-            # support for private module import
-            if NoSuchProcess is None or AccessDenied is None:
-                raise
-            # ENOENT (no such file or directory) gets raised on open().
-            # ESRCH (no such process) can get raised on read() if
-            # process is gone in meantime.
-            if err.errno in (errno.ENOENT, errno.ESRCH):
-                raise NoSuchProcess(self.pid, self._name)
-            if err.errno in (errno.EPERM, errno.EACCES):
-                raise AccessDenied(self.pid, self._name)
-            raise
-    return wrapper
-
-
-class Process(object):
-    """Wrapper class around underlying C implementation."""
-
-    __slots__ = ["pid", "_name"]
-
-    def __init__(self, pid):
-        self.pid = pid
-        self._name = None
-
-    @wrap_exceptions
-    def name(self):
-        # note: max len == 15
-        return cext.proc_name_and_args(self.pid)[0]
-
-    @wrap_exceptions
-    def exe(self):
-        # Will be guess later from cmdline but we want to explicitly
-        # invoke cmdline here in order to get an AccessDenied
-        # exception if the user has not enough privileges.
-        self.cmdline()
-        return ""
-
-    @wrap_exceptions
-    def cmdline(self):
-        return cext.proc_name_and_args(self.pid)[1].split(' ')
-
-    @wrap_exceptions
-    def create_time(self):
-        return cext.proc_basic_info(self.pid)[3]
-
-    @wrap_exceptions
-    def num_threads(self):
-        return cext.proc_basic_info(self.pid)[5]
-
-    @wrap_exceptions
-    def nice_get(self):
-        # For some reason getpriority(3) return ESRCH (no such process)
-        # for certain low-pid processes, no matter what (even as root).
-        # The process actually exists though, as it has a name,
-        # creation time, etc.
-        # The best thing we can do here appears to be raising AD.
-        # Note: tested on Solaris 11; on Open Solaris 5 everything is
-        # fine.
-        try:
-            return _psutil_posix.getpriority(self.pid)
-        except EnvironmentError as err:
-            if err.errno in (errno.ENOENT, errno.ESRCH):
-                if pid_exists(self.pid):
-                    raise AccessDenied(self.pid, self._name)
-            raise
-
-    @wrap_exceptions
-    def nice_set(self, value):
-        if self.pid in (2, 3):
-            # Special case PIDs: internally setpriority(3) return ESRCH
-            # (no such process), no matter what.
-            # The process actually exists though, as it has a name,
-            # creation time, etc.
-            raise AccessDenied(self.pid, self._name)
-        return _psutil_posix.setpriority(self.pid, value)
-
-    @wrap_exceptions
-    def ppid(self):
-        return cext.proc_basic_info(self.pid)[0]
-
-    @wrap_exceptions
-    def uids(self):
-        real, effective, saved, _, _, _ = cext.proc_cred(self.pid)
-        return _common.puids(real, effective, saved)
-
-    @wrap_exceptions
-    def gids(self):
-        _, _, _, real, effective, saved = cext.proc_cred(self.pid)
-        return _common.puids(real, effective, saved)
-
-    @wrap_exceptions
-    def cpu_times(self):
-        user, system = cext.proc_cpu_times(self.pid)
-        return _common.pcputimes(user, system)
-
-    @wrap_exceptions
-    def terminal(self):
-        hit_enoent = False
-        tty = wrap_exceptions(
-            cext.proc_basic_info(self.pid)[0])
-        if tty != cext.PRNODEV:
-            for x in (0, 1, 2, 255):
-                try:
-                    return os.readlink('/proc/%d/path/%d' % (self.pid, x))
-                except OSError as err:
-                    if err.errno == errno.ENOENT:
-                        hit_enoent = True
-                        continue
-                    raise
-        if hit_enoent:
-            # raise NSP if the process disappeared on us
-            os.stat('/proc/%s' % self.pid)
-
-    @wrap_exceptions
-    def cwd(self):
-        # /proc/PID/path/cwd may not be resolved by readlink() even if
-        # it exists (ls shows it). If that's the case and the process
-        # is still alive return None (we can return None also on BSD).
-        # Reference: http://goo.gl/55XgO
-        try:
-            return os.readlink("/proc/%s/path/cwd" % self.pid)
-        except OSError as err:
-            if err.errno == errno.ENOENT:
-                os.stat("/proc/%s" % self.pid)
-                return None
-            raise
-
-    @wrap_exceptions
-    def memory_info(self):
-        ret = cext.proc_basic_info(self.pid)
-        rss, vms = ret[1] * 1024, ret[2] * 1024
-        return _common.pmem(rss, vms)
-
-    # it seems Solaris uses rss and vms only
-    memory_info_ex = memory_info
-
-    @wrap_exceptions
-    def status(self):
-        code = cext.proc_basic_info(self.pid)[6]
-        # XXX is '?' legit? (we're not supposed to return it anyway)
-        return PROC_STATUSES.get(code, '?')
-
-    @wrap_exceptions
-    def threads(self):
-        ret = []
-        tids = os.listdir('/proc/%d/lwp' % self.pid)
-        hit_enoent = False
-        for tid in tids:
-            tid = int(tid)
-            try:
-                utime, stime = cext.query_process_thread(
-                    self.pid, tid)
-            except EnvironmentError as err:
-                # ENOENT == thread gone in meantime
-                if err.errno == errno.ENOENT:
-                    hit_enoent = True
-                    continue
-                raise
-            else:
-                nt = _common.pthread(tid, utime, stime)
-                ret.append(nt)
-        if hit_enoent:
-            # raise NSP if the process disappeared on us
-            os.stat('/proc/%s' % self.pid)
-        return ret
-
-    @wrap_exceptions
-    def open_files(self):
-        retlist = []
-        hit_enoent = False
-        pathdir = '/proc/%d/path' % self.pid
-        for fd in os.listdir('/proc/%d/fd' % self.pid):
-            path = os.path.join(pathdir, fd)
-            if os.path.islink(path):
-                try:
-                    file = os.readlink(path)
-                except OSError as err:
-                    # ENOENT == file which is gone in the meantime
-                    if err.errno == errno.ENOENT:
-                        hit_enoent = True
-                        continue
-                    raise
-                else:
-                    if isfile_strict(file):
-                        retlist.append(_common.popenfile(file, int(fd)))
-        if hit_enoent:
-            # raise NSP if the process disappeared on us
-            os.stat('/proc/%s' % self.pid)
-        return retlist
-
-    def _get_unix_sockets(self, pid):
-        """Get UNIX sockets used by process by parsing 'pfiles' output."""
-        # TODO: rewrite this in C (...but the damn netstat source code
-        # does not include this part! Argh!!)
-        cmd = "pfiles %s" % pid
-        p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
-                             stderr=subprocess.PIPE)
-        stdout, stderr = p.communicate()
-        if PY3:
-            stdout, stderr = [x.decode(sys.stdout.encoding)
-                              for x in (stdout, stderr)]
-        if p.returncode != 0:
-            if 'permission denied' in stderr.lower():
-                raise AccessDenied(self.pid, self._name)
-            if 'no such process' in stderr.lower():
-                raise NoSuchProcess(self.pid, self._name)
-            raise RuntimeError("%r command error\n%s" % (cmd, stderr))
-
-        lines = stdout.split('\n')[2:]
-        for i, line in enumerate(lines):
-            line = line.lstrip()
-            if line.startswith('sockname: AF_UNIX'):
-                path = line.split(' ', 2)[2]
-                type = lines[i - 2].strip()
-                if type == 'SOCK_STREAM':
-                    type = socket.SOCK_STREAM
-                elif type == 'SOCK_DGRAM':
-                    type = socket.SOCK_DGRAM
-                else:
-                    type = -1
-                yield (-1, socket.AF_UNIX, type, path, "", _common.CONN_NONE)
-
-    @wrap_exceptions
-    def connections(self, kind='inet'):
-        ret = net_connections(kind, _pid=self.pid)
-        # The underlying C implementation retrieves all OS connections
-        # and filters them by PID.  At this point we can't tell whether
-        # an empty list means there were no connections for process or
-        # process is no longer active so we force NSP in case the PID
-        # is no longer there.
-        if not ret:
-            os.stat('/proc/%s' % self.pid)  # will raise NSP if process is gone
-
-        # UNIX sockets
-        if kind in ('all', 'unix'):
-            ret.extend([_common.pconn(*conn) for conn in
-                        self._get_unix_sockets(self.pid)])
-        return ret
-
-    nt_mmap_grouped = namedtuple('mmap', 'path rss anon locked')
-    nt_mmap_ext = namedtuple('mmap', 'addr perms path rss anon locked')
-
-    @wrap_exceptions
-    def memory_maps(self):
-        def toaddr(start, end):
-            return '%s-%s' % (hex(start)[2:].strip('L'),
-                              hex(end)[2:].strip('L'))
-
-        retlist = []
-        rawlist = cext.proc_memory_maps(self.pid)
-        hit_enoent = False
-        for item in rawlist:
-            addr, addrsize, perm, name, rss, anon, locked = item
-            addr = toaddr(addr, addrsize)
-            if not name.startswith('['):
-                try:
-                    name = os.readlink('/proc/%s/path/%s' % (self.pid, name))
-                except OSError as err:
-                    if err.errno == errno.ENOENT:
-                        # sometimes the link may not be resolved by
-                        # readlink() even if it exists (ls shows it).
-                        # If that's the case we just return the
-                        # unresolved link path.
-                        # This seems an incosistency with /proc similar
-                        # to: http://goo.gl/55XgO
-                        name = '/proc/%s/path/%s' % (self.pid, name)
-                        hit_enoent = True
-                    else:
-                        raise
-            retlist.append((addr, perm, name, rss, anon, locked))
-        if hit_enoent:
-            # raise NSP if the process disappeared on us
-            os.stat('/proc/%s' % self.pid)
-        return retlist
-
-    @wrap_exceptions
-    def num_fds(self):
-        return len(os.listdir("/proc/%s/fd" % self.pid))
-
-    @wrap_exceptions
-    def num_ctx_switches(self):
-        return _common.pctxsw(*cext.proc_num_ctx_switches(self.pid))
-
-    @wrap_exceptions
-    def wait(self, timeout=None):
-        try:
-            return _psposix.wait_pid(self.pid, timeout)
-        except _psposix.TimeoutExpired:
-            # support for private module import
-            if TimeoutExpired is None:
-                raise
-            raise TimeoutExpired(timeout, self.pid, self._name)
diff --git a/io_scenario/libs/ubuntu_14_04_x64/psutil/_pswindows.py b/io_scenario/libs/ubuntu_14_04_x64/psutil/_pswindows.py
deleted file mode 100644
index 6068446..0000000
--- a/io_scenario/libs/ubuntu_14_04_x64/psutil/_pswindows.py
+++ /dev/null
@@ -1,484 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Windows platform implementation."""
-
-import errno
-import functools
-import os
-from collections import namedtuple
-
-from psutil import _common
-from psutil._common import conn_tmap, usage_percent, isfile_strict
-from psutil._compat import PY3, xrange, lru_cache
-import _psutil_windows as cext
-
-# process priority constants, import from __init__.py:
-# http://msdn.microsoft.com/en-us/library/ms686219(v=vs.85).aspx
-__extra__all__ = ["ABOVE_NORMAL_PRIORITY_CLASS", "BELOW_NORMAL_PRIORITY_CLASS",
-                  "HIGH_PRIORITY_CLASS", "IDLE_PRIORITY_CLASS",
-                  "NORMAL_PRIORITY_CLASS", "REALTIME_PRIORITY_CLASS",
-                  #
-                  "CONN_DELETE_TCB",
-                  ]
-
-# --- module level constants (gets pushed up to psutil module)
-
-CONN_DELETE_TCB = "DELETE_TCB"
-WAIT_TIMEOUT = 0x00000102  # 258 in decimal
-ACCESS_DENIED_SET = frozenset([errno.EPERM, errno.EACCES,
-                               cext.ERROR_ACCESS_DENIED])
-
-TCP_STATUSES = {
-    cext.MIB_TCP_STATE_ESTAB: _common.CONN_ESTABLISHED,
-    cext.MIB_TCP_STATE_SYN_SENT: _common.CONN_SYN_SENT,
-    cext.MIB_TCP_STATE_SYN_RCVD: _common.CONN_SYN_RECV,
-    cext.MIB_TCP_STATE_FIN_WAIT1: _common.CONN_FIN_WAIT1,
-    cext.MIB_TCP_STATE_FIN_WAIT2: _common.CONN_FIN_WAIT2,
-    cext.MIB_TCP_STATE_TIME_WAIT: _common.CONN_TIME_WAIT,
-    cext.MIB_TCP_STATE_CLOSED: _common.CONN_CLOSE,
-    cext.MIB_TCP_STATE_CLOSE_WAIT: _common.CONN_CLOSE_WAIT,
-    cext.MIB_TCP_STATE_LAST_ACK: _common.CONN_LAST_ACK,
-    cext.MIB_TCP_STATE_LISTEN: _common.CONN_LISTEN,
-    cext.MIB_TCP_STATE_CLOSING: _common.CONN_CLOSING,
-    cext.MIB_TCP_STATE_DELETE_TCB: CONN_DELETE_TCB,
-    cext.PSUTIL_CONN_NONE: _common.CONN_NONE,
-}
-
-
-scputimes = namedtuple('scputimes', ['user', 'system', 'idle'])
-svmem = namedtuple('svmem', ['total', 'available', 'percent', 'used', 'free'])
-pextmem = namedtuple(
-    'pextmem', ['num_page_faults', 'peak_wset', 'wset', 'peak_paged_pool',
-                'paged_pool', 'peak_nonpaged_pool', 'nonpaged_pool',
-                'pagefile', 'peak_pagefile', 'private'])
-pmmap_grouped = namedtuple('pmmap_grouped', ['path', 'rss'])
-pmmap_ext = namedtuple(
-    'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields))
-
-# set later from __init__.py
-NoSuchProcess = None
-AccessDenied = None
-TimeoutExpired = None
-
-
-@lru_cache(maxsize=512)
-def _win32_QueryDosDevice(s):
-    return cext.win32_QueryDosDevice(s)
-
-
-def _convert_raw_path(s):
-    # convert paths using native DOS format like:
-    # "\Device\HarddiskVolume1\Windows\systemew\file.txt"
-    # into: "C:\Windows\systemew\file.txt"
-    if PY3 and not isinstance(s, str):
-        s = s.decode('utf8')
-    rawdrive = '\\'.join(s.split('\\')[:3])
-    driveletter = _win32_QueryDosDevice(rawdrive)
-    return os.path.join(driveletter, s[len(rawdrive):])
-
-
-# --- public functions
-
-
-def virtual_memory():
-    """System virtual memory as a namedtuple."""
-    mem = cext.virtual_mem()
-    totphys, availphys, totpagef, availpagef, totvirt, freevirt = mem
-    #
-    total = totphys
-    avail = availphys
-    free = availphys
-    used = total - avail
-    percent = usage_percent((total - avail), total, _round=1)
-    return svmem(total, avail, percent, used, free)
-
-
-def swap_memory():
-    """Swap system memory as a (total, used, free, sin, sout) tuple."""
-    mem = cext.virtual_mem()
-    total = mem[2]
-    free = mem[3]
-    used = total - free
-    percent = usage_percent(used, total, _round=1)
-    return _common.sswap(total, used, free, percent, 0, 0)
-
-
-def disk_usage(path):
-    """Return disk usage associated with path."""
-    try:
-        total, free = cext.disk_usage(path)
-    except WindowsError:
-        if not os.path.exists(path):
-            msg = "No such file or directory: '%s'" % path
-            raise OSError(errno.ENOENT, msg)
-        raise
-    used = total - free
-    percent = usage_percent(used, total, _round=1)
-    return _common.sdiskusage(total, used, free, percent)
-
-
-def disk_partitions(all):
-    """Return disk partitions."""
-    rawlist = cext.disk_partitions(all)
-    return [_common.sdiskpart(*x) for x in rawlist]
-
-
-def cpu_times():
-    """Return system CPU times as a named tuple."""
-    user, system, idle = cext.cpu_times()
-    return scputimes(user, system, idle)
-
-
-def per_cpu_times():
-    """Return system per-CPU times as a list of named tuples."""
-    ret = []
-    for cpu_t in cext.per_cpu_times():
-        user, system, idle = cpu_t
-        item = scputimes(user, system, idle)
-        ret.append(item)
-    return ret
-
-
-def cpu_count_logical():
-    """Return the number of logical CPUs in the system."""
-    return cext.cpu_count_logical()
-
-
-def cpu_count_physical():
-    """Return the number of physical CPUs in the system."""
-    return cext.cpu_count_phys()
-
-
-def boot_time():
-    """The system boot time expressed in seconds since the epoch."""
-    return cext.boot_time()
-
-
-def net_connections(kind, _pid=-1):
-    """Return socket connections.  If pid == -1 return system-wide
-    connections (as opposed to connections opened by one process only).
-    """
-    if kind not in conn_tmap:
-        raise ValueError("invalid %r kind argument; choose between %s"
-                         % (kind, ', '.join([repr(x) for x in conn_tmap])))
-    families, types = conn_tmap[kind]
-    rawlist = cext.net_connections(_pid, families, types)
-    ret = []
-    for item in rawlist:
-        fd, fam, type, laddr, raddr, status, pid = item
-        status = TCP_STATUSES[status]
-        if _pid == -1:
-            nt = _common.sconn(fd, fam, type, laddr, raddr, status, pid)
-        else:
-            nt = _common.pconn(fd, fam, type, laddr, raddr, status)
-        ret.append(nt)
-    return ret
-
-
-def users():
-    """Return currently connected users as a list of namedtuples."""
-    retlist = []
-    rawlist = cext.users()
-    for item in rawlist:
-        user, hostname, tstamp = item
-        nt = _common.suser(user, None, hostname, tstamp)
-        retlist.append(nt)
-    return retlist
-
-
-pids = cext.pids
-pid_exists = cext.pid_exists
-net_io_counters = cext.net_io_counters
-disk_io_counters = cext.disk_io_counters
-ppid_map = cext.ppid_map  # not meant to be public
-
-
-def wrap_exceptions(fun):
-    """Decorator which translates bare OSError and WindowsError
-    exceptions into NoSuchProcess and AccessDenied.
-    """
-    @functools.wraps(fun)
-    def wrapper(self, *args, **kwargs):
-        try:
-            return fun(self, *args, **kwargs)
-        except OSError as err:
-            # support for private module import
-            if NoSuchProcess is None or AccessDenied is None:
-                raise
-            if err.errno in ACCESS_DENIED_SET:
-                raise AccessDenied(self.pid, self._name)
-            if err.errno == errno.ESRCH:
-                raise NoSuchProcess(self.pid, self._name)
-            raise
-    return wrapper
-
-
-class Process(object):
-    """Wrapper class around underlying C implementation."""
-
-    __slots__ = ["pid", "_name"]
-
-    def __init__(self, pid):
-        self.pid = pid
-        self._name = None
-
-    @wrap_exceptions
-    def name(self):
-        """Return process name, which on Windows is always the final
-        part of the executable.
-        """
-        # This is how PIDs 0 and 4 are always represented in taskmgr
-        # and process-hacker.
-        if self.pid == 0:
-            return "System Idle Process"
-        elif self.pid == 4:
-            return "System"
-        else:
-            return os.path.basename(self.exe())
-
-    @wrap_exceptions
-    def exe(self):
-        # Note: os.path.exists(path) may return False even if the file
-        # is there, see:
-        # http://stackoverflow.com/questions/3112546/os-path-exists-lies
-
-        # see https://github.com/giampaolo/psutil/issues/414
-        # see https://github.com/giampaolo/psutil/issues/528
-        if self.pid in (0, 4):
-            raise AccessDenied(self.pid, self._name)
-        return _convert_raw_path(cext.proc_exe(self.pid))
-
-    @wrap_exceptions
-    def cmdline(self):
-        return cext.proc_cmdline(self.pid)
-
-    def ppid(self):
-        try:
-            return ppid_map()[self.pid]
-        except KeyError:
-            raise NoSuchProcess(self.pid, self._name)
-
-    def _get_raw_meminfo(self):
-        try:
-            return cext.proc_memory_info(self.pid)
-        except OSError as err:
-            if err.errno in ACCESS_DENIED_SET:
-                return cext.proc_memory_info_2(self.pid)
-            raise
-
-    @wrap_exceptions
-    def memory_info(self):
-        # on Windows RSS == WorkingSetSize and VSM == PagefileUsage
-        # fields of PROCESS_MEMORY_COUNTERS struct:
-        # http://msdn.microsoft.com/en-us/library/windows/desktop/
-        #     ms684877(v=vs.85).aspx
-        t = self._get_raw_meminfo()
-        return _common.pmem(t[2], t[7])
-
-    @wrap_exceptions
-    def memory_info_ex(self):
-        return pextmem(*self._get_raw_meminfo())
-
-    def memory_maps(self):
-        try:
-            raw = cext.proc_memory_maps(self.pid)
-        except OSError as err:
-            # XXX - can't use wrap_exceptions decorator as we're
-            # returning a generator; probably needs refactoring.
-            if err.errno in ACCESS_DENIED_SET:
-                raise AccessDenied(self.pid, self._name)
-            if err.errno == errno.ESRCH:
-                raise NoSuchProcess(self.pid, self._name)
-            raise
-        else:
-            for addr, perm, path, rss in raw:
-                path = _convert_raw_path(path)
-                addr = hex(addr)
-                yield (addr, perm, path, rss)
-
-    @wrap_exceptions
-    def kill(self):
-        return cext.proc_kill(self.pid)
-
-    @wrap_exceptions
-    def wait(self, timeout=None):
-        if timeout is None:
-            timeout = cext.INFINITE
-        else:
-            # WaitForSingleObject() expects time in milliseconds
-            timeout = int(timeout * 1000)
-        ret = cext.proc_wait(self.pid, timeout)
-        if ret == WAIT_TIMEOUT:
-            # support for private module import
-            if TimeoutExpired is None:
-                raise RuntimeError("timeout expired")
-            raise TimeoutExpired(timeout, self.pid, self._name)
-        return ret
-
-    @wrap_exceptions
-    def username(self):
-        if self.pid in (0, 4):
-            return 'NT AUTHORITY\\SYSTEM'
-        return cext.proc_username(self.pid)
-
-    @wrap_exceptions
-    def create_time(self):
-        # special case for kernel process PIDs; return system boot time
-        if self.pid in (0, 4):
-            return boot_time()
-        try:
-            return cext.proc_create_time(self.pid)
-        except OSError as err:
-            if err.errno in ACCESS_DENIED_SET:
-                return cext.proc_create_time_2(self.pid)
-            raise
-
-    @wrap_exceptions
-    def num_threads(self):
-        return cext.proc_num_threads(self.pid)
-
-    @wrap_exceptions
-    def threads(self):
-        rawlist = cext.proc_threads(self.pid)
-        retlist = []
-        for thread_id, utime, stime in rawlist:
-            ntuple = _common.pthread(thread_id, utime, stime)
-            retlist.append(ntuple)
-        return retlist
-
-    @wrap_exceptions
-    def cpu_times(self):
-        try:
-            ret = cext.proc_cpu_times(self.pid)
-        except OSError as err:
-            if err.errno in ACCESS_DENIED_SET:
-                ret = cext.proc_cpu_times_2(self.pid)
-            else:
-                raise
-        return _common.pcputimes(*ret)
-
-    @wrap_exceptions
-    def suspend(self):
-        return cext.proc_suspend(self.pid)
-
-    @wrap_exceptions
-    def resume(self):
-        return cext.proc_resume(self.pid)
-
-    @wrap_exceptions
-    def cwd(self):
-        if self.pid in (0, 4):
-            raise AccessDenied(self.pid, self._name)
-        # return a normalized pathname since the native C function appends
-        # "\\" at the and of the path
-        path = cext.proc_cwd(self.pid)
-        return os.path.normpath(path)
-
-    @wrap_exceptions
-    def open_files(self):
-        if self.pid in (0, 4):
-            return []
-        retlist = []
-        # Filenames come in in native format like:
-        # "\Device\HarddiskVolume1\Windows\systemew\file.txt"
-        # Convert the first part in the corresponding drive letter
-        # (e.g. "C:\") by using Windows's QueryDosDevice()
-        raw_file_names = cext.proc_open_files(self.pid)
-        for file in raw_file_names:
-            file = _convert_raw_path(file)
-            if isfile_strict(file) and file not in retlist:
-                ntuple = _common.popenfile(file, -1)
-                retlist.append(ntuple)
-        return retlist
-
-    @wrap_exceptions
-    def connections(self, kind='inet'):
-        return net_connections(kind, _pid=self.pid)
-
-    @wrap_exceptions
-    def nice_get(self):
-        return cext.proc_priority_get(self.pid)
-
-    @wrap_exceptions
-    def nice_set(self, value):
-        return cext.proc_priority_set(self.pid, value)
-
-    # available on Windows >= Vista
-    if hasattr(cext, "proc_io_priority_get"):
-        @wrap_exceptions
-        def ionice_get(self):
-            return cext.proc_io_priority_get(self.pid)
-
-        @wrap_exceptions
-        def ionice_set(self, value, _):
-            if _:
-                raise TypeError("set_proc_ionice() on Windows takes only "
-                                "1 argument (2 given)")
-            if value not in (2, 1, 0):
-                raise ValueError("value must be 2 (normal), 1 (low) or 0 "
-                                 "(very low); got %r" % value)
-            return cext.proc_io_priority_set(self.pid, value)
-
-    @wrap_exceptions
-    def io_counters(self):
-        try:
-            ret = cext.proc_io_counters(self.pid)
-        except OSError as err:
-            if err.errno in ACCESS_DENIED_SET:
-                ret = cext.proc_io_counters_2(self.pid)
-            else:
-                raise
-        return _common.pio(*ret)
-
-    @wrap_exceptions
-    def status(self):
-        suspended = cext.proc_is_suspended(self.pid)
-        if suspended:
-            return _common.STATUS_STOPPED
-        else:
-            return _common.STATUS_RUNNING
-
-    @wrap_exceptions
-    def cpu_affinity_get(self):
-        from_bitmask = lambda x: [i for i in xrange(64) if (1 << i) & x]
-        bitmask = cext.proc_cpu_affinity_get(self.pid)
-        return from_bitmask(bitmask)
-
-    @wrap_exceptions
-    def cpu_affinity_set(self, value):
-        def to_bitmask(l):
-            if not l:
-                raise ValueError("invalid argument %r" % l)
-            out = 0
-            for b in l:
-                out |= 2 ** b
-            return out
-
-        # SetProcessAffinityMask() states that ERROR_INVALID_PARAMETER
-        # is returned for an invalid CPU but this seems not to be true,
-        # therefore we check CPUs validy beforehand.
-        allcpus = list(range(len(per_cpu_times())))
-        for cpu in value:
-            if cpu not in allcpus:
-                raise ValueError("invalid CPU %r" % cpu)
-
-        bitmask = to_bitmask(value)
-        cext.proc_cpu_affinity_set(self.pid, bitmask)
-
-    @wrap_exceptions
-    def num_handles(self):
-        try:
-            return cext.proc_num_handles(self.pid)
-        except OSError as err:
-            if err.errno in ACCESS_DENIED_SET:
-                return cext.proc_num_handles_2(self.pid)
-            raise
-
-    @wrap_exceptions
-    def num_ctx_switches(self):
-        tupl = cext.proc_num_ctx_switches(self.pid)
-        return _common.pctxsw(*tupl)
diff --git a/io_scenario/sensors.py b/io_scenario/sensors.py
deleted file mode 100644
index b7f8cd0..0000000
--- a/io_scenario/sensors.py
+++ /dev/null
@@ -1,56 +0,0 @@
-import os.path
-
-import psutil
-
-
-def get_disk_by_mountpoint(mnt_point):
-    """ Return disk of mountpoint """
-    diskparts = psutil.disk_partitions()
-    for item in diskparts:
-        if item.mountpoint == mnt_point:
-            return os.path.realpath(item.device)
-
-    raise OSError("Can't define disk for {0!r}".format(mnt_point))
-
-
-def find_mount_point(path):
-    """ Find mount point by provided path """
-    path = os.path.abspath(path)
-    while not os.path.ismount(path):
-        path = os.path.dirname(path)
-    return path
-
-
-class DiskInfo(object):
-    def __init__(self, name, rd_cnt=0, wr_cnt=0, rd_bytes=0,
-                 wr_bytes=0, rd_time=0, wr_time=0):
-        self.name = name
-        self.rd_cnt = rd_cnt
-        self.wr_cnt = wr_cnt
-        self.rd_bytes = rd_bytes
-        self.wr_bytes = wr_bytes
-        self.rd_time = rd_time
-        self.wr_time = wr_time
-
-    def __str__(self):
-        message = 'DISK {0.name}: read count {0.rd_cnt}' + \
-                  ', write count {0.wr_cnt}' + \
-                  ', read bytes {0.rd_bytes}' + \
-                  ', write bytes {0.wr_bytes}' + \
-                  ', read time {0.rd_time}' + \
-                  ', write time {0.wr_time}'
-        return message.format(self)
-
-
-def get_io_stats(path):
-    """ Return list of CEPHDiskInfo for all disks that used by CEPH on the
-        local node
-    """
-    stat = psutil.disk_io_counters(perdisk=True)
-    disk = get_disk_by_mountpoint(find_mount_point(path))
-    disk_base = os.path.basename(disk)
-    print disk_base
-    try:
-        return stat[disk_base]
-    except IndexError:
-        raise OSError("Disk {0} not found in stats".format(disk))
diff --git a/report.py b/report.py
index 434c844..4d23b60 100644
--- a/report.py
+++ b/report.py
@@ -1,9 +1,8 @@
 import argparse
 from collections import OrderedDict
-import sys
 
 from chart import charts
-
+from utils import ssize_to_kb
 
 
 OPERATIONS = (('async', ('randwrite asynchronous', 'randread asynchronous',
@@ -15,23 +14,6 @@
                    'a': 'asynchronous'}
 
 
-def ssize_to_kb(ssize):
-    try:
-        smap = dict(k=1, K=1, M=1024, m=1024, G=1024**2, g=1024**2)
-        for ext, coef in smap.items():
-            if ssize.endswith(ext):
-                return int(ssize[:-1]) * coef
-
-        if int(ssize) % 1024 != 0:
-            raise ValueError()
-
-        return int(ssize) / 1024
-
-    except (ValueError, TypeError, AttributeError):
-        tmpl = "Unknow size format {0!r} (or size not multiples 1024)"
-        raise ValueError(tmpl.format(ssize))
-
-
 def parse_args(argv):
     parser = argparse.ArgumentParser()
     parser.add_argument('-s', '--storage', help='storage location', dest="url")
diff --git a/scripts/data2.py b/scripts/data2.py
new file mode 100644
index 0000000..08dbc77
--- /dev/null
+++ b/scripts/data2.py
@@ -0,0 +1,105 @@
+import sys
+import math
+import itertools
+
+
+def key(x):
+    return (x['__meta__']['blocksize'],
+            'd' if x['__meta__']['direct_io'] else 's',
+            x['__meta__']['action'],
+            x['__meta__']['concurence'])
+
+
+def med_dev(vals):
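+    # NOTE: despite the name, this returns the arithmetic mean and the
+    # population standard deviation of vals, both truncated to int.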
+    med = sum(vals) / len(vals)
+    dev = ((sum(abs(med - i) ** 2 for i in vals) / len(vals)) ** 0.5)
+    return int(med), int(dev)
+
+
+def round_deviation(med_dev):
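+    # Round both values to the precision given by the deviation's two most
+    # significant digits, e.g. (125678, 5678) -> (125600, 5600).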
+    med, dev = med_dev
+
+    if dev < 1E-7:
+        return med_dev
+
+    dev_div = 10.0 ** (math.floor(math.log10(dev)) - 1)
+    dev = int(dev / dev_div) * dev_div
+    med = int(med / dev_div) * dev_div
+    return (type(med_dev[0])(med),
+            type(med_dev[1])(dev))
+
+
+def groupby_globally(data, key_func):
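+    # itertools.groupby only merges *consecutive* items with equal keys, so
+    # accumulate every run into a dict to get a truly global grouping.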
+    grouped = {}
+    grouped_iter = itertools.groupby(data, key_func)
+
+    for (bs, cache_tp, act, conc), curr_data_it in grouped_iter:
+        key = (bs, cache_tp, act, conc)
+        grouped.setdefault(key, []).extend(curr_data_it)
+
+    return grouped
+
+
+template = "{bs:>4}  {action:>12}  {cache_tp:>3}  {conc:>4}"
+template += " | {iops[0]:>6} ~ {iops[1]:>5} | {bw[0]:>7} ~ {bw[1]:>6}"
+template += " | {lat[0]:>6} ~ {lat[1]:>5} |"
+
+headers = dict(bs="BS",
+               action="operation",
+               cache_tp="S/D",
+               conc="CONC",
+               iops=("IOPS", "dev"),
+               bw=("BW kBps", "dev"),
+               lat=("LAT ms", "dev"))
+
+
+def main(argv):
+    data = []
+
+    with open(argv[1]) as fc:
+        block = None
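+        # The input is a stream of pprint-formatted benchmark result dicts;
+        # a record is complete once its braces balance, then it is eval()-ed.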
+        for line in fc:
+            if line.startswith("{'__meta__':"):
+                block = line
+            elif block is not None:
+                block += line
+
+            if block is not None:
+                if block.count('}') == block.count('{'):
+                    data.append(eval(block))
+                    block = None
+
+    grouped = groupby_globally(data, key)
+
+    print template.format(**headers)
+
+    for (bs, cache_tp, act, conc), curr_data in sorted(grouped.items()):
+        iops = med_dev([i['iops'] * int(conc) for i in curr_data])
+        bw_mean = med_dev([i['bw_mean'] * int(conc) for i in curr_data])
+        lat = med_dev([i['lat'] / 1000 for i in curr_data])
+
+        iops = round_deviation(iops)
+        bw_mean = round_deviation(bw_mean)
+        lat = round_deviation(lat)
+
+        params = dict(
+            bs=bs,
+            action=act,
+            cache_tp=cache_tp,
+            iops=iops,
+            bw=bw_mean,
+            lat=lat,
+            conc=conc
+        )
+
+        print template.format(**params)
+
+
+if __name__ == "__main__":
+    exit(main(sys.argv))
+
+    # vals = [(123, 23), (125678, 5678), (123.546756, 23.77),
+    #         (123.546756, 102.77), (0.1234, 0.0224),
+    #         (0.001234, 0.000224), (0.001234, 0.0000224)]
+    # for val in vals:
+    #     print val, "=>", round_deviation(val)
diff --git a/scripts/gen_load.sh b/scripts/gen_load.sh
new file mode 100755
index 0000000..e3af3cd
--- /dev/null
+++ b/scripts/gen_load.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+TESTER="--tester-type fio"
+CACHE="--cache-modes d"
+REPEATS="--repeats 3"
+
+# python generate_load.py $TESTER --size 4k --opers randwrite $CACHE --concurrences 1 --direct-default-size x1000
+# python generate_load.py $TESTER --size 4k --opers randwrite $CACHE --concurrences 1 --direct-default-size x2000
+# python generate_load.py $TESTER --size 4k --opers randwrite $CACHE --concurrences 1 --direct-default-size x4000
+# python generate_load.py $TESTER --size 4k --opers randwrite $CACHE --concurrences 1 --direct-default-size x8000
+# python generate_load.py $TESTER --size 4k --opers randwrite $CACHE --concurrences 1 --direct-default-size x16000
+# python generate_load.py $TESTER --size 4k --opers randwrite $CACHE --concurrences 1 --direct-default-size x32000
+# python generate_load.py $TESTER --size 4k --opers randwrite $CACHE --concurrences 1 --direct-default-size x64000
+# python generate_load.py $TESTER --size 4k --opers randwrite $CACHE --concurrences 1 --direct-default-size x128000
+
+python generate_load.py $TESTER --size 4k --opers randwrite $CACHE --concurrences 1 4 8 $REPEATS --io-size 10G
+python generate_load.py $TESTER --size 4k --opers randread $CACHE --concurrences 1 4 8 $REPEATS --io-size 10G
+
+python generate_load.py $TESTER --size 4k --opers randwrite --cache-modes s --concurrences 1 $REPEATS --io-size 10G
+python generate_load.py $TESTER --size 4k --opers randread randwrite $CACHE --concurrences 1 $REPEATS --io-size 10G
+python generate_load.py $TESTER --size 2m --opers read write $CACHE --concurrences 1 $REPEATS --io-size 10G
diff --git a/scripts/generate_load.py b/scripts/generate_load.py
index f99c816..0d25038 100644
--- a/scripts/generate_load.py
+++ b/scripts/generate_load.py
@@ -1,14 +1,7 @@
-# BLOCK_SIZES = "1k 4k 64k 256k 1m"
-# OPERATIONS="randwrite write randread read"
-# SYNC_TYPES="s a d"
-# REPEAT_COUNT="3"
-# CONCURRENCES="1 8 64"
+import sys
+import argparse
 
-from utils import ssize_to_kb
-
-SYNC_FACTOR = "x500"
-DIRECT_FACTOR = "x10000"
-ASYNC_FACTOR = "r2"
+from disk_perf_test_tool.utils import ssize_to_b
 
 
 def make_list(x):
@@ -16,76 +9,97 @@
         return [x]
     return x
 
-HDD_SIZE_KB = 45 * 1000 * 1000
 
-
-def max_file():
-    pass
-
-
-def make_load(sizes, opers, sync_types, concurrence,
-              tester_type='iozone', repeat_count=3):
+def make_load(settings):
 
     iodepth = 1
-    for conc in make_list(concurrence):
-        for bsize in make_list(sizes):
-            for oper in make_list(opers):
-                for sync_type in make_list(sync_types):
+    for conc in make_list(settings.concurrences):
+        for bsize in make_list(settings.sizes):
+            for oper in make_list(settings.opers):
+                for cache_mode in make_list(settings.cache_modes):
 
                     # filter out too slow options
-                    if bsize in "1k 4k" and sync_type == "a":
+                    if bsize in "1k 4k" and cache_mode == "a":
                         continue
 
                     # filter out sync reads
-                    if oper in "read randread" and sync_type == "s":
+                    if oper in "read randread" and cache_mode == "s":
                         continue
 
-                    if sync_type == "s":
-                        size_sync_opts = "--iosize {0} -s".format(SYNC_FACTOR)
-                    elif sync_type == "d":
-                        if oper == 'randread':
-                            assert SYNC_FACTOR[0] == 'x'
-                            max_f = int(SYNC_FACTOR[1:])
-                        else:
-                            max_f = None
-
-                        mmax_f = HDD_SIZE_KB / (int(conc) * ssize_to_kb(bsize))
-
-                        if max_f is None or mmax_f > max_f:
-                            max_f = mmax_f
-
-                        assert DIRECT_FACTOR[0] == 'x'
-                        if max_f > int(DIRECT_FACTOR[1:]):
-                            max_f = DIRECT_FACTOR
-                        else:
-                            max_f = "x{0}".format(max_f)
-
-                        size_sync_opts = "--iosize {0} -d".format(max_f)
-
+                    if settings.io_size is not None:
+                        size_sync_opts = " --iosize " + str(settings.io_size)
+                        if cache_mode == "s":
+                            size_sync_opts += " -s"
+                        elif cache_mode == "d":
+                            size_sync_opts += " -d"
                     else:
-                        if oper == 'randread' or oper == 'read':
-                            size_sync_opts = "--iosize " + str(SYNC_FACTOR)
+                        if cache_mode == "s":
+                            size_sync_opts = "--iosize {0} -s".format(
+                                settings.sync_default_size)
+                        elif cache_mode == "d":
+                            if oper == 'randread':
+                                assert settings.sync_default_size[0] == 'x'
+                                max_f = int(settings.sync_default_size[1:])
+                            else:
+                                max_f = None
+
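+                            # Cap the "x<N>" size multiplier so that
+                            # concurrency * blocksize * N does not exceed the disk size.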
+                            mmax_f = ssize_to_b(settings.hdd_size) / \
+                                (int(conc) * ssize_to_b(bsize))
+
+                            if max_f is None or mmax_f > max_f:
+                                max_f = mmax_f
+
+                            assert settings.direct_default_size[0] == 'x'
+                            if max_f > int(settings.direct_default_size[1:]):
+                                max_f = settings.direct_default_size
+                            else:
+                                max_f = "x{0}".format(max_f)
+
+                            size_sync_opts = "--iosize {0} -d".format(max_f)
+
                         else:
-                            size_sync_opts = "--iosize " + str(ASYNC_FACTOR)
+                            if oper == 'randread' or oper == 'read':
+                                size_sync_opts = "--iosize " + \
+                                    str(settings.sync_default_size)
+                            else:
+                                size_sync_opts = "--iosize " + \
+                                    str(settings.async_default_size)
 
                     # size_sync_opts = get_file_size_opts(sync_type)
 
-                    io_opts = "--type {0} ".format(tester_type)
+                    io_opts = "--type {0} ".format(settings.tester_type)
                     io_opts += "-a {0} ".format(oper)
                     io_opts += "--iodepth {0} ".format(iodepth)
                     io_opts += "--blocksize {0} ".format(bsize)
                     io_opts += size_sync_opts + " "
                     io_opts += "--concurrency {0}".format(conc)
 
-                    for i in range(repeat_count):
+                    for i in range(settings.repeats):
                         yield io_opts
 
 
-sizes = "4k 64k 2m".split()
-opers = "randwrite write randread read".split()
-sync_types = "s a d".split()
-concurrence = "1 8 64".split()
+def parse_opts(args):
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--sizes', nargs="+", required=True)
+    parser.add_argument('--opers', nargs="+", required=True)
+    parser.add_argument('--cache-modes', nargs="+", required=True)
+    parser.add_argument('--concurrences', nargs="+", required=True)
+    parser.add_argument('--repeats', type=int, default=3)
+    parser.add_argument("--hdd-size", default="45G")
+    parser.add_argument("--tester-type", default="iozone")
+    parser.add_argument("--io-size", default=None)
 
-for io_opts in make_load(sizes=sizes, concurrence=concurrence,
-                         sync_types=sync_types, opers=opers):
-    print io_opts
+    parser.add_argument("--direct-default-size", default="x1000")
+    parser.add_argument("--sync-default-size", default="x1000")
+    parser.add_argument("--async-default-size", default="r2")
+
+    return parser.parse_args(args[1:])
+
+
+def main(args):
+    opts = parse_opts(args)
+    for io_opts in make_load(opts):
+        print "python io.py --test-file /opt/xxx.bin " + io_opts
+
+if __name__ == "__main__":
+    exit(main(sys.argv))
diff --git a/scripts/prepare.sh b/scripts/prepare.sh
index 3d85072..7151b2f 100644
--- a/scripts/prepare.sh
+++ b/scripts/prepare.sh
@@ -6,7 +6,7 @@
 
 # settings
 FL_RAM=256
-FL_HDD=50
+FL_HDD=20
 FL_CPU=1
 
 
@@ -100,6 +100,7 @@
     if [ -z "$keypair_id" ] ; then
         echo "Creating server group $SERV_GROUP. Key would be stored into $KEY_FILE_NAME"
         nova keypair-add "$KEYPAIR_NAME" > "$KEY_FILE_NAME"
+        chmod og= "$KEY_FILE_NAME"
     fi
 
     echo "Adding rules for ping and ssh"
diff --git a/scripts/run_vm.sh b/scripts/run_vm.sh
new file mode 100644
index 0000000..ddc5cfc
--- /dev/null
+++ b/scripts/run_vm.sh
@@ -0,0 +1,75 @@
+#!/bin/bash
+MASTER_IP=$1
+FUEL_PASSWD=$2
+
+OS_ORIGIN_IP=192.168.0.2
+OS_EXT_IP=172.16.53.2
+VM_NAME=koder-disk-test
+
+FIXED_NET_NAME="net04"
+FLOATING_NET="net04_ext"
+
+my_dir="$(dirname "$0")"
+source "$my_dir/config.sh"
+SSH_OVER_MASTER="sshpass -p${FUEL_PASSWD} ssh root@${MASTER_IP}"
+VOLUME_NAME="test-volume"
+VOLUME_SIZE=20
+VOLUME_DEVICE="/dev/vdb"
+
+# VM_IP=$(nova floating-ip-create "$FLOATIN_NET" | grep "$FLOATIN_NET" | awk '{print $2}')
+VM_IP=172.16.53.18
+
+function get_openrc() {
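+	# Grab openrc from a controller node (via the Fuel master) and rewrite
+	# the internal management IP to the externally reachable one.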
+	OPENRC=`tempfile`
+	CONTROLLER_NODE=$($SSH_OVER_MASTER fuel node | grep controller | awk '-F|' '{gsub(" ", "", $5); print $5}')
+	$SSH_OVER_MASTER ssh $CONTROLLER_NODE cat openrc 2>/dev/null | sed "s/$OS_ORIGIN_IP/$OS_EXT_IP/g" > $OPENRC
+	echo $OPENRC
+}
+
+function boot_vm() {
+	FIXED_NET_ID=$(nova net-list | grep "\b${FIXED_NET_NAME}\b" | awk '{print $2}')
+	nova boot --flavor "$FLAVOR_NAME" --image "$IMAGE_NAME" --key-name "$KEYPAIR_NAME" --security-groups default --nic net-id=$FIXED_NET_ID $VM_NAME
+	nova floating-ip-associate $VM_NAME $VM_IP
+	VOL_ID=$(cinder create --display-name $VOLUME_NAME $VOLUME_SIZE | grep '\bid\b' | awk '{print $4}')
+	nova volume-attach $VM_NAME $VOL_ID $VOLUME_DEVICE
+}
+
+function prepare_vm() {
+	scp -i "$KEY_FILE_NAME" -r ../io_scenario ubuntu@${VM_IP}:/tmp
+	scp -i "$KEY_FILE_NAME" $DEBS ubuntu@${VM_IP}:/tmp
+	scp -i "$KEY_FILE_NAME" single_node_test_short.sh ubuntu@${VM_IP}:/tmp
+	ssh -i "$KEY_FILE_NAME" ubuntu@${VM_IP} sudo dpkg -i $DEBS
+}
+
+function prepare_node() {
+	COMPUTE_NODE=$($SSH_OVER_MASTER fuel node | grep compute | awk '-F|' '{gsub(" ", "", $5); print $5}')
+
+	sshpass -p${FUEL_PASSWD} scp -r ../io_scenario root@${MASTER_IP}:/tmp
+	$SSH_OVER_MASTER scp -r /tmp/io_scenario $COMPUTE_NODE:/tmp
+
+	sshpass -p${FUEL_PASSWD} scp $DEBS root@${MASTER_IP}:/tmp
+
+	$SSH_OVER_MASTER scp $DEBS $COMPUTE_NODE:/tmp
+	$SSH_OVER_MASTER ssh $COMPUTE_NODE dpkg -i $DEBS
+
+	sshpass -p${FUEL_PASSWD} scp single_node_test_short.sh root@${MASTER_IP}:/tmp
+	$SSH_OVER_MASTER scp /tmp/single_node_test_short.sh $COMPUTE_NODE:/tmp
+}
+
+function download_debs() {
+	pushd /tmp >/dev/null
+	rm -f *.deb >/dev/null
+	aptitude download libibverbs1 librdmacm1 libaio1 fio >/dev/null
+	popd >/dev/null
+	echo /tmp/*.deb
+}
+
+DEBS=`download_debs`
+OPENRC=`get_openrc`
+source $OPENRC
+rm $OPENRC
+
+boot_vm
+prepare_vm
+
+
diff --git a/scripts/show_disk_delta.py b/scripts/show_disk_delta.py
index 634b201..754e7a8 100644
--- a/scripts/show_disk_delta.py
+++ b/scripts/show_disk_delta.py
@@ -1,7 +1,7 @@
 import os
 import sys
 import time
-# import pprint
+import pprint
 import threading
 
 
@@ -36,8 +36,11 @@
     return {key: (val - obj2[key]) for key, val in obj1.items()}
 
 
-def run_tool(cmd, suppress_console=True):
-    os.system(" ".join(cmd) + " >/dev/null 2>&1 ")
+def run_tool(cmd, suppress_console=False):
+    s_cmd = " ".join(cmd)
+    if suppress_console:
+        s_cmd += " >/dev/null 2>&1 "
+    os.system(s_cmd)
 
 devices = sys.argv[1].split(',')
 cmd = sys.argv[2:]
@@ -47,6 +50,7 @@
 
 rstats = read_dstats()
 prev_stats = {device: rstats[device] for device in devices}
+begin_stats = prev_stats
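+# keep the very first sample so the total delta can be printed at the end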
 
 th.start()
 
@@ -58,15 +62,15 @@
     rstats = read_dstats()
     new_stats = {device: rstats[device] for device in devices}
 
-    print "Delta writes complete =",
+    # print "Delta writes complete =",
     for device in devices:
         delta = new_stats[device][wr_compl] - prev_stats[device][wr_compl]
-        print device, delta,
-    print
+        # print device, delta,
+    # print
 
     prev_stats = new_stats
 
     if not th.is_alive():
         break
 
-# pprint.pprint(diff_stats(stat2, stat1))
+pprint.pprint(diff_stats(new_stats[device], begin_stats[device]))
diff --git a/scripts/single_node_test_short.sh b/scripts/single_node_test_short.sh
new file mode 100644
index 0000000..384e8bd
--- /dev/null
+++ b/scripts/single_node_test_short.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+set -x
+
+TEST_FILE=$1
+OUT_FILE=$2
+NUM_CYCLES=7
+
+function run_tests(){
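+	# Drop the page cache before every run so cached data does not skew the results.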
+	OPTS="--test-file $TEST_FILE --type fio --iodepth 1 --iosize 10G"
+
+	sync ; echo 3 > /proc/sys/vm/drop_caches ; python io.py $OPTS -a randwrite --blocksize 4k -d --concurrency 1
+
+	sync ; echo 3 > /proc/sys/vm/drop_caches ; dd if=/dev/zero of=$TEST_FILE bs=1048576 count=10240
+	sync ; echo 3 > /proc/sys/vm/drop_caches ; dd if=/dev/zero of=$TEST_FILE bs=1048576 count=10240
+
+	for cycle in $(seq $NUM_CYCLES) ; do
+		for conc in 1 4 8 ; do
+			sync ; echo 3 > /proc/sys/vm/drop_caches ; python io.py $OPTS -a randwrite --blocksize 4k -d --concurrency $conc
+		done
+
+		for conc in 1 4 8 ; do
+			sync ; echo 3 > /proc/sys/vm/drop_caches ; python io.py $OPTS -a randread  --blocksize 4k -d --concurrency $conc
+		done
+
+		sync ; echo 3 > /proc/sys/vm/drop_caches ; python io.py $OPTS -a randwrite --blocksize 4k -s --concurrency 1
+
+		sync ; echo 3 > /proc/sys/vm/drop_caches ; python io.py $OPTS -a read      --blocksize 2m -d --concurrency 1
+		sync ; echo 3 > /proc/sys/vm/drop_caches ; python io.py $OPTS -a write     --blocksize 2m -d --concurrency 1
+	done
+}
+
+run_tests "$FILE_1" 2>&1 | tee "$OUT_FILE"
+
+
diff --git a/sensors/host1_config.json b/sensors/host1_config.json
deleted file mode 100644
index 5f9d2e8..0000000
--- a/sensors/host1_config.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
-	"block-io": {
-		"allowed_prefixes": ["sdb1"]
-	},
-	"net-io": {
-		"allowed_prefixes": ["eth0"]
-	},
-    "system-cpu": {
-        "allowed_prefixes": ["cpu"]
-    },
-    "system-ram": {
-    },
-    "perprocess-cpu": {
-        "allowed_prefixes": ["ceph"]
-    },
-    "perprocess-ram": {
-        "allowed_prefixes": ["ceph"]
-    }
-}
diff --git a/utils.py b/utils.py
index ca65409..5b9c0a5 100644
--- a/utils.py
+++ b/utils.py
@@ -154,3 +154,16 @@
     except (ValueError, TypeError, AttributeError):
         tmpl = "Unknow size format {0!r} (or size not multiples 1024)"
         raise ValueError(tmpl.format(ssize))
+
+
+def ssize_to_b(ssize):
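+    # Convert sizes like "10G", "4k" or a plain integer string (bytes) to bytes.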
+    try:
+        smap = dict(k=1, K=1, M=1024, m=1024, G=1024**2, g=1024**2)
+        for ext, coef in smap.items():
+            if ssize.endswith(ext):
+                return int(ssize[:-1]) * coef * 1024
+
+        return int(ssize)
+    except (ValueError, TypeError, AttributeError):
+        tmpl = "Unknow size format {0!r} (or size not multiples 1024)"
+        raise ValueError(tmpl.format(ssize))