Initial commit with fixtures

- add fixtures for hardware and underlay
- add fuel-devops template tcpcloud-default.yaml

* Migration of fixtures is not finished yet
diff --git a/tcp_tests/managers/__init__.py b/tcp_tests/managers/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tcp_tests/managers/__init__.py
diff --git a/tcp_tests/managers/envmanager_devops.py b/tcp_tests/managers/envmanager_devops.py
new file mode 100644
index 0000000..51b6520
--- /dev/null
+++ b/tcp_tests/managers/envmanager_devops.py
@@ -0,0 +1,373 @@
+#    Copyright 2016 Mirantis, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import os
+
+from devops import error
+from devops.helpers import helpers
+from devops import models
+from django import db
+from oslo_config import cfg
+
+from tcp_tests import logger
+from tcp_tests import settings
+from tcp_tests import settings_oslo
+from tcp_tests.helpers import env_config
+from tcp_tests.helpers import exceptions
+from tcp_tests.helpers import ext
+
+LOG = logger.logger
+
+
+class EnvironmentManager(object):
+    """Class-helper for creating VMs via devops environments"""
+
+    __config = None
+
+    def __init__(self, config=None):
+        """Initializing class instance and create the environment
+
+        :param config: oslo.config object
+        :param config.hardware.conf_path: path to devops YAML template
+        :param config.hardware.current_snapshot: name of the snapshot that
+                                                 descriebe environment status.
+        """
+        self.__devops_config = env_config.EnvironmentConfig()
+        self._env = None
+        self.__config = config
+
+        if config.hardware.conf_path is not None:
+            self._devops_config.load_template(config.hardware.conf_path)
+        else:
+            raise Exception("Devops YAML template is not set in config object")
+
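+        # Reuse an existing environment (its current snapshot must match
+        # the config), or create a new one if it doesn't exist yet.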
+        try:
+            self._get_env_by_name(self._d_env_name)
+            if not self.has_snapshot(config.hardware.current_snapshot):
+                raise exceptions.EnvironmentSnapshotMissing(
+                    self._d_env_name, config.hardware.current_snapshot)
+        except error.DevopsObjNotFound:
+            LOG.info("Environment doesn't exist, creating a new one")
+            self._create_environment()
+        self.set_dns_config()
+
+    @property
+    def _devops_config(self):
+        return self.__devops_config
+
+    @_devops_config.setter
+    def _devops_config(self, conf):
+        """Setter for self.__devops_config
+
+        :param conf: tcp_tests.helpers.env_config.EnvironmentConfig
+        """
+        if not isinstance(conf, env_config.EnvironmentConfig):
+            msg = ("Unexpected type of devops config. Got '{0}' " +
+                   "instead of '{1}'")
+            raise TypeError(
+                msg.format(
+                    type(conf).__name__,
+                    env_config.EnvironmentConfig.__name__
+                )
+            )
+        self.__devops_config = conf
+
+    def lvm_storages(self):
+        """Returns a dict object of lvm storages in current environment
+
+        returned data example:
+            {
+                "master": {
+                    "id": "virtio-bff72959d1a54cb19d08"
+                },
+                "slave-0": {
+                    "id": "virtio-5e33affc8fe44503839f"
+                },
+                "slave-1": {
+                    "id": "virtio-10b6a262f1ec4341a1ba"
+                },
+            }
+
+        :rtype: dict
+        """
+        result = {}
+        for node in self.master_nodes + self.slave_nodes:
+            # Use a list so len() works on both Python 2 and Python 3
+            # (filter() returns an iterator on Python 3).
+            lvm = [x for x in node.disk_devices if x.volume.name == 'lvm']
+            if len(lvm) == 0:
+                continue
+            lvm = lvm[0]
+            result[node.name] = {}
+            result_node = result[node.name]
+            result_node['id'] = "{bus}-{serial}".format(
+                bus=lvm.bus,
+                serial=lvm.volume.serial[:20])
+            LOG.info("Got disk-id '{}' for node '{}'".format(
+                result_node['id'], node.name))
+        return result
+
+    @property
+    def _d_env_name(self):
+        """Get environment name from fuel devops config
+
+        :rtype: string
+        """
+        return self._devops_config['env_name']
+
+    def _get_env_by_name(self, name):
+        """Set existing environment by name
+
+        :param name: string
+        """
+        self._env = models.Environment.get(name=name)
+
+    def _get_default_node_group(self):
+        return self._env.get_group(name='default')
+
+    def _get_network_pool(self, net_pool_name):
+        default_node_group = self._get_default_node_group()
+        network_pool = default_node_group.get_network_pool(name=net_pool_name)
+        return network_pool
+
+    def get_ssh_data(self, roles=None):
+        """Generate ssh config for Underlay
+
+        :param roles: list of strings
+        """
+        if roles is None:
+            raise Exception("No roles specified for the environment!")
+
+        config_ssh = []
+        for d_node in self._env.get_nodes(role__in=roles):
+            ssh_data = {
+                'node_name': d_node.name,
+                'address_pool': self._get_network_pool(
+                    ext.NETWORK_TYPE.public).address_pool.name,
+                'host': self.node_ip(d_node),
+                'login': settings.SSH_NODE_CREDENTIALS['login'],
+                'password': settings.SSH_NODE_CREDENTIALS['password'],
+            }
+            config_ssh.append(ssh_data)
+        return config_ssh
+
+    def create_snapshot(self, name, description=None):
+        """Create named snapshot of current env.
+
+        - Create libvirt snapshots for all nodes in the environment
+        - Save 'config' object to a file 'config_<name>.ini'
+
+        :param name: string
+        """
+        LOG.info("Creating snapshot named '{0}'".format(name))
+        self.__config.hardware.current_snapshot = name
+        LOG.info("current config '{0}'".format(
+            self.__config.hardware.current_snapshot))
+        if self._env is not None:
+            LOG.info('trying to suspend ....')
+            self._env.suspend()
+            LOG.info('trying to snapshot ....')
+            self._env.snapshot(name, description=description, force=True)
+            LOG.info('trying to resume ....')
+            self._env.resume()
+        else:
+            raise exceptions.EnvironmentIsNotSet()
+        settings_oslo.save_config(self.__config, name, self._env.name)
+
+    def _get_snapshot_config_name(self, snapshot_name):
+        """Get config name for the environment"""
+        env_name = self._env.name
+        if env_name is None:
+            env_name = 'config'
+        test_config_path = os.path.join(
+            settings.LOGS_DIR, '{0}_{1}.ini'.format(env_name, snapshot_name))
+        return test_config_path
+
+    def revert_snapshot(self, name):
+        """Revert snapshot by name
+
+        - Revert libvirt snapshots for all nodes in the environment
+        - Try to reload 'config' object from a file 'config_<name>.ini'.
+          If the file is not found, proceed with defaults.
+        - Set <name> as the current state of the environment after reload
+
+        :param name: string
+        """
+        LOG.info("Reverting from snapshot named '{0}'".format(name))
+        if self._env is not None:
+            self._env.revert(name=name)
+            LOG.info("Resuming environment after revert")
+            self._env.resume()
+        else:
+            raise exceptions.EnvironmentIsNotSet()
+
+        try:
+            test_config_path = self._get_snapshot_config_name(name)
+            settings_oslo.reload_snapshot_config(self.__config,
+                                                 test_config_path)
+        except cfg.ConfigFilesNotFoundError as conf_err:
+            LOG.error("Config file(s) {0} not found!".format(
+                conf_err.config_files))
+
+        self.__config.hardware.current_snapshot = name
+
+    def _create_environment(self):
+        """Create environment and start VMs.
+
+        If a devops config was loaded earlier, create and start the VMs;
+        otherwise raise, since there is no template to generate them from.
+        """
+        if self._devops_config.config is None:
+            raise exceptions.DevopsConfigPathIsNotSet()
+        dconfig = self._devops_config
+        env_name = dconfig['env_name']
+        LOG.debug(
+            'Preparing to create environment named "{0}"'.format(env_name)
+        )
+        if env_name is None:
+            LOG.error('Environment name is not set!')
+            raise exceptions.EnvironmentNameIsNotSet()
+        try:
+            self._env = models.Environment.create_environment(
+                dconfig.config
+            )
+        except db.IntegrityError:
+            LOG.error(
+                'Seems like environment {0} already exists.'.format(env_name)
+            )
+            raise exceptions.EnvironmentAlreadyExists(env_name)
+        self._env.define()
+        LOG.info(
+            'Environment "{0}" created and started'.format(env_name)
+        )
+
+    def start(self):
+        """Method for start environment
+
+        """
+        if self._env is None:
+            raise exceptions.EnvironmentIsNotSet()
+        self._env.start()
+        for node in self.master_nodes + self.slave_nodes:
+            LOG.debug("Waiting for SSH on node '{}...'".format(node.name))
+            timeout = 360
+            helpers.wait(
+                lambda: helpers.tcp_ping(self.node_ip(node), 22),
+                timeout=timeout,
+                timeout_msg="Node '{}' didn't open SSH in {} sec".format(
+                    node.name, timeout
+                )
+            )
+
+    def resume(self):
+        """Resume environment"""
+        if self._env is None:
+            raise exceptions.EnvironmentIsNotSet()
+        self._env.resume()
+
+    def suspend(self):
+        """Suspend environment"""
+        if self._env is None:
+            raise exceptions.EnvironmentIsNotSet()
+        self._env.suspend()
+
+    def stop(self):
+        """Stop environment"""
+        if self._env is None:
+            raise exceptions.EnvironmentIsNotSet()
+        self._env.destroy()
+
+    def has_snapshot(self, name):
+        return self._env.has_snapshot(name)
+
+    def has_snapshot_config(self, name):
+        test_config_path = self._get_snapshot_config_name(name)
+        return os.path.isfile(test_config_path)
+
+    def delete_environment(self):
+        """Delete environment
+
+        """
+        LOG.debug("Deleting environment")
+        self._env.erase()
+
+    def __get_nodes_by_role(self, node_role):
+        """Get node by given role name
+
+        :param node_role: string
+        :rtype: devops.models.Node
+        """
+        LOG.debug('Trying to get nodes by role {0}'.format(node_role))
+        return self._env.get_nodes(role=node_role)
+
+    @property
+    def master_nodes(self):
+        """Get all master nodes
+
+        :rtype: list
+        """
+        nodes = self.__get_nodes_by_role(
+            node_role=ext.UNDERLAY_NODE_ROLE.salt_master)
+        return nodes
+
+    @property
+    def slave_nodes(self):
+        """Get all slave nodes
+
+        :rtype: list
+        """
+        nodes = self.__get_nodes_by_role(
+            node_role=ext.UNDERLAY_NODE_ROLE.salt_minion)
+        return nodes
+
+    @staticmethod
+    def node_ip(node):
+        """Determine node's IP
+
+        :param node: devops.models.Node
+        :return: string
+        """
+        LOG.debug('Trying to determine {0} ip.'.format(node.name))
+        return node.get_ip_address_by_network_name(
+            ext.NETWORK_TYPE.public
+        )
+
+#    @property
+#    def admin_ips(self):
+#        """Property to get ip of admin role VMs
+#
+#        :return: list
+#        """
+#        nodes = self.master_nodes
+#        return [self.node_ip(node) for node in nodes]
+
+#    @property
+#    def slave_ips(self):
+#        """Property to get ip(s) of slave role VMs
+#
+#        :return: list
+#        """
+#        nodes = self.slave_nodes
+#        return [self.node_ip(node) for node in nodes]
+
+    @property
+    def nameserver(self):
+        return self._env.router(ext.NETWORK_TYPE.public)
+
+    def set_dns_config(self):
+        """Set the local nameserver as the default DNS for the underlay"""
+        if not self.__config.underlay.nameservers:
+            self.__config.underlay.nameservers = [self.nameserver]
+        if not self.__config.underlay.upstream_dns_servers:
+            self.__config.underlay.upstream_dns_servers = [self.nameserver]
diff --git a/tcp_tests/managers/envmanager_empty.py b/tcp_tests/managers/envmanager_empty.py
new file mode 100644
index 0000000..b9ab8e1
--- /dev/null
+++ b/tcp_tests/managers/envmanager_empty.py
@@ -0,0 +1,106 @@
+#    Copyright 2016 Mirantis, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tcp_tests import settings_oslo
+
+
+class EnvironmentManagerEmpty(object):
+    """Class-helper for creating VMs via devops environments"""
+
+    __config = None
+
+    def __init__(self, config=None):
+        """Initializing class instance and create the environment
+
+        :param config: oslo.config object
+        :param config.hardware.conf_path: path to devops YAML template
+        :param config.hardware.current_snapshot: name of the snapshot that
+                                                 descriebe environment status.
+        """
+        self.__config = config
+
+    def lvm_storages(self):
+        """Returns data of lvm_storages on nodes in environment
+
+        It's expected that data of self.__config.underlay.lvm will be
+        like this:
+            {
+                "node1": {
+                    "device": "vdb"
+                },
+                "node2": {
+                    "device": "vdb"
+                },
+                "node3": {
+                    "device": "vdb"
+                },
+            }
+        :rtype: dict
+        """
+        return self.__config.underlay.lvm
+
+    def get_ssh_data(self, roles=None):
+        raise Exception("EnvironmentManagerEmpty doesn't have SSH details. "
+                        "Please provide SSH details in config.underlay.ssh")
+
+    def create_snapshot(self, name, description=None):
+        """Store environmetn state into the config object
+
+        - Store the state of the environment <name> to the 'config' object
+        - Save 'config' object to a file 'config_<name>.ini'
+        """
+        self.__config.hardware.current_snapshot = name
+        settings_oslo.save_config(self.__config, name)
+
+    def revert_snapshot(self, name):
+        """Check the current state <name> of the environment
+
+        - Check that the <name> matches the current state of the environment
+          that is stored in the 'self.__config.hardware.current_snapshot'
+        - Try to reload 'config' object from a file 'config_<name>.ini'.
+          If the file is not found, proceed with defaults.
+        - Set <name> as the current state of the environment after reload
+
+        :param name: string
+        """
+        if self.__config.hardware.current_snapshot != name:
+            raise Exception(
+                "EnvironmentManagerEmpty cannot revert nodes from {} to {}"
+                .format(self.__config.hardware.current_snapshot, name))
+
+    def start(self):
+        """Start environment"""
+        pass
+
+    def resume(self):
+        """Resume environment"""
+        pass
+
+    def suspend(self):
+        """Suspend environment"""
+        pass
+
+    def stop(self):
+        """Stop environment"""
+        pass
+
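+    # Without real VMs, "snapshots" degenerate to comparing the requested
+    # name with the one stored in the config.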
+    def has_snapshot(self, name):
+        return self.__config.hardware.current_snapshot == name
+
+    def has_snapshot_config(self, name):
+        return self.__config.hardware.current_snapshot == name
+
+    def delete_environment(self):
+        """Delete environment"""
+        pass
diff --git a/tcp_tests/managers/rallymanager.py b/tcp_tests/managers/rallymanager.py
new file mode 100644
index 0000000..bd937d5
--- /dev/null
+++ b/tcp_tests/managers/rallymanager.py
@@ -0,0 +1,118 @@
+#    Copyright 2016 Mirantis, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tcp_tests import logger
+from tcp_tests import settings
+
+
+LOG = logger.logger
+
+
+class RallyManager(object):
+    """docstring for RallyManager"""
+
+    image_name = 'rallyforge/rally'
+    image_version = '0.5.0'
+    image_id = None
+    docker_id = None
+
+    def __init__(self, underlay, admin_node_name):
+        super(RallyManager, self).__init__()
+        self._admin_node_name = admin_node_name
+        self._underlay = underlay
+
+    def prepare(self):
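+        # Script executed later inside the Rally container: it registers
+        # the deployment in Rally and installs/configures Tempest.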
+        content = """
+sed -i 's|#swift_operator_role = Member|swift_operator_role=SwiftOperator|g' /etc/rally/rally.conf  # noqa
+source /home/rally/openrc
+rally-manage db recreate
+rally deployment create --fromenv --name=tempest
+rally verify install
+rally verify genconfig
+rally verify showconfig"""
+        cmd = "cat > {path} << EOF\n{content}\nEOF".format(
+            path='/home/{user}/rally/install_tempest.sh'.format(
+                user=settings.SSH_LOGIN), content=content)
+        cmd1 = "chmod +x /home/{user}/rally/install_tempest.sh".format(
+            user=settings.SSH_LOGIN)
+        cmd2 = "cp /home/{user}/openrc-* /home/{user}/rally/openrc".format(
+            user=settings.SSH_LOGIN)
+
+        with self._underlay.remote(node_name=self._admin_node_name) as remote:
+            LOG.info("Create rally workdir")
+            remote.check_call('mkdir -p /home/{user}/rally'.format(
+                user=settings.SSH_LOGIN))
+            LOG.info("Create install_tempest.sh")
+            remote.check_call(cmd)
+            LOG.info("Chmod +x install_tempest.sh")
+            remote.check_call(cmd1)
+            LOG.info("Copy openstackrc")
+            remote.check_call(cmd2)
+
+    def pull_image(self, version=None):
+        version = version or self.image_version
+        image = self.image_name
+        cmd = "docker pull {image}:{version}".format(image=image,
+                                                     version=version)
+        with self._underlay.remote(node_name=self._admin_node_name) as remote:
+            LOG.info("Pull {image}:{version}".format(image=image,
+                                                     version=version))
+            remote.check_call(cmd)
+
+        with self._underlay.remote(node_name=self._admin_node_name) as remote:
+            LOG.info("Getting image id")
+            cmd = "docker images | grep 0.5.0| awk '{print $3}'"
+            res = remote.check_call(cmd)
+            self.image_id = res['stdout'][0].strip()
+            LOG.info("Image ID is {}".format(self.image_id))
+
+    def run(self):
+        with self._underlay.remote(node_name=self._admin_node_name) as remote:
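+            # Mount the prepared workdir into the container so scripts and
+            # results are shared between the host and Rally.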
+            cmd = ("docker run --net host -v /home/{user}/rally:/home/rally "
+                   "-tid -u root {image_id}".format(
+                       user=settings.SSH_LOGIN, image_id=self.image_id))
+            LOG.info("Run Rally container")
+            remote.check_call(cmd)
+
+            cmd = ("docker ps | grep {image_id} | "
+                   "awk '{{print $1}}'| head -1").format(
+                       image_id=self.image_id)
+            LOG.info("Getting container id")
+            res = remote.check_call(cmd)
+            self.docker_id = res['stdout'][0].strip()
+            LOG.info("Container ID is {}".format(self.docker_id))
+
+    def run_tempest(self, test=''):
+        docker_exec = ('source /home/{user}/rally/openrc; '
+                       'docker exec -i {docker_id} bash -c "{cmd}"')
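+        # Four sequential steps: install and configure Tempest, run the
+        # verification, then export JSON and HTML reports.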
+        commands = [
+            docker_exec.format(cmd="./install_tempest.sh",
+                               user=settings.SSH_LOGIN,
+                               docker_id=self.docker_id),
+            docker_exec.format(
+                cmd="source /home/rally/openrc && "
+                    "rally verify start {test}".format(test=test),
+                user=settings.SSH_LOGIN,
+                docker_id=self.docker_id),
+            docker_exec.format(
+                cmd="rally verify results --json --output-file result.json",
+                user=settings.SSH_LOGIN,
+                docker_id=self.docker_id),
+            docker_exec.format(
+                cmd="rally verify results --html --output-file result.html",
+                user=settings.SSH_LOGIN,
+                docker_id=self.docker_id),
+        ]
+        with self._underlay.remote(node_name=self._admin_node_name) as remote:
+            LOG.info("Run tempest inside Rally container")
+            for cmd in commands:
+                remote.check_call(cmd, verbose=True)
diff --git a/tcp_tests/managers/underlay_ssh_manager.py b/tcp_tests/managers/underlay_ssh_manager.py
new file mode 100644
index 0000000..2880272
--- /dev/null
+++ b/tcp_tests/managers/underlay_ssh_manager.py
@@ -0,0 +1,361 @@
+#    Copyright 2016 Mirantis, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import random
+
+from devops.helpers import helpers
+from devops.helpers import ssh_client
+from paramiko import rsakey
+
+from tcp_tests import logger
+from tcp_tests.helpers import utils
+
+LOG = logger.logger
+
+
+class UnderlaySSHManager(object):
+    """Keep the list of SSH access credentials to Underlay nodes.
+
+       This object is initialized using config.underlay.ssh.
+
+       :param config_ssh: JSONList of SSH access credentials for nodes:
+          [
+            {
+              node_name: node1,
+              address_pool: 'public-pool01',
+              host: ,
+              port: ,
+              keys: [],
+              keys_source_host: None,
+              login: ,
+              password: ,
+            },
+            {
+              node_name: node1,
+              address_pool: 'private-pool01',
+              host: ,
+              port: ,
+              keys: [],
+              keys_source_host: None,
+              login: ,
+              password: ,
+            },
+            {
+              node_name: node2,
+              address_pool: 'public-pool01',
+              keys_source_host: node1,
+              ...
+            },
+            ...
+          ]
+
+       self.node_names(): list of node names registered in underlay.
+       self.remote(): SSHClient object by a node name (w/wo address pool)
+                      or by a hostname.
+    """
+    config_ssh = None
+    config_lvm = None
+
+    def __init__(self, config_ssh):
+        """Read config.underlay.ssh object
+
+           :param config_ssh: dict
+        """
+        if self.config_ssh is None:
+            self.config_ssh = []
+
+        if self.config_lvm is None:
+            self.config_lvm = {}
+
+        self.add_config_ssh(config_ssh)
+
+    def add_config_ssh(self, config_ssh):
+
+        if config_ssh is None:
+            config_ssh = []
+
+        for ssh in config_ssh:
+            ssh_data = {
+                # Required keys:
+                'node_name': ssh['node_name'],
+                'host': ssh['host'],
+                'login': ssh['login'],
+                'password': ssh['password'],
+                # Optional keys:
+                'address_pool': ssh.get('address_pool', None),
+                'port': ssh.get('port', None),
+                'keys': ssh.get('keys', []),
+            }
+
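+            # Load private keys from another registered node when this
+            # entry references it via 'keys_source_host'.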
+            if 'keys_source_host' in ssh:
+                node_name = ssh['keys_source_host']
+                remote = self.remote(node_name)
+                keys = self.__get_keys(remote)
+                ssh_data['keys'].extend(keys)
+
+            self.config_ssh.append(ssh_data)
+
+    def remove_config_ssh(self, config_ssh):
+        if config_ssh is None:
+            config_ssh = []
+
+        for ssh in config_ssh:
+            ssh_data = {
+                # Required keys:
+                'node_name': ssh['node_name'],
+                'host': ssh['host'],
+                'login': ssh['login'],
+                'password': ssh['password'],
+                # Optional keys:
+                'address_pool': ssh.get('address_pool', None),
+                'port': ssh.get('port', None),
+                'keys': ssh.get('keys', []),
+            }
+            self.config_ssh.remove(ssh_data)
+
+    def __get_keys(self, remote):
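+        # Read the node's own private key (if present) so it can be reused
+        # to authenticate against nodes that trust it.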
+        keys = []
+        remote.execute('cd ~')
+        key_string = './.ssh/id_rsa'
+        if remote.exists(key_string):
+            with remote.open(key_string) as f:
+                keys.append(rsakey.RSAKey.from_private_key(f))
+        return keys
+
+    def __ssh_data(self, node_name=None, host=None, address_pool=None):
+
+        ssh_data = None
+
+        if host is not None:
+            for ssh in self.config_ssh:
+                if host == ssh['host']:
+                    ssh_data = ssh
+                    break
+
+        elif node_name is not None:
+            for ssh in self.config_ssh:
+                if node_name == ssh['node_name']:
+                    if address_pool is not None:
+                        if address_pool == ssh['address_pool']:
+                            ssh_data = ssh
+                            break
+                    else:
+                        ssh_data = ssh
+        if ssh_data is None:
+            raise Exception('Auth data for node was not found using '
+                            'node_name="{}" , host="{}" , address_pool="{}"'
+                            .format(node_name, host, address_pool))
+        return ssh_data
+
+    def node_names(self):
+        """Get list of node names registered in config.underlay.ssh"""
+
+        names = []  # List is used to keep the original order of names
+        for ssh in self.config_ssh:
+            if ssh['node_name'] not in names:
+                names.append(ssh['node_name'])
+        return names
+
+    def enable_lvm(self, lvmconfig):
+        """Method for enabling lvm oh hosts in environment
+
+        :param lvmconfig: dict with ids or device' names of lvm storage
+        :raises: devops.error.DevopsCalledProcessError,
+        devops.error.TimeoutError, AssertionError, ValueError
+        """
+        def get_actions(lvm_id):
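+            # Enable/start lvmetad, then create a PV, a VG named 'default'
+            # and a 1G thin pool on the given device.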
+            return [
+                "systemctl enable lvm2-lvmetad.service",
+                "systemctl enable lvm2-lvmetad.socket",
+                "systemctl start lvm2-lvmetad.service",
+                "systemctl start lvm2-lvmetad.socket",
+                "pvcreate {} && pvs".format(lvm_id),
+                "vgcreate default {} && vgs".format(lvm_id),
+                "lvcreate -L 1G -T default/pool && lvs",
+            ]
+        lvmpackages = ["lvm2", "liblvm2-dev", "thin-provisioning-tools"]
+        for node_name in self.node_names():
+            lvm = lvmconfig.get(node_name, None)
+            if not lvm:
+                continue
+            if 'id' in lvm:
+                lvmdevice = '/dev/disk/by-id/{}'.format(lvm['id'])
+            elif 'device' in lvm:
+                lvmdevice = '/dev/{}'.format(lvm['device'])
+            else:
+                raise ValueError("Unknown LVM device type")
+            if lvmdevice:
+                self.apt_install_package(
+                    packages=lvmpackages, node_name=node_name, verbose=True)
+                for command in get_actions(lvmdevice):
+                    self.sudo_check_call(command, node_name=node_name,
+                                         verbose=True)
+        self.config_lvm = dict(lvmconfig)
+
+    def host_by_node_name(self, node_name, address_pool=None):
+        ssh_data = self.__ssh_data(node_name=node_name,
+                                   address_pool=address_pool)
+        return ssh_data['host']
+
+    def remote(self, node_name=None, host=None, address_pool=None):
+        """Get SSHClient by a node name or hostname.
+
+           One of the following arguments should be specified:
+           - host (str): IP address or hostname. If specified, 'node_name' is
+                         ignored.
+           - node_name (str): Name of the node stored to config.underlay.ssh
+           - address_pool (str): optional for node_name.
+                                 If None, use the first matched node_name.
+        """
+        ssh_data = self.__ssh_data(node_name=node_name, host=host,
+                                   address_pool=address_pool)
+        return ssh_client.SSHClient(
+            host=ssh_data['host'],
+            port=ssh_data['port'] or 22,
+            username=ssh_data['login'],
+            password=ssh_data['password'],
+            private_keys=ssh_data['keys'])
+
+    def check_call(
+            self, cmd,
+            node_name=None, host=None, address_pool=None,
+            verbose=False, timeout=None,
+            error_info=None,
+            expected=None, raise_on_err=True):
+        """Execute command on the node_name/host and check for exit code
+
+        :type cmd: str
+        :type node_name: str
+        :type host: str
+        :type verbose: bool
+        :type timeout: int
+        :type error_info: str
+        :type expected: list
+        :type raise_on_err: bool
+        :rtype: list stdout
+        :raises: devops.error.DevopsCalledProcessError
+        """
+        remote = self.remote(node_name=node_name, host=host,
+                             address_pool=address_pool)
+        return remote.check_call(
+            command=cmd, verbose=verbose, timeout=timeout,
+            error_info=error_info, expected=expected,
+            raise_on_err=raise_on_err)
+
+    def apt_install_package(self, packages=None, node_name=None, host=None,
+                            **kwargs):
+        """Method to install packages on ubuntu nodes
+
+        :type packages: list
+        :type node_name: str
+        :type host: str
+        :raises: devops.error.DevopsCalledProcessError,
+        devops.error.TimeoutError, AssertionError, ValueError
+
+        Other params of check_call and sudo_check_call are allowed
+        """
+        expected = kwargs.pop('expected', None)
+        if not packages or not isinstance(packages, list):
+            raise ValueError("packages list should be provided!")
+        install = "apt-get install -y {}".format(" ".join(packages))
+        # Should wait until other 'apt' jobs are finished
+        pgrep_expected = [0, 1]
+        pgrep_command = "pgrep -a -f apt"
+        helpers.wait(
+            lambda: (self.check_call(
+                pgrep_command, expected=pgrep_expected, host=host,
+                node_name=node_name, **kwargs).exit_code == 1
+            ), interval=30, timeout=1200,
+            timeout_msg="Timeout reached while waiting for apt lock"
+        )
+        # Install packages
+        self.sudo_check_call("apt-get update", node_name=node_name, host=host,
+                             **kwargs)
+        self.sudo_check_call(install, expected=expected, node_name=node_name,
+                             host=host, **kwargs)
+
+    def sudo_check_call(
+            self, cmd,
+            node_name=None, host=None, address_pool=None,
+            verbose=False, timeout=None,
+            error_info=None,
+            expected=None, raise_on_err=True):
+        """Execute command with sudo on node_name/host and check for exit code
+
+        :type cmd: str
+        :type node_name: str
+        :type host: str
+        :type verbose: bool
+        :type timeout: int
+        :type error_info: str
+        :type expected: list
+        :type raise_on_err: bool
+        :rtype: list stdout
+        :raises: devops.error.DevopsCalledProcessError
+        """
+        remote = self.remote(node_name=node_name, host=host,
+                             address_pool=address_pool)
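+        # Temporarily elevate the SSH session to root for this command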
+        with remote.get_sudo(remote):
+            return remote.check_call(
+                command=cmd, verbose=verbose, timeout=timeout,
+                error_info=error_info, expected=expected,
+                raise_on_err=raise_on_err)
+
+    def dir_upload(self, host, source, destination):
+        """Upload local directory content to remote host
+
+        :param host: str, remote node name
+        :param source: str, local directory path
+        :param destination: str, remote directory path
+        """
+        with self.remote(node_name=host) as remote:
+            remote.upload(source, destination)
+
+    def get_random_node(self):
+        """Get random node name
+
+        :return: str, name of node
+        """
+        return random.choice(self.node_names())
+
+    def yaml_editor(self, file_path, node_name=None, host=None,
+                    address_pool=None):
+        """Returns an initialized YamlEditor instance for context manager
+
+        Usage (with 'underlay' fixture):
+
+        # Local YAML file
+        with underlay.yaml_editor('/path/to/file') as editor:
+            editor.content[key] = "value"
+
+        # Remote YAML file on TCP host
+        with underlay.yaml_editor('/path/to/file',
+                                  host=config.tcp.tcp_host) as editor:
+            editor.content[key] = "value"
+        """
+        # Local YAML file
+        if node_name is None and host is None:
+            return utils.YamlEditor(file_path=file_path)
+
+        # Remote YAML file
+        ssh_data = self.__ssh_data(node_name=node_name, host=host,
+                                   address_pool=address_pool)
+        return utils.YamlEditor(
+            file_path=file_path,
+            host=ssh_data['host'],
+            port=ssh_data['port'] or 22,
+            username=ssh_data['login'],
+            password=ssh_data['password'],
+            private_keys=ssh_data['keys'])