Initial commit with fixtures
- add fixtures for hardware and underlay
- add fuel-devops template tcpcloud-default.yaml
* Migration of fixtures is not finished yet
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..ea68183
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,64 @@
+*.py[cod]
+
+# C extensions
+*.so
+
+# Packages
+*.egg
+*.egg-info
+dist
+build
+include
+eggs
+parts
+bin
+var
+sdist
+develop-eggs
+.installed.cfg
+lib
+local
+lib64
+MANIFEST
+TAGS
+
+# Installer logs
+pip-log.txt
+
+# Unit test / coverage reports
+.coverage
+.tox
+nosetests.xml
+
+# Translations
+*.mo
+
+# Mr Developer
+.mr.developer.cfg
+.cache
+.project
+.pydevproject
+.idea
+
+# Local example
+example_local.py
+
+# Local settings
+local_settings.py
+
+# Documentation
+doc/_build/
+
+# Logs
+/logs
+tests.log
+
+# Certs
+/ca.crt
+/ca.pem
+
+# Cache
+/.cache
+
+# Snapshot configs
+config_test_*.ini
diff --git a/.gitreview b/.gitreview
new file mode 100644
index 0000000..7dfb0a5
--- /dev/null
+++ b/.gitreview
@@ -0,0 +1,4 @@
+[gerrit]
+host=review.openstack.org
+port=29418
+project=Mirantis/tcp-qa.git
diff --git a/README.md b/README.md
index a3d584e..0e17cca 100644
--- a/README.md
+++ b/README.md
@@ -1 +1,52 @@
-# tcp-qa
+# tcpcloud-devops-env
+
+This template requires 20 vCPU and 52Gb host RAM.
+
+Clone the repo
+--------------
+
+git clone https://github.com/dis-xcom/tcpcloud-devops-env
+
+cd ./tcpcloud-devops-env
+
+Install requirements
+--------------------
+
+pip install -r ./requirements.txt
+
+Initialize fuel-devops database if needed:
+------------------------------------------
+
+dos-manage.py migrate
+
+Get cloudinit image
+-------------------
+
+wget https://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img -O ./trusty-server-cloudimg-amd64.qcow2
+
+Export variables
+----------------
+
+export ENV_NAME=tcpcloud-mk20 # Optional
+
+export IMAGE_PATH=./trusty-server-cloudimg-amd64.qcow2
+
+Run deploy test
+-----------------------------------------
+export WORKSPACE=$(pwd)
+export SUSPEND_ENV_ON_TEARDOWN=false # Optional
+
+py.test -vvv -s -k test_tcp_install_default
+
+
+
+
+Create and start the env for manual tests
+-----------------------------------------
+
+dos.py create-env ./tcpcloud-wk20.yaml
+
+dos.py start "${ENV_NAME}"
+
+
+Then, wait until cloud-init is finished and port 22 is open (~3-4 minutes), and login with 'vagrant / vagrant'.
diff --git a/pytest.ini b/pytest.ini
new file mode 100644
index 0000000..fa4659c
--- /dev/null
+++ b/pytest.ini
@@ -0,0 +1,3 @@
+[pytest]
+addopts = -vvv -s -p no:django -p no:ipdb --junit-xml=nosetests.xml
+testpaths = tcp_tests
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000..de79002
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,30 @@
+[metadata]
+name = tcp-tests
+summary = Test tools for Fuel CCP
+description-file =
+ README.rst
+author = OpenStack
+author-email = openstack-dev@lists.openstack.org
+home-page = http://www.openstack.org/
+classifier =
+ Environment :: OpenStack
+ Intended Audience :: Information Technology
+ Intended Audience :: System Administrators
+ License :: OSI Approved :: Apache Software License
+ Operating System :: POSIX :: Linux
+
+[build_sphinx]
+source-dir = doc/source
+build-dir = doc/build
+all_files = 1
+
+[upload_sphinx]
+upload-dir = doc/build/html
+
+[files]
+packages =
+ tcp_tests
+
+[entry_points]
+console_scripts =
+ tcp_tests = tcp_tests.run_test:shell
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..056c16c
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,29 @@
+# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
+import setuptools
+
+# In python < 2.7.4, a lazy loading of package `pbr` will break
+# setuptools if some other modules registered functions in `atexit`.
+# solution from: http://bugs.python.org/issue15881#msg170215
+try:
+ import multiprocessing # noqa
+except ImportError:
+ pass
+
+setuptools.setup(
+ setup_requires=['pbr'],
+ pbr=True)
diff --git a/tcp_tests/__init__.py b/tcp_tests/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tcp_tests/__init__.py
diff --git a/tcp_tests/conftest.py b/tcp_tests/conftest.py
new file mode 100644
index 0000000..3dff580
--- /dev/null
+++ b/tcp_tests/conftest.py
@@ -0,0 +1,3 @@
+# DO NOT EDIT THIS FILE!
+# To include new fixtures, please use conftest.py files
+# located in the appropriate ./tests/* folders.
diff --git a/tcp_tests/fixtures/__init__.py b/tcp_tests/fixtures/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tcp_tests/fixtures/__init__.py
diff --git a/tcp_tests/fixtures/common_fixtures.py b/tcp_tests/fixtures/common_fixtures.py
new file mode 100644
index 0000000..9d121e5
--- /dev/null
+++ b/tcp_tests/fixtures/common_fixtures.py
@@ -0,0 +1,75 @@
+# Copyright 2016 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from __future__ import division
+import time
+
+import pytest
+
+from tcp_tests import logger
+from tcp_tests.helpers import log_step
+from tcp_tests.helpers import utils
+
+
+LOG = logger.logger
+
+
+@pytest.yield_fixture(scope='session')
+def ssh_keys_dir(request):
+ ssh_keys_dir = utils.generate_keys()
+ LOG.info("SSH keys were generated in {}".format(ssh_keys_dir))
+ yield ssh_keys_dir
+ utils.clean_dir(ssh_keys_dir)
+ LOG.info("Tmp dir {} with generated ssh keys was cleaned".format(
+ ssh_keys_dir))
+
+
+@pytest.hookimpl(tryfirst=True, hookwrapper=True)
+def pytest_runtest_makereport(item, call):
+ outcome = yield
+ rep = outcome.get_result()
+ setattr(item, "rep_" + rep.when, rep)
+
+
+def pytest_runtest_setup(item):
+ if item.cls is not None:
+ item.cls._current_test = item.function
+ item._start_time = time.time()
+ head = "<" * 5 + "#" * 30 + "[ {} ]" + "#" * 30 + ">" * 5
+ head = head.format(item.function.__name__)
+ start_step = "\n{head}".format(head=head)
+ LOG.info(start_step)
+
+
+def pytest_runtest_teardown(item):
+ step_name = item.function.__name__
+ if hasattr(item, '_start_time'):
+ spent_time = time.time() - item._start_time
+ else:
+ spent_time = 0
+    minutes = int(spent_time // 60)
+ seconds = int(round(spent_time)) % 60
+ finish_step = "FINISH {} TEST. TOOK {} min {} sec".format(
+ step_name, minutes, seconds
+ )
+ foot = "\n" + "<" * 5 + "#" * 30 + "[ {} ]" + "#" * 30 + ">" * 5
+ foot = foot.format(finish_step)
+ LOG.info(foot)
+
+
+@pytest.fixture(scope='function')
+def show_step(request):
+ def _show_step(step_number):
+ return log_step.log_step(request.function, step_number)
+ return _show_step
diff --git a/tcp_tests/fixtures/config_fixtures.py b/tcp_tests/fixtures/config_fixtures.py
new file mode 100644
index 0000000..ffcaea9
--- /dev/null
+++ b/tcp_tests/fixtures/config_fixtures.py
@@ -0,0 +1,33 @@
+# Copyright 2016 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import os
+
+import pytest
+
+from tcp_tests import settings_oslo
+
+
+@pytest.fixture(scope='session')
+def config():
+
+ config_files = []
+
+ tests_configs = os.environ.get('TESTS_CONFIGS', None)
+ if tests_configs:
+ for test_config in tests_configs.split(','):
+ config_files.append(test_config)
+
+ config_opts = settings_oslo.load_config(config_files)
+
+ return config_opts
diff --git a/tcp_tests/fixtures/rally_fixtures.py b/tcp_tests/fixtures/rally_fixtures.py
new file mode 100644
index 0000000..45e828a
--- /dev/null
+++ b/tcp_tests/fixtures/rally_fixtures.py
@@ -0,0 +1,30 @@
+# Copyright 2016 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import pytest
+
+from tcp_tests.managers import rallymanager
+
+
+@pytest.fixture(scope='function')
+def rally(underlay):
+ """Fixture that provides various actions for TCP
+
+ :param config: fixture provides oslo.config
+ :param underlay: fixture provides underlay manager
+ :rtype: RallyManager
+
+ For use in tests or fixtures to deploy a custom TCP
+ """
+ return rallymanager.RallyManager(underlay, 'cfg01')
diff --git a/tcp_tests/fixtures/underlay_fixtures.py b/tcp_tests/fixtures/underlay_fixtures.py
new file mode 100644
index 0000000..e7aaaf8
--- /dev/null
+++ b/tcp_tests/fixtures/underlay_fixtures.py
@@ -0,0 +1,211 @@
+# Copyright 2016 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import pytest
+from datetime import datetime
+
+from tcp_tests.helpers import ext
+from tcp_tests import logger
+from tcp_tests import settings
+from tcp_tests.managers import envmanager_devops
+from tcp_tests.managers import envmanager_empty
+from tcp_tests.managers import underlay_ssh_manager
+
+LOG = logger.logger
+
+
+def extract_name_from_mark(mark):
+ """Simple function to extract name from mark
+
+ :param mark: pytest.mark.MarkInfo
+ :rtype: string or None
+ """
+ if mark:
+ if len(mark.args) > 0:
+ return mark.args[0]
+ elif 'name' in mark.kwargs:
+ return mark.kwargs['name']
+ return None
+
+
+@pytest.fixture(scope="session")
+def hardware(request, config):
+ """Fixture for manage the hardware layer.
+
+ - start/stop/reboot libvirt/IPMI(/MaaS?) nodes
+ - snapshot/revert libvirt nodes (fuel-devops only)
+ - block/unblock libvirt networks/interfaces (fuel-devops only)
+
+ This fixture should get a hardware configuration from
+ 'config' object or create a virtual/baremetal underlay
+ using EnvironmentManager.
+
+ Creates a snapshot 'hardware' with ready-to-use virtual environment
+ (Only for config.hardware.manager='devops'):
+ - just created virtual nodes in power-on state
+ - node volumes filled with necessary content
+ - node network interfaces connected to necessary devices
+
+ config.hardware.manager: one of ('devops', 'maas', None)
+ config.hardware.config: path to the config file for the manager
+ config.hardware.current_snapshot = Latest created or reverted snapshot
+
+ :rtype EnvironmentModel: if config.hardware.manager == 'devops'
+ :rtype EnvironmentManagerEmpty: if config.hardware.manager == 'empty'
+ """
+ env = None
+
+ manager = config.hardware.manager
+
+ if manager == 'empty':
+ # No environment manager is used.
+ # 'config' should contain config.underlay.ssh settings
+ # 'config' should contain config.underlay.current_snapshot setting
+ env = envmanager_empty.EnvironmentManagerEmpty(config=config)
+
+ elif manager == 'devops':
+ # fuel-devops environment manager is used.
+        # config.underlay.ssh settings can be empty or with SSH to existing env
+ # config.underlay.current_snapshot
+ env = envmanager_devops.EnvironmentManager(config=config)
+ else:
+ raise Exception("Unknown hardware manager: '{}'".format(manager))
+
+ # for devops manager: power on nodes and wait for SSH
+ # for empty manager: do nothing
+ # for maas manager: provision nodes and wait for SSH
+ env.start()
+ if not env.has_snapshot(ext.SNAPSHOT.hardware):
+ env.create_snapshot(ext.SNAPSHOT.hardware)
+
+ def fin():
+ if settings.SHUTDOWN_ENV_ON_TEARDOWN:
+ LOG.info("Shutdown environment...")
+ env.stop()
+
+ request.addfinalizer(fin)
+ return env
+
+
+@pytest.fixture(scope='function')
+def revert_snapshot(request, hardware):
+ """Revert snapshot for the test case
+
+ Usage:
+ @pytest.mark.revert_snapshot(name='<required_snapshot_name>')
+
+    If the mark 'revert_snapshot' is absent, or <required_snapshot_name>
+ not found, then an initial 'hardware' snapshot will be reverted.
+
+ :rtype string: name of the reverted snapshot or None
+ """
+ revert_snapshot = request.keywords.get('revert_snapshot', None)
+ snapshot_name = extract_name_from_mark(revert_snapshot)
+
+ if snapshot_name and \
+ hardware.has_snapshot(snapshot_name) and \
+ hardware.has_snapshot_config(snapshot_name):
+ hardware.revert_snapshot(snapshot_name)
+ return snapshot_name
+ else:
+ hardware.revert_snapshot(ext.SNAPSHOT.hardware)
+ return None
+
+
+@pytest.fixture(scope='function', autouse=True)
+def snapshot(request, hardware):
+ """Fixture for creating snapshot at the end of test if it's needed
+
+ Marks:
+ snapshot_needed(name=None) - make snapshot if test is passed. If
+ name argument provided, it will be used for creating snapshot,
+ otherwise, test function name will be used
+
+ fail_snapshot - make snapshot if test failed
+
+ :param request: pytest.python.FixtureRequest
+    :param hardware: environment manager fixture (envmanager.EnvironmentManager)
+ """
+ snapshot_needed = request.keywords.get('snapshot_needed', None)
+ fail_snapshot = request.keywords.get('fail_snapshot', None)
+
+ def test_fin():
+ default_snapshot_name = getattr(request.node.function,
+ '_snapshot_name',
+ request.node.function.__name__)
+ if hasattr(request.node, 'rep_call') and request.node.rep_call.passed \
+ and snapshot_needed:
+ snapshot_name = extract_name_from_mark(snapshot_needed) or \
+ "{}_passed".format(default_snapshot_name)
+ hardware.create_snapshot(snapshot_name)
+
+ elif hasattr(request.node, 'rep_setup') and \
+ request.node.rep_setup.failed and fail_snapshot:
+ snapshot_name = "{0}_prep_failed".format(default_snapshot_name)
+ hardware.create_snapshot(snapshot_name)
+
+ elif hasattr(request.node, 'rep_call') and \
+ request.node.rep_call.failed and fail_snapshot:
+ snapshot_name = "{0}_failed".format(default_snapshot_name)
+ hardware.create_snapshot(snapshot_name)
+
+ request.addfinalizer(test_fin)
+
+
+@pytest.fixture(scope="function")
+def underlay(revert_snapshot, config, hardware):
+ """Fixture that should provide SSH access to underlay objects.
+
+ - Starts the 'hardware' environment and creates 'underlay' with required
+ configuration.
+ - Fills the following object using the 'hardware' fixture:
+ config.underlay.ssh = JSONList of SSH access credentials for nodes.
+ This list will be used for initialization the
+ model UnderlaySSHManager, see it for details.
+
+ :rtype UnderlaySSHManager: Object that encapsulate SSH credentials;
+ - provide list of underlay nodes;
+ - provide SSH access to underlay nodes using
+ node names or node IPs.
+ """
+ # If no snapshot was reverted, then try to revert the snapshot
+ # that belongs to the fixture.
+ # Note: keep fixtures in strict dependences from each other!
+ if not revert_snapshot:
+ if hardware.has_snapshot(ext.SNAPSHOT.underlay) and \
+ hardware.has_snapshot_config(ext.SNAPSHOT.underlay):
+ hardware.revert_snapshot(ext.SNAPSHOT.underlay)
+
+ # Create Underlay
+ if not config.underlay.ssh:
+ # If config.underlay.ssh wasn't provided from external config, then
+ # try to get necessary data from hardware manager (fuel-devops)
+ config.underlay.ssh = hardware.get_ssh_data(
+ roles=config.underlay.roles)
+
+ underlay = underlay_ssh_manager.UnderlaySSHManager(config.underlay.ssh)
+
+ if not config.underlay.lvm:
+ underlay.enable_lvm(hardware.lvm_storages())
+ config.underlay.lvm = underlay.config_lvm
+
+ hardware.create_snapshot(ext.SNAPSHOT.underlay)
+
+ else:
+ # 1. hardware environment created and powered on
+ # 2. config.underlay.ssh contains SSH access to provisioned nodes
+ # (can be passed from external config with TESTS_CONFIGS variable)
+ underlay = underlay_ssh_manager.UnderlaySSHManager(config.underlay.ssh)
+
+ return underlay
diff --git a/tcp_tests/helpers/__init__.py b/tcp_tests/helpers/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tcp_tests/helpers/__init__.py
diff --git a/tcp_tests/helpers/containers.py b/tcp_tests/helpers/containers.py
new file mode 100644
index 0000000..7d1306a
--- /dev/null
+++ b/tcp_tests/helpers/containers.py
@@ -0,0 +1,162 @@
+# Copyright 2016 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from __future__ import division
+
+from tcp_tests import logger
+
+
+LOG = logger.logger
+
+
+def exec_in_container(container, cmd):
+ command = container.create_exec(cmd)
+ stdout = container.start_exec(command)
+ inspect = container.client.exec_inspect(command['Id'])
+ return stdout, inspect['ExitCode']
+
+
+class ContainerEngine(object):
+ def __init__(self,
+ remote=None,
+ image_name=None,
+ container_repo=None,
+ proxy_url=None,
+ user_id=0,
+ container_name=None,
+ dir_for_home='/var/home',
+ ):
+ self.remote = remote
+ self.container_repo = container_repo
+ self.repository_tag = 'latest'
+ self.proxy_url = proxy_url or ""
+ self.user_id = user_id
+ self.image_name = image_name
+ self.container_name = container_name
+ self.dir_for_home = dir_for_home
+ self.home_bind_path = '{0}/{1}'.format(
+ self.dir_for_home, self.container_name)
+ self.setup()
+
+ def image_exists(self, tag='latest'):
+ cmd = "docker images | grep {0}| awk '{{print $1}}'".format(
+ self.image_name)
+ LOG.info('Checking Docker images...')
+ result = self.remote.execute(cmd)
+ LOG.debug(result)
+ existing_images = [line.strip().split() for line in result['stdout']]
+ return [self.container_repo, tag] in existing_images
+
+ def pull_image(self):
+ # TODO(dtyzhnenko): add possibility to load image from local path or
+ # remote link provided in settings, in order to speed up downloading
+ cmd = 'docker pull {0}'.format(self.container_repo)
+ LOG.debug('Downloading Rally repository/image from registry...')
+ result = self.remote.execute(cmd)
+ LOG.debug(result)
+ return self.image_exists()
+
+ def run_container_command(self, command, in_background=False):
+ command = str(command).replace(r"'", r"'\''")
+ options = ''
+ if in_background:
+ options = '{0} -d'.format(options)
+ cmd = ("docker run {options} --user {user_id} --net=\"host\" -e "
+ "\"http_proxy={proxy_url}\" -e \"https_proxy={proxy_url}\" "
+ "-v {dir_for_home}:{home_bind_path} {container_repo}:{tag} "
+ "/bin/bash -c '{command}'".format(
+ options=options,
+ user_id=self.user_id,
+ proxy_url=self.proxy_url,
+ dir_for_home=self.dir_for_home,
+ home_bind_path=self.home_bind_path,
+ container_repo=self.container_repo,
+ tag=self.repository_tag,
+ command=command))
+ LOG.debug(
+ 'Executing command "{0}" in Rally container {1}..'.format(
+ cmd, self.container_repo
+ )
+ )
+ result = self.remote.execute(cmd)
+ LOG.debug(result)
+ return result
+
+ def setup_utils(self):
+ utils = ['gawk', 'vim', 'curl']
+ cmd = ('unset http_proxy https_proxy; apt-get update; '
+ 'apt-get install -y {0}'.format(' '.join(utils)))
+ LOG.debug('Installing utils "{0}" to the container...'.format(
+ utils))
+ result = self.run_container_command(cmd)
+ assert result['exit_code'] == 0, \
+ "Utils installation failed in container: {0}".format(result)
+
+ def prepare_image(self):
+ self.setup_utils()
+ last_container_cmd = "docker ps -lq"
+ result = self.remote.execute(last_container_cmd)
+ assert result['exit_code'] == 0, \
+ "Unable to get last container ID: {0}!".format(result)
+ last_container = ''.join([line.strip() for line in result['stdout']])
+ commit_cmd = 'docker commit {0} {1}:ready'.format(last_container,
+ self.container_repo)
+ result = self.remote.execute(commit_cmd)
+ assert result['exit_code'] == 0, \
+ "Commit to Docker image '{0}' failed: {1}.".format(
+ self.container_repo, result)
+ return self.image_exists(tag='ready')
+
+ def setup_bash_alias(self):
+ alias_name = '{}_docker'.format(self.image_name)
+ check_alias_cmd = '. /root/.bashrc && alias {0}'.format(alias_name)
+ result = self.remote.execute(check_alias_cmd)
+ if result['exit_code'] == 0:
+ return
+ LOG.debug(
+ 'Creating bash alias for {} inside container...'.format(
+ self.image_name
+ )
+ )
+ create_alias_cmd = ("alias {alias_name}='docker run --user {user_id} "
+ "--net=\"host\" -e \"http_proxy={proxy_url}\" -t "
+ "-i -v {dir_for_home}:{home_bind_path} "
+ "{container_repo}:{tag} {image_name}'".format(
+ alias_name=alias_name,
+ user_id=self.user_id,
+ proxy_url=self.proxy_url,
+ dir_for_home=self.dir_for_home,
+ home_bind_path=self.home_bind_path,
+ container_repo=self.container_repo,
+ tag=self.repository_tag,
+ image_name=self.image_name))
+ result = self.remote.execute('echo "{0}">> /root/.bashrc'.format(
+ create_alias_cmd))
+ assert result['exit_code'] == 0, \
+ ("Alias creation for running {0} from container "
+ "failed: {1}.").format(self.image_name, result)
+ result = self.remote.execute(check_alias_cmd)
+ assert result['exit_code'] == 0, \
+            ("Alias creation for running {0} from container "
+ "failed: {1}.").format(self.image_name, result)
+
+ def setup(self):
+ if not self.image_exists():
+ assert self.pull_image(), \
+ "Docker image for {} not found!".format(self.image_name)
+ if not self.image_exists(tag='ready'):
+ assert self.prepare_image(), \
+ "Docker image for {} is not ready!".format(self.image_name)
+ self.repository_tag = 'ready'
+ self.setup_bash_alias()
diff --git a/tcp_tests/helpers/env_config.py b/tcp_tests/helpers/env_config.py
new file mode 100644
index 0000000..3ad9a36
--- /dev/null
+++ b/tcp_tests/helpers/env_config.py
@@ -0,0 +1,318 @@
+# Copyright 2016 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# TODO(slebedev): implement unit tests
+
+import copy
+import json
+import re
+
+from devops.helpers import templates
+import yaml
+
+from tcp_tests.helpers import exceptions
+from tcp_tests import logger
+
+
+LOG = logger.logger
+
+
+class DevopsConfigMissingKey(KeyError):
+ def __init__(self, key, keypath):
+ super(DevopsConfigMissingKey, self).__init__()
+ self.key = key
+        self.keypath = keypath
+
+ def __str__(self):
+ return "Key '{0}' by keypath '{1}' is missing".format(
+ self.key,
+ self.keypath
+ )
+
+
+def fail_if_obj(x):
+ if not isinstance(x, int):
+ raise TypeError("Expecting int value!")
+
+
+def fix_devops_config(config):
+ """Function for get correct structure of config
+
+ :param config: dict
+ :returns: config dict
+ """
+ if not isinstance(config, dict):
+ raise exceptions.DevopsConfigTypeError(
+ type_name=type(config).__name__
+ )
+ if 'template' in config:
+ return copy.deepcopy(config)
+ else:
+ return {
+ "template": {
+ "devops_settings": copy.deepcopy(config)
+ }
+ }
+
+
+def list_update(obj, indexes, value):
+ """Procedure for setting value into list (nested too), need
+ in some functions where we are not able to set value directly.
+
+ e.g.: we want to change element in nested list.
+
+ obj = [12, 34, [3, 5, [0, 4], 3], 85]
+ list_update(obj, [2, 2, 1], 50) => obj[2][2][1] = 50
+ print(obj) => [12, 34, [3, 5, [0, 50], 3], 85]
+
+ :param obj: source list
+ :param indexes: list with indexes for recursive process
+ :param value: some value for setting
+ """
+ def check_obj(obj):
+ if not isinstance(obj, list):
+ raise TypeError("obj must be a list instance!")
+ check_obj(obj)
+ if len(indexes) > 0:
+ cur = obj
+ last_index = indexes[-1]
+ fail_if_obj(last_index)
+ for i in indexes[:-1]:
+ fail_if_obj(i)
+ check_obj(cur[i])
+ cur = cur[i]
+ cur[last_index] = value
+
+
+def return_obj(indexes=[]):
+ """Function returns dict() or list() object given nesting, it needs by
+ set_value_for_dict_by_keypath().
+
+ Examples:
+ return_obj() => {}
+ return_obj([0]) => [{}]
+ return_obj([-1]) => [{}]
+ return_obj([-1, 1, -2]) => [[None, [{}, None]]]
+ return_obj([2]) => [None, None, {}]
+ return_obj([1,3]) => [None, [None, None, None, {}]]
+ """
+ if not isinstance(indexes, list):
+ raise TypeError("indexes must be a list!")
+ if len(indexes) > 0:
+ # Create resulting initial object with 1 element
+ result = [None]
+ # And save it's ref
+ cur = result
+ # lambda for extending list elements
+ li = (lambda x: [None] * x)
+ # lambda for nesting of list
+ nesting = (lambda x: x if x >= 0 else abs(x) - 1)
+ # save last index
+ last_index = indexes[-1]
+ fail_if_obj(last_index)
+ # loop from first till penultimate elements of indexes
+ # we must create nesting list and set current position to
+ # element at next index in indexes list
+ for i in indexes[:-1]:
+ fail_if_obj(i)
+ cur.extend(li(nesting(i)))
+ cur[i] = [None]
+ cur = cur[i]
+ # Perform last index
+ cur.extend(li(nesting(last_index)))
+ cur[last_index] = {}
+ return result
+ else:
+ return dict()
+
+
+def keypath(paths):
+ """Function to make string keypath from list of paths"""
+ return ".".join(list(paths))
+
+
+def disassemble_path(path):
+ """Func for disassembling path into key and indexes list (if needed)
+
+ :param path: string
+ :returns: key string, indexes list
+ """
+    pattern = re.compile(r"\[([0-9]*)\]")
+ # find all indexes of possible list object in path
+ indexes = (lambda x: [int(r) for r in pattern.findall(x)]
+ if pattern.search(x) else [])
+ # get key
+ base_key = (lambda x: re.sub(pattern, '', x))
+ return base_key(path), indexes(path)
+
+
+def set_value_for_dict_by_keypath(source, paths, value, new_on_missing=True):
+ """Procedure for setting specific value by keypath in dict
+
+ :param source: dict
+ :param paths: string
+ :param value: value to set by keypath
+ """
+ paths = paths.lstrip(".").split(".")
+ walked_paths = []
+ # Store the last path
+ last_path = paths.pop()
+ data = source
+ # loop to go through dict
+ while len(paths) > 0:
+ path = paths.pop(0)
+ key, indexes = disassemble_path(path)
+ walked_paths.append(key)
+ if key not in data:
+ if new_on_missing:
+ # if object is missing, we create new one
+ data[key] = return_obj(indexes)
+ else:
+ raise DevopsConfigMissingKey(key, keypath(walked_paths[:-1]))
+
+ data = data[key]
+
+ # if we can not get element in list, we should
+ # throw an exception with walked path
+ for i in indexes:
+ try:
+ tmp = data[i]
+ except IndexError as err:
+ LOG.error(
+ "Couldn't access {0} element of '{1}' keypath".format(
+ i, keypath(walked_paths)
+ )
+ )
+ LOG.error(
+ "Dump of '{0}':\n{1}".format(
+ keypath(walked_paths),
+ json.dumps(data)
+ )
+ )
+ raise type(err)(
+ "Can't access '{0}' element of '{1}' object! "
+ "'{2}' object found!".format(
+ i,
+ keypath(walked_paths),
+ data
+ )
+ )
+ data = tmp
+ walked_paths[-1] += "[{0}]".format(i)
+
+ key, indexes = disassemble_path(last_path)
+ i_count = len(indexes)
+ if key not in data:
+ if new_on_missing:
+ data[key] = return_obj(indexes)
+ else:
+ raise DevopsConfigMissingKey(key, keypath(walked_paths))
+ elif i_count > 0 and not isinstance(data[key], list):
+ raise TypeError(
+ ("Key '{0}' by '{1}' keypath expected as list "
+             "but '{2}' obj found").format(
+ key, keypath(walked_paths), type(data[key]).__name__
+ )
+ )
+ if i_count == 0:
+ data[key] = value
+ else:
+ try:
+ list_update(data[key], indexes, value)
+ except (IndexError, TypeError) as err:
+ LOG.error(
+ "Error while setting by '{0}' key of '{1}' keypath".format(
+ last_path,
+ keypath(walked_paths)
+ )
+ )
+ LOG.error(
+ "Dump of object by '{0}' keypath:\n{1}".format(
+ keypath(walked_paths),
+ json.dumps(data)
+ )
+ )
+ raise type(err)(
+ "Couldn't set value by '{0}' key of '{1}' keypath'".format(
+ last_path,
+ keypath(walked_paths)
+ )
+ )
+
+
+class EnvironmentConfig(object):
+ def __init__(self):
+ super(EnvironmentConfig, self).__init__()
+ self._config = None
+
+ @property
+ def config(self):
+ return self._config
+
+ @config.setter
+ def config(self, config):
+ """Setter for config
+
+ :param config: dict
+ """
+ self._config = fix_devops_config(config)
+
+ def __getitem__(self, key):
+ if self._config is not None:
+ conf = self._config['template']['devops_settings']
+ return copy.deepcopy(conf.get(key, None))
+ else:
+ return None
+
+ @logger.logwrap
+ def set_value_by_keypath(self, keypath, value):
+ """Function for set value of devops settings by keypath.
+
+ It's forbidden to set value of self.config directly, so
+ it's possible simply set value by keypath
+ """
+ if self.config is None:
+ raise exceptions.DevopsConfigIsNone()
+ conf = self._config['template']['devops_settings']
+ set_value_for_dict_by_keypath(conf, keypath, value)
+
+ def save(self, filename):
+ """Dump current config into given file
+
+ :param filename: string
+ """
+ if self._config is None:
+ raise exceptions.DevopsConfigIsNone()
+ with open(filename, 'w') as f:
+ f.write(
+ yaml.dump(
+ self._config, default_flow_style=False
+ )
+ )
+
+ def load_template(self, filename):
+ """Method for reading file with devops config
+
+ :param filename: string
+ """
+ if filename is not None:
+ LOG.debug(
+ "Preparing to load config from template '{0}'".format(
+ filename
+ )
+ )
+ self.config = templates.yaml_template_load(filename)
+ else:
+ LOG.error("Template filename is not set, loading config " +
+ "from template aborted.")
diff --git a/tcp_tests/helpers/exceptions.py b/tcp_tests/helpers/exceptions.py
new file mode 100644
index 0000000..259880e
--- /dev/null
+++ b/tcp_tests/helpers/exceptions.py
@@ -0,0 +1,123 @@
+# Copyright 2016 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
class UnexpectedExitCode(Exception):
    def __init__(self, command, ec, expected_ec, stdout=None, stderr=None):
        """Exception for unexpected exit code after executing shell/ssh command

        :param command: str - executed command
        :param ec: int - actual exit code
        :param expected_ec: list of integers - expected exit codes
        :param stdout: str
        :param stderr: str
        """
        self.ec = ec
        self.expected_ec = expected_ec
        self.cmd = command
        self.stdout = stdout
        self.stderr = stderr
        super(UnexpectedExitCode, self).__init__()

    def __str__(self):
        # Bug fix: terminate the summary line with '\n' so the optional
        # stdout/stderr sections start on their own lines instead of
        # being glued to "...waiting for [...]".
        message = "Command '{cmd:s}' returned unexpected exit code {code:d}," \
                  " while waiting for {exp}\n".format(cmd=self.cmd,
                                                      code=self.ec,
                                                      exp=self.expected_ec)
        if self.stdout:
            message += "stdout: {}\n".format(self.stdout)
        if self.stderr:
            message += "stderr: {}\n".format(self.stderr)
        return message
+
+
class VariableNotSet(Exception):
    """Raised when an expected variable is missing from a value."""

    def __init__(self, variable_name, expected_value):
        super(VariableNotSet, self).__init__()
        self.variable_name = variable_name
        self.expected_value = expected_value

    def __str__(self):
        return "Variable {0} was not set in value {1}".format(
            self.variable_name, self.expected_value)
+
+
class DevopsConfigPathIsNotSet(ValueError):
    """Raised when the path to the devops config/template is missing."""

    def __str__(self):
        return "Devops config/template path is not set!"
+
+
class DevopsConfigTypeError(TypeError):
    """Raised when the devops config has an unexpected (non-dict) type."""

    def __init__(self, type_name):
        super(DevopsConfigTypeError, self).__init__()
        self.type_name = type_name

    def __str__(self):
        return "Devops config should be dict instead of {0}".format(
            self.type_name)
+
+
class DevopsConfigIsNone(ValueError):
    """Raised when an operation needs a devops config but none is loaded."""

    def __str__(self):
        return "Devops config is None!"
+
+
class EnvironmentNameIsNotSet(ValueError):
    """Raised when the devops template provides no environment name."""

    def __str__(self):
        return "Couldn't get environment name!"
+
+
class EnvironmentDoesNotExist(Exception):
    """Raised when the requested fuel-devops environment is absent.

    Derives from Exception (not BaseException) so that generic
    'except Exception' handlers can catch it, per PEP 8.
    """

    def __init__(self, env_name):
        super(EnvironmentDoesNotExist, self).__init__()
        self.env_name = env_name

    def __str__(self):
        return "Environment {0} does not exist!".format(
            self.env_name
        )
+
+
class EnvironmentAlreadyExists(Exception):
    """Raised when creating an environment whose name is already taken.

    Derives from Exception (not BaseException) so that generic
    'except Exception' handlers can catch it, per PEP 8.
    """

    def __init__(self, env_name):
        super(EnvironmentAlreadyExists, self).__init__()
        self.env_name = env_name

    def __str__(self):
        return "Environment {0} already exists!".format(
            self.env_name
        )
+
+
class EnvironmentSnapshotMissing(Exception):
    """Raised when a required snapshot is absent from the environment.

    Derives from Exception (not BaseException) so that generic
    'except Exception' handlers can catch it, per PEP 8.
    """

    def __init__(self, env_name, snapshot_name):
        super(EnvironmentSnapshotMissing, self).__init__()
        self.env_name = env_name
        self.snapshot_name = snapshot_name

    def __str__(self):
        return ("Environment '{0}' doesn't have requested snapshot '{1}'! "
                "Please create the snapshot manually or erase the environment."
                .format(self.env_name, self.snapshot_name))
+
+
class EnvironmentIsNotSet(Exception):
    """Raised when an operation requires an environment but none is set.

    Derives from Exception (not BaseException) so that generic
    'except Exception' handlers can catch it, per PEP 8.
    """

    def __str__(self):
        return "Environment is not set!"
+
+
class BaseImageIsNotSet(Exception):
    """Raised when no base image is configured for creating VMs.

    Derives from Exception (not BaseException) so that generic
    'except Exception' handlers can catch it, per PEP 8.
    """

    def __str__(self):
        return "Base image for creating VMs is not set!"
diff --git a/tcp_tests/helpers/ext.py b/tcp_tests/helpers/ext.py
new file mode 100644
index 0000000..5771eae
--- /dev/null
+++ b/tcp_tests/helpers/ext.py
@@ -0,0 +1,50 @@
+# Copyright 2016 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import collections
+
+from enum import IntEnum
+
+
def enum(*values, **kwargs):
    """Create an immutable pseudo-enum (namedtuple instance) from *values*.

    :param values: attribute values of the resulting object
    :param names: optional iterable of attribute names; when omitted the
        values themselves are used, with characters that are invalid in
        Python identifiers (e.g. '-') replaced by '_', so a value such as
        'salt-master' becomes the attribute 'salt_master'.
        (namedtuple raises ValueError on non-identifier field names,
        which previously made this module fail to import.)
    :rtype: collections.namedtuple instance
    """
    names = kwargs.get('names')
    if names:
        return collections.namedtuple('Enum', names)(*values)
    # Sanitize value-derived field names into valid identifiers.
    fields = [str(value).replace('-', '_') for value in values]
    return collections.namedtuple('Enum', fields)(*values)
+
# Node roles as used in the devops templates. The values contain hyphens,
# which are not valid namedtuple attribute names, so explicit attribute
# names are passed via 'names' (without this the namedtuple constructor
# raises ValueError at import time).
UNDERLAY_NODE_ROLE = enum(
    'salt-master',
    'salt-minion',
    names=['salt_master', 'salt_minion'],
)

# Address pool / network kinds used by the environment manager.
NETWORK_TYPE = enum(
    'private',
    'public'
)

# Well-known snapshot names for the deployment stages.
SNAPSHOT = enum(
    'hardware',
    'underlay',
    'tcp_deployed',
    'os_deployed',
)

# Log level names recognized by the test framework.
LOG_LEVELS = enum(
    'INFO',
    'WARNING',
    'ERROR',
    'CRITICAL',
    'DEBUG',
    'NOTE'
)
diff --git a/tcp_tests/helpers/log_step.py b/tcp_tests/helpers/log_step.py
new file mode 100644
index 0000000..64ec7aa
--- /dev/null
+++ b/tcp_tests/helpers/log_step.py
@@ -0,0 +1,70 @@
+# Copyright 2016 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import re
+
+from tcp_tests import logger
+
+
+LOG = logger.logger
+
+
def parse_test_doc(docstring):
    """Parse a test docstring into title, scenario steps and duration.

    Expected docstring layout::

        <title, one or more lines>
        <blank line>
        Scenario:
            1. <step>
            2. <step>
        Duration: <seconds>

    :param docstring: str
    :rtype: dict with keys 'title', 'steps', 'duration'; empty dict when
        the docstring does not match the expected format
    """
    parse_regex = re.compile(r'(?P<title>^(.*\S.*\n)+)+'
                             r'(?P<empty_line1>\s*\n)'
                             r'\s*Scenario:\s*\n(?P<scenario>(.+\n)+)'
                             r'(?P<empty_line2>\s*(\n|$))?'
                             r'(\s*Duration:\s+(?P<duration>\d+).*\n)?')
    doc_match = re.match(parse_regex, docstring)

    if not doc_match:
        LOG.error("Can't parse test docstring, unknown format!")
        return {}

    def _squash(text):
        # Collapse line breaks and repeated whitespace into single spaces.
        return re.sub(r'[\n\s]+', ' ', text).strip()

    # Steps are separated by their "N." numbering; the split leaves an
    # empty leading chunk which is filtered out.
    steps = [
        _squash(raw_step)
        for raw_step in re.split(r'\s+\d+\.\s*', doc_match.group('scenario'))
        if raw_step
    ]

    # TODO(apanchenko): now it works only with 'seconds'
    return {
        'title': _squash(doc_match.group('title')),
        'steps': steps,
        'duration': int(doc_match.group('duration') or 1000),
    }
+
+
def log_step(func, step_num):
    """Log scenario step number *step_num* taken from *func*'s docstring.

    :param func: function whose docstring contains the test scenario
    :param step_num: int - 1-based number of the step to show
    """
    if not func.__doc__:
        LOG.error("Can't show step #{0}: docstring for method {1} not "
                  "found!".format(step_num, func.__name__))
        # Bug fix: without this return, parse_test_doc(None) below would
        # raise TypeError whenever the docstring is missing.
        return
    # .get() guards against an unparsable docstring, where parse_test_doc
    # returns an empty dict; the IndexError handler below then reports it.
    test_case_steps = parse_test_doc(func.__doc__).get('steps', [])
    try:
        LOG.info(" *** [STEP#{0}] {1} ***".format(
            step_num,
            test_case_steps[step_num - 1]))
    except IndexError:
        LOG.error("Can't show step #{0}: docstring for method {1} does't "
                  "contain it!".format(step_num, func.__name__))
diff --git a/tcp_tests/helpers/metaclasses.py b/tcp_tests/helpers/metaclasses.py
new file mode 100644
index 0000000..6e1e79b
--- /dev/null
+++ b/tcp_tests/helpers/metaclasses.py
@@ -0,0 +1,27 @@
+# Copyright 2016 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
class SingletonMeta(type):
    """Metaclass for Singleton

    Main goals: not need to implement __new__ in singleton classes
    """
    # Maps each class using this metaclass to its single instance.
    _instances = {}

    def __call__(cls, *args, **kwargs):
        # EAFP: return the cached instance, constructing it on first use.
        try:
            return cls._instances[cls]
        except KeyError:
            instance = super(SingletonMeta, cls).__call__(*args, **kwargs)
            cls._instances[cls] = instance
            return instance
diff --git a/tcp_tests/helpers/oslo_cfg_types.py b/tcp_tests/helpers/oslo_cfg_types.py
new file mode 100644
index 0000000..7465ad0
--- /dev/null
+++ b/tcp_tests/helpers/oslo_cfg_types.py
@@ -0,0 +1,106 @@
+# Copyright 2016 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import json
+import os
+
+from oslo_config import cfg
+from oslo_config import types
+
+
+# See http://docs.openstack.org/developer/oslo.config/types.html
+Boolean = types.Boolean
+Integer = types.Integer
+Float = types.Float
+String = types.String
+MultiString = types.MultiString
+List = types.List
+Dict = types.Dict
+IPAddress = types.IPAddress
+Hostname = types.Hostname
+URI = types.URI
+
+
+# JSON config types inspired by https://review.openstack.org/100521
class JSONList(types.ConfigType):
    """JSON list type.

    Decode JSON list from a string value to python list.
    """

    def __init__(self, type_name='JSONList value'):
        super(JSONList, self).__init__(type_name=type_name)

    def __call__(self, value):
        # Already a list: pass through unchanged.
        if isinstance(value, list):
            return value

        try:
            decoded = json.loads(value)
        except ValueError:
            raise ValueError("No JSON object could be decoded from the value: "
                             "{0}".format(value))

        if not isinstance(decoded, list):
            raise ValueError("Expected JSONList, but decoded '{0}' from the "
                             "value: {1}".format(type(decoded), value))
        return decoded

    def __repr__(self):
        return 'JSONList'

    def __eq__(self, other):
        return self.__class__ == other.__class__

    def _formatter(self, value):
        # Serialize back to a JSON string for config output.
        return json.dumps(value)
+
+
class JSONDict(types.ConfigType):
    """JSON dictionary type.

    Decode JSON dictionary from a string value to python dict.
    """

    def __init__(self, type_name='JSONDict value'):
        super(JSONDict, self).__init__(type_name=type_name)

    def __call__(self, value):
        # Already a dict: pass through unchanged.
        if isinstance(value, dict):
            return value

        try:
            decoded = json.loads(value)
        except ValueError:
            raise ValueError("No JSON object could be decoded from the value: "
                             "{0}".format(value))

        if not isinstance(decoded, dict):
            raise ValueError("Expected JSONDict, but decoded '{0}' from the "
                             "value: {1}".format(type(decoded), value))
        return decoded

    def __repr__(self):
        return 'JSONDict'

    def __eq__(self, other):
        return self.__class__ == other.__class__

    def _formatter(self, value):
        # Serialize back to a JSON string for config output.
        return json.dumps(value)
+
+
class Cfg(cfg.Opt):
    """Wrapper for cfg.Opt class that reads the default from environment
    variables.

    The environment variable name is the upper-cased option name
    (e.g. option 'image_path' -> env var 'IMAGE_PATH').

    NOTE(review): the value taken from the environment stays a string and
    is not passed through the option's type converter here -- confirm that
    oslo.config coerces it later for non-string option types.
    """
    def __init__(self, *args, **kwargs):
        super(Cfg, self).__init__(*args, **kwargs)
        # Override the declared default with the environment value, if set.
        env_var_name = self.name.upper()
        self.default = os.environ.get(env_var_name, self.default)
diff --git a/tcp_tests/helpers/utils.py b/tcp_tests/helpers/utils.py
new file mode 100644
index 0000000..a5ad2b8
--- /dev/null
+++ b/tcp_tests/helpers/utils.py
@@ -0,0 +1,413 @@
+# Copyright 2016 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+import os
+import shutil
+import tempfile
+import time
+import traceback
+
+import paramiko
+import yaml
+from devops.helpers import helpers
+from devops.helpers import ssh_client
+from elasticsearch import Elasticsearch
+
+from tcp_tests import logger
+from tcp_tests import settings
+from tcp_tests.helpers import ext
+
+LOG = logger.logger
+
+
def get_test_method_name():
    """Return the name of the currently running test method.

    Not implemented yet for this framework; always raises.

    :raises NotImplementedError: always
    """
    raise NotImplementedError
+
+
def update_yaml(yaml_tree=None, yaml_value='', is_uniq=True,
                yaml_file=settings.TIMESTAT_PATH_YAML, remote=None):
    """Store/update a variable in YAML file.

    yaml_tree - path to the variable in YAML file, will be created if absent,
    yaml_value - value of the variable, will be overwritten if exists,
    is_uniq - If false, add the unique two-digit suffix to the variable name.
    yaml_file - path to the YAML file (local, or on 'remote' when given)
    remote - optional SSH client providing open(); when set, the file is
             read and written on the remote node.
    """
    def get_file(path, remote=None, mode="r"):
        # Open the file either on the remote node or locally.
        if remote:
            return remote.open(path, mode)
        else:
            return open(path, mode)

    if yaml_tree is None:
        yaml_tree = []
    with get_file(yaml_file, remote) as file_obj:
        # Bug fix: an empty file loads as None, which would crash the
        # tree walk below; fall back to an empty dict.
        yaml_data = yaml.safe_load(file_obj) or {}

    # Walk through the 'yaml_data' dict, find or create a tree using
    # sub-keys in order provided in 'yaml_tree' list
    item = yaml_data
    for n in yaml_tree[:-1]:
        if n not in item:
            item[n] = {}
        item = item[n]

    if is_uniq:
        last = yaml_tree[-1]
    else:
        # Create an uniq suffix in range '_00' to '_99'
        for n in range(100):
            last = str(yaml_tree[-1]) + '_' + str(n).zfill(2)
            if last not in item:
                break

    item[last] = yaml_value
    with get_file(yaml_file, remote, mode='w') as file_obj:
        yaml.dump(yaml_data, file_obj, default_flow_style=False)
+
+
class TimeStat(object):
    """Context manager for measuring the execution time of the code.

    Usage:
    with TimeStat([name],[is_uniq=True]):
    """

    def __init__(self, name=None, is_uniq=False):
        # Fall back to a generic name when none (or an empty one) is given.
        self.name = name or 'timestat'
        self.is_uniq = is_uniq
        self.begin_time = 0
        self.end_time = 0
        self.total_time = 0

    def __enter__(self):
        self.begin_time = time.time()
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.end_time = time.time()
        self.total_time = self.end_time - self.begin_time

        # Compose the YAML path where 'self.total_time' will be stored:
        # [<test method name, when available>, <name of the measured block>]
        yaml_path = []
        method_name = get_test_method_name()
        if method_name:
            yaml_path.append(method_name)
        yaml_path.append(self.name)

        try:
            update_yaml(yaml_path, '{:.2f}'.format(self.total_time),
                        self.is_uniq)
        except Exception:
            LOG.error("Error storing time statistic for {0}"
                      " {1}".format(yaml_path, traceback.format_exc()))
            raise

    @property
    def spent_time(self):
        # Elapsed time since __enter__ without leaving the context.
        return time.time() - self.begin_time
+
+
def reduce_occurrences(items, text):
    """Return *text* with the first occurrence of each item removed.

    Args:
        items: iterable of strings
        text: string
    Returns:
        string
    Raise:
        AssertionError if any substring not present in source text
    """
    remainder = text
    for substring in items:
        LOG.debug(
            "Verifying string {} is shown in "
            "\"\"\"\n{}\n\"\"\"".format(substring, remainder))
        assert remainder.count(substring) != 0
        remainder = remainder.replace(substring, "", 1)
    return remainder
+
+
def generate_keys():
    """Generate a disposable 1024-bit RSA key pair.

    Writes 'id_rsa' and 'id_rsa.pub' into a fresh temporary directory.

    :return: path to the temporary directory holding the key files
    """
    key = paramiko.RSAKey.generate(1024)
    tmp_dir = tempfile.mkdtemp()
    key.write_private_key_file(os.path.join(tmp_dir, 'id_rsa'))
    pub_path = os.path.join(tmp_dir, 'id_rsa.pub')
    with open(pub_path, 'w') as pub_file:
        pub_file.write(key.get_base64())
    return tmp_dir
+
+
def clean_dir(dirpath):
    """Recursively remove the directory *dirpath* and all its contents."""
    shutil.rmtree(dirpath)
+
+
def retry(tries_number=3, exception=Exception):
    """Decorator factory: retry the wrapped function on *exception*.

    :param tries_number: int - maximum number of calls, must be >= 1
    :param exception: exception class (or tuple) that triggers a retry
    :raises AssertionError: at decoration time when tries_number < 1
    :raises exception: the last caught exception once the budget is spent
    """
    def _retry(func):
        assert tries_number >= 1, 'ERROR! @retry is called with no tries!'

        def wrapper(*args, **kwargs):
            iter_number = 1
            while True:
                try:
                    LOG.debug('Calling function "{0}" with args "{1}" and '
                              'kwargs "{2}". Try # {3}.'.format(func.__name__,
                                                                args,
                                                                kwargs,
                                                                iter_number))
                    return func(*args, **kwargs)
                except exception as e:
                    # Bug fix: '>=' limits the total calls to tries_number;
                    # the previous '>' allowed tries_number + 1 attempts.
                    if iter_number >= tries_number:
                        LOG.debug('Failed to execute function "{0}" with {1} '
                                  'tries!'.format(func.__name__, tries_number))
                        raise e
                    iter_number += 1
        return wrapper
    return _retry
+
+
class ElasticClient(object):
    """Client that polls an Elasticsearch instance for matching documents."""

    def __init__(self, host='localhost', port=9200):
        # '{}'.format(host) simply coerces host to str for the client config.
        self.es = Elasticsearch([{'host': '{}'.format(host),
                                 'port': port}])
        self.host = host
        self.port = port

    def find(self, key, value):
        """Wait (up to 300 s) for a document whose field *key* matches *value*.

        :param key: str - document field searched by the query
        :param value: str - value used in the simple_query_string query
        :rtype: ElasticSearchResult
        :raises RuntimeError: when the final search reports 'timed_out'
        """
        LOG.info('Search for {} for {}'.format(key, value))
        # The request body is a JSON string assembled by concatenation.
        # NOTE(review): 'value' and 'key' are spliced in without JSON
        # escaping -- quotes or backslashes in them would break the query;
        # confirm that callers only pass safe strings.
        search_request_body = '{' +\
            '  "query": {' +\
            '   "simple_query_string": {' +\
            '     "query": "{}",'.format(value) +\
            '     "analyze_wildcard" : "true",' +\
            '     "fields" : ["{}"],'.format(key) +\
            '     "default_operator": "AND"' +\
            '     }' +\
            '  },' +\
            '  "size": 1' +\
            '}'
        LOG.info('Search by {}'.format(search_request_body))

        def is_found():
            # Returns a zero-argument predicate for helpers.wait below.
            def temporary_status():
                res = self.es.search(index='_all', body=search_request_body)
                return res['hits']['total'] != 0
            return temporary_status

        predicate = is_found()
        helpers.wait(predicate, timeout=300,
                     timeout_msg='Timeout waiting, result from elastic')

        # Re-run the search once more to fetch the final raw response.
        es_raw = self.es.search(index='_all', body=search_request_body)
        if es_raw['timed_out']:
            raise RuntimeError('Elastic search timeout exception')

        return ElasticSearchResult(key, value, es_raw['hits']['total'], es_raw)
+
+
class ElasticSearchResult(object):
    """Container for the outcome of an Elasticsearch query.

    Attributes: key/value - the searched field and value; count - number
    of matched documents; raw - raw response dict; items - matched hits
    (empty list when nothing matched).
    """

    def __init__(self, key, value, count, raw):
        self.key = key
        self.value = value
        self.count = count
        self.raw = raw
        # Bug fix: always define 'items' so attribute access cannot fail
        # when there were no hits.
        self.items = raw['hits']['hits'] if self.count != 0 else []

    def get(self, index):
        """Return the '_source' document at *index*, or None when empty.

        :param index: int
        """
        if self.count != 0:
            return self.items[index]['_source']
        # Bug fix: the original had a bare 'None' expression here
        # (missing 'return'); the result is now explicit.
        return None
+
+
def create_file(node, pod, path, size, namespace=None):
    """Create a zero-filled file of *size* MB inside *pod* via kubectl exec.

    :param node: remote node object providing check_call()
    :param pod: pod object with a 'name' attribute
    :param path: str - file path inside the pod
    :param size: int - file size in MB (dd count)
    :param namespace: str; defaults to ext.Namespace.BASE_NAMESPACE
    """
    # Bug fix: resolve the default lazily. Evaluating
    # ext.Namespace.BASE_NAMESPACE as a default argument runs at import
    # time and raises AttributeError, because helpers/ext.py does not
    # define Namespace (nor ExitCodes) -- TODO: add them to ext.py.
    if namespace is None:
        namespace = ext.Namespace.BASE_NAMESPACE
    node.check_call(
        'kubectl exec {} --namespace={} {}'.format(
            pod.name,
            namespace,
            'dd -- if=/dev/zero -- of={} bs=1MB count={}'.format(path, size)),
        expected=[ext.ExitCodes.EX_OK])
+
+
def run_daily_cron(node, pod, task, namespace=None):
    """Run the daily cron script *task* inside *pod* via kubectl exec.

    :param node: remote node object providing check_call()
    :param pod: pod object with a 'name' attribute
    :param task: str - script name under /etc/cron.daily/
    :param namespace: str; defaults to ext.Namespace.BASE_NAMESPACE
    """
    # Bug fix: resolve the default lazily. Evaluating
    # ext.Namespace.BASE_NAMESPACE as a default argument runs at import
    # time and raises AttributeError, because helpers/ext.py does not
    # define Namespace (nor ExitCodes) -- TODO: add them to ext.py.
    if namespace is None:
        namespace = ext.Namespace.BASE_NAMESPACE
    node.check_call(
        'kubectl exec {} --namespace={} {}'.format(
            pod.name,
            namespace,
            '/etc/cron.daily/{}'.format(task)),
        expected=[ext.ExitCodes.EX_OK])
+
+
def list_files(node, pod, path, mask, namespace=None):
    """List files matching *mask* under *path* inside *pod*.

    :param node: remote node object providing check_call()
    :param pod: pod object with a 'name' attribute
    :param path: str - directory to search inside the pod
    :param mask: str - '-iname' pattern passed to find
    :param namespace: str; defaults to ext.Namespace.BASE_NAMESPACE
    :rtype: list of str - paths printed by 'find'
    """
    # Bug fix: resolve the default lazily. Evaluating
    # ext.Namespace.BASE_NAMESPACE as a default argument runs at import
    # time and raises AttributeError, because helpers/ext.py does not
    # define Namespace (nor ExitCodes) -- TODO: add them to ext.py.
    if namespace is None:
        namespace = ext.Namespace.BASE_NAMESPACE
    return "".join(node.check_call(
        'kubectl exec {} --namespace={} {}'.format(
            pod.name,
            namespace,
            'find {} -- -iname {}'.format(path, mask)),
        expected=[ext.ExitCodes.EX_OK])['stdout']) \
        .replace('\n', ' ').strip().split(" ")
+
+
def rm_files(node, pod, path, namespace=None):
    """Remove *path* inside *pod* via kubectl exec (best effort).

    Uses node.execute(), so a failing 'rm' does not raise.

    :param node: remote node object providing execute()
    :param pod: pod object with a 'name' attribute
    :param path: str - path (or glob) to remove inside the pod
    :param namespace: str; defaults to ext.Namespace.BASE_NAMESPACE
    """
    # Bug fix: resolve the default lazily. Evaluating
    # ext.Namespace.BASE_NAMESPACE as a default argument runs at import
    # time and raises AttributeError, because helpers/ext.py does not
    # define Namespace -- TODO: add it to ext.py.
    if namespace is None:
        namespace = ext.Namespace.BASE_NAMESPACE
    node.execute(
        'kubectl exec {} --namespace={} {}'.format(
            pod.name,
            namespace,
            'rm -- {}'.format(path)))
+
+
class YamlEditor(object):
    """Manipulations with local or remote .yaml files.

    Usage:

    with YamlEditor("tasks.yaml") as editor:
        editor.content[key] = "value"

    with YamlEditor("astute.yaml", ip=self.admin_ip) as editor:
        editor.content[key] = "value"
    """

    def __init__(self, file_path, host=None, port=None,
                 username=None, password=None, private_keys=None,
                 document_id=0,
                 default_flow_style=False, default_style=None):
        self.__file_path = file_path
        self.host = host
        self.port = port or 22
        self.username = username
        self.__password = password
        self.__private_keys = private_keys or []
        self.__content = None
        self.__documents = [{}, ]
        self.__document_id = document_id
        self.__original_content = None
        self.default_flow_style = default_flow_style
        self.default_style = default_style

    @property
    def file_path(self):
        """Open file path

        :rtype: str
        """
        return self.__file_path

    @property
    def content(self):
        # Lazily load the document on first access.
        if self.__content is None:
            self.__content = self.get_content()
        return self.__content

    @content.setter
    def content(self, new_content):
        self.__content = new_content

    def __get_file(self, mode="r"):
        # Open the target file over SSH when a host is set, else locally.
        if self.host:
            remote = ssh_client.SSHClient(
                host=self.host,
                port=self.port,
                username=self.username,
                password=self.__password,
                private_keys=self.__private_keys)

            return remote.open(self.__file_path, mode=mode)
        else:
            return open(self.__file_path, mode=mode)

    def get_content(self):
        """Return a single document from YAML"""
        def multi_constructor(loader, tag_suffix, node):
            """Stores all unknown tags content into a dict

            Original yaml:
            !unknown_tag
            - some content

            Python object:
            {"!unknown_tag": ["some content", ]}
            """
            if type(node.value) is list:
                if type(node.value[0]) is tuple:
                    return {node.tag: loader.construct_mapping(node)}
                else:
                    return {node.tag: loader.construct_sequence(node)}
            else:
                return {node.tag: loader.construct_scalar(node)}

        yaml.add_multi_constructor("!", multi_constructor)
        with self.__get_file() as file_obj:
            self.__documents = [x for x in yaml.load_all(file_obj)]
            return self.__documents[self.__document_id]

    def write_content(self, content=None):
        """Write all documents back to the file, optionally replacing
        the current one with *content* first.
        """
        if content:
            self.content = content
        self.__documents[self.__document_id] = self.content

        def representer(dumper, data):
            """Represents a dict key started with '!' as a YAML tag

            Assumes that there is only one !tag in the dict at the
            current indent.

            Python object:
            {"!unknown_tag": ["some content", ]}

            Resulting yaml:
            !unknown_tag
            - some content
            """
            # Guard: an empty dict has no first key; dump it as a plain map.
            if not data:
                return dumper.represent_mapping(
                    u'tag:yaml.org,2002:map', data)
            # Bug fix: dict.keys() is not subscriptable on Python 3;
            # next(iter(...)) yields the same first key on both 2 and 3.
            key = next(iter(data))
            if key.startswith("!"):
                value = data[key]
                if type(value) is dict:
                    node = dumper.represent_mapping(key, value)
                elif type(value) is list:
                    node = dumper.represent_sequence(key, value)
                else:
                    node = dumper.represent_scalar(key, value)
            else:
                node = dumper.represent_mapping(u'tag:yaml.org,2002:map', data)
            return node

        yaml.add_representer(dict, representer)
        with self.__get_file("w") as file_obj:
            yaml.dump_all(self.__documents, file_obj,
                          default_flow_style=self.default_flow_style,
                          default_style=self.default_style)

    def __enter__(self):
        self.__content = self.get_content()
        self.__original_content = copy.deepcopy(self.content)
        return self

    def __exit__(self, x, y, z):
        # Write back only if the document was actually modified.
        if self.content == self.__original_content:
            return
        self.write_content()
diff --git a/tcp_tests/logger.py b/tcp_tests/logger.py
new file mode 100644
index 0000000..b9d0c5a
--- /dev/null
+++ b/tcp_tests/logger.py
@@ -0,0 +1,73 @@
+# Copyright 2016 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import functools
+import logging
+import os
+import traceback
+
+from tcp_tests import settings
+
# Make sure the log directory exists before basicConfig tries to create
# the log file inside it.
if not os.path.exists(settings.LOGS_DIR):
    os.makedirs(settings.LOGS_DIR)

# Root logging: full DEBUG output goes to tests.log, rewritten each run.
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s - %(levelname)s %(filename)s:'
                    '%(lineno)d -- %(message)s',
                    filename=os.path.join(settings.LOGS_DIR, 'tests.log'),
                    filemode='w')

# The console handler uses the same format but only shows INFO and above.
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(levelname)s %(filename)s:'
                              '%(lineno)d -- %(message)s')
console.setFormatter(formatter)

# Module logger used throughout the test framework.
logger = logging.getLogger(__name__)
logger.addHandler(console)
+
+
+# suppress iso8601 and paramiko debug logging
class NoDebugMessageFilter(logging.Filter):
    """Logging filter that drops records at DEBUG level and below."""

    def filter(self, record):
        # Equivalent to 'not record.levelno <= logging.DEBUG'.
        return record.levelno > logging.DEBUG
+
+logging.getLogger('paramiko.transport').addFilter(NoDebugMessageFilter())
+logging.getLogger('paramiko.hostkeys').addFilter(NoDebugMessageFilter())
+logging.getLogger('iso8601.iso8601').addFilter(NoDebugMessageFilter())
+
+
def debug(logger):
    """Build a decorator that traces calls, results and exceptions
    of the wrapped function through *logger*.
    """
    def _decorator(func):
        @functools.wraps(func)
        def _traced(*args, **kwargs):
            logger.debug(
                "Calling: {} with args: {} {}".format(
                    func.__name__, args, kwargs
                )
            )
            try:
                result = func(*args, **kwargs)
                logger.debug(
                    "Done: {} with result: {}".format(func.__name__, result))
            except BaseException as e:
                # Log the failure with a traceback, then re-raise as-is.
                logger.error(
                    '{func} raised: {exc!r}\n'
                    'Traceback: {tb!s}'.format(
                        func=func.__name__, exc=e, tb=traceback.format_exc()))
                raise
            return result
        return _traced
    return _decorator
+
# Ready-to-use tracing decorator bound to this module's logger.
logwrap = debug(logger)
diff --git a/tcp_tests/managers/__init__.py b/tcp_tests/managers/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tcp_tests/managers/__init__.py
diff --git a/tcp_tests/managers/envmanager_devops.py b/tcp_tests/managers/envmanager_devops.py
new file mode 100644
index 0000000..51b6520
--- /dev/null
+++ b/tcp_tests/managers/envmanager_devops.py
@@ -0,0 +1,373 @@
+# Copyright 2016 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+from devops import error
+from devops.helpers import helpers
+from devops import models
+from django import db
+from oslo_config import cfg
+
+from tcp_tests import settings
+from tcp_tests import settings_oslo
+from tcp_tests.helpers import env_config
+from tcp_tests.helpers import ext
+from tcp_tests.helpers import exceptions
+from tcp_tests import logger
+
+LOG = logger.logger
+
+
class EnvironmentManager(object):
    """Class-helper for creating VMs via devops environments"""

    __config = None

    def __init__(self, config=None):
        """Initializing class instance and create the environment

        :param config: oslo.config object
        :param config.hardware.conf_path: path to devops YAML template
        :param config.hardware.current_snapshot: name of the snapshot that
                                                 describes environment status.
        """
        self.__devops_config = env_config.EnvironmentConfig()
        self._env = None
        self.__config = config

        if config.hardware.conf_path is not None:
            self._devops_config.load_template(config.hardware.conf_path)
        else:
            raise Exception("Devops YAML template is not set in config object")

        try:
            self._get_env_by_name(self._d_env_name)
            if not self.has_snapshot(config.hardware.current_snapshot):
                raise exceptions.EnvironmentSnapshotMissing(
                    self._d_env_name, config.hardware.current_snapshot)
        except error.DevopsObjNotFound:
            LOG.info("Environment doesn't exist, creating a new one")
            self._create_environment()
        self.set_dns_config()

    @property
    def _devops_config(self):
        return self.__devops_config

    @_devops_config.setter
    def _devops_config(self, conf):
        """Setter for self.__devops_config

        :param conf: tcp_tests.helpers.env_config.EnvironmentConfig
        """
        if not isinstance(conf, env_config.EnvironmentConfig):
            msg = ("Unexpected type of devops config. Got '{0}' " +
                   "instead of '{1}'")
            raise TypeError(
                msg.format(
                    type(conf).__name__,
                    env_config.EnvironmentConfig.__name__
                )
            )
        self.__devops_config = conf

    def lvm_storages(self):
        """Returns a dict object of lvm storages in current environment

        returned data example:
            {
                "master": {
                    "id": "virtio-bff72959d1a54cb19d08"
                },
                "slave-0": {
                    "id": "virtio-5e33affc8fe44503839f"
                },
            }

        :rtype: dict
        """
        result = {}
        for node in self.master_nodes + self.slave_nodes:
            # List comprehension instead of filter(): len() of a py3
            # filter object would raise TypeError.
            lvm = [x for x in node.disk_devices if x.volume.name == 'lvm']
            if len(lvm) == 0:
                continue
            lvm = lvm[0]
            result[node.name] = {}
            result_node = result[node.name]
            result_node['id'] = "{bus}-{serial}".format(
                bus=lvm.bus,
                serial=lvm.volume.serial[:20])
            LOG.info("Got disk-id '{}' for node '{}'".format(
                result_node['id'], node.name))
        return result

    @property
    def _d_env_name(self):
        """Get environment name from fuel devops config

        :rtype: string
        """
        return self._devops_config['env_name']

    def _get_env_by_name(self, name):
        """Set existing environment by name

        :param name: string
        """
        self._env = models.Environment.get(name=name)

    def _get_default_node_group(self):
        # The 'default' group is created by fuel-devops from the template.
        return self._env.get_group(name='default')

    def _get_network_pool(self, net_pool_name):
        """Return the named network pool of the default node group."""
        default_node_group = self._get_default_node_group()
        network_pool = default_node_group.get_network_pool(name=net_pool_name)
        return network_pool

    def get_ssh_data(self, roles=None):
        """Generate ssh config for Underlay

        :param roles: list of strings
        """
        if roles is None:
            raise Exception("No roles specified for the environment!")

        config_ssh = []
        for d_node in self._env.get_nodes(role__in=roles):
            ssh_data = {
                'node_name': d_node.name,
                'address_pool': self._get_network_pool(
                    ext.NETWORK_TYPE.public).address_pool.name,
                'host': self.node_ip(d_node),
                'login': settings.SSH_NODE_CREDENTIALS['login'],
                'password': settings.SSH_NODE_CREDENTIALS['password'],
            }
            config_ssh.append(ssh_data)
        return config_ssh

    def create_snapshot(self, name, description=None):
        """Create named snapshot of current env.

        - Create a libvirt snapshots for all nodes in the environment
        - Save 'config' object to a file 'config_<name>.ini'

        :name: string
        """
        LOG.info("Creating snapshot named '{0}'".format(name))
        self.__config.hardware.current_snapshot = name
        LOG.info("current config '{0}'".format(
            self.__config.hardware.current_snapshot))
        if self._env is not None:
            LOG.info('trying to suspend ....')
            self._env.suspend()
            LOG.info('trying to snapshot ....')
            self._env.snapshot(name, description=description, force=True)
            LOG.info('trying to resume ....')
            self._env.resume()
        else:
            raise exceptions.EnvironmentIsNotSet()
        settings_oslo.save_config(self.__config, name, self._env.name)

    def _get_snapshot_config_name(self, snapshot_name):
        """Get config name for the environment"""
        env_name = self._env.name
        if env_name is None:
            env_name = 'config'
        test_config_path = os.path.join(
            settings.LOGS_DIR, '{0}_{1}.ini'.format(env_name, snapshot_name))
        return test_config_path

    def revert_snapshot(self, name):
        """Revert snapshot by name

        - Revert a libvirt snapshots for all nodes in the environment
        - Try to reload 'config' object from a file 'config_<name>.ini'
          If the file not found, then pass with defaults.
        - Set <name> as the current state of the environment after reload

        :param name: string
        """
        LOG.info("Reverting from snapshot named '{0}'".format(name))
        if self._env is not None:
            self._env.revert(name=name)
            LOG.info("Resuming environment after revert")
            self._env.resume()
        else:
            raise exceptions.EnvironmentIsNotSet()

        try:
            test_config_path = self._get_snapshot_config_name(name)
            settings_oslo.reload_snapshot_config(self.__config,
                                                 test_config_path)
        except cfg.ConfigFilesNotFoundError as conf_err:
            LOG.error("Config file(s) {0} not found!".format(
                conf_err.config_files))

        self.__config.hardware.current_snapshot = name

    def _create_environment(self):
        """Create environment and start VMs.

        If config was provided earlier, we simply create and start VMs,
        otherwise we tries to generate config from self.config_file,
        """
        if self._devops_config.config is None:
            raise exceptions.DevopsConfigPathIsNotSet()
        # Renamed local (was 'settings') to avoid shadowing the imported
        # tcp_tests.settings module.
        dconf = self._devops_config
        env_name = dconf['env_name']
        LOG.debug(
            'Preparing to create environment named "{0}"'.format(env_name)
        )
        if env_name is None:
            LOG.error('Environment name is not set!')
            raise exceptions.EnvironmentNameIsNotSet()
        try:
            self._env = models.Environment.create_environment(
                dconf.config
            )
        except db.IntegrityError:
            LOG.error(
                'Seems like environment {0} already exists.'.format(env_name)
            )
            raise exceptions.EnvironmentAlreadyExists(env_name)
        self._env.define()
        LOG.info(
            'Environment "{0}" created and started'.format(env_name)
        )

    def start(self):
        """Method for start environment

        Starts all VMs and waits until SSH (port 22) opens on each node.
        """
        if self._env is None:
            raise exceptions.EnvironmentIsNotSet()
        self._env.start()
        for node in self.master_nodes + self.slave_nodes:
            LOG.debug("Waiting for SSH on node '{}...'".format(node.name))
            timeout = 360
            helpers.wait(
                lambda: helpers.tcp_ping(self.node_ip(node), 22),
                timeout=timeout,
                timeout_msg="Node '{}' didn't open SSH in {} sec".format(
                    node.name, timeout
                )
            )

    def resume(self):
        """Resume environment"""
        if self._env is None:
            raise exceptions.EnvironmentIsNotSet()
        self._env.resume()

    def suspend(self):
        """Suspend environment"""
        if self._env is None:
            raise exceptions.EnvironmentIsNotSet()
        self._env.suspend()

    def stop(self):
        """Stop environment"""
        if self._env is None:
            raise exceptions.EnvironmentIsNotSet()
        self._env.destroy()

    def has_snapshot(self, name):
        """Return True when the underlying env has a snapshot *name*."""
        return self._env.has_snapshot(name)

    def has_snapshot_config(self, name):
        """Return True when a saved config exists for snapshot *name*."""
        test_config_path = self._get_snapshot_config_name(name)
        return os.path.isfile(test_config_path)

    def delete_environment(self):
        """Delete environment

        """
        LOG.debug("Deleting environment")
        self._env.erase()

    def __get_nodes_by_role(self, node_role):
        """Get node by given role name

        :param node_role: string
        :rtype: devops.models.Node
        """
        LOG.debug('Trying to get nodes by role {0}'.format(node_role))
        return self._env.get_nodes(role=node_role)

    @property
    def master_nodes(self):
        """Get all master nodes

        :rtype: list
        """
        # Bug fix: '.salt-master' parsed as a subtraction expression; the
        # enum exposes the role value through the attribute 'salt_master'.
        nodes = self.__get_nodes_by_role(
            node_role=ext.UNDERLAY_NODE_ROLE.salt_master)
        return nodes

    @property
    def slave_nodes(self):
        """Get all slave nodes

        :rtype: list
        """
        # Bug fix: '.salt-minion' parsed as a subtraction expression; the
        # enum exposes the role value through the attribute 'salt_minion'.
        nodes = self.__get_nodes_by_role(
            node_role=ext.UNDERLAY_NODE_ROLE.salt_minion)
        return nodes

    @staticmethod
    def node_ip(node):
        """Determine node's IP on the 'public' network

        Bug fix: restored from commented-out code -- get_ssh_data() and
        start() call self.node_ip(), which otherwise does not exist.

        :param node: devops.models.Node
        :return: string
        """
        LOG.debug('Trying to determine {0} ip.'.format(node.name))
        return node.get_ip_address_by_network_name(
            ext.NETWORK_TYPE.public
        )

    @property
    def nameserver(self):
        """IP of the virtual router on the public network."""
        return self._env.router(ext.NETWORK_TYPE.public)

    def set_dns_config(self):
        # Set local nameserver to use by default
        if not self.__config.underlay.nameservers:
            self.__config.underlay.nameservers = [self.nameserver]
        if not self.__config.underlay.upstream_dns_servers:
            self.__config.underlay.upstream_dns_servers = [self.nameserver]
diff --git a/tcp_tests/managers/envmanager_empty.py b/tcp_tests/managers/envmanager_empty.py
new file mode 100644
index 0000000..b9ab8e1
--- /dev/null
+++ b/tcp_tests/managers/envmanager_empty.py
@@ -0,0 +1,106 @@
+# Copyright 2016 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tcp_tests import settings_oslo
+
+
+class EnvironmentManagerEmpty(object):
+    """No-op environment manager for pre-provisioned (non-devops) hosts.
+
+    Implements the EnvironmentManager interface, but performs no real
+    hardware actions: snapshots are tracked only as a name stored in the
+    config object, and start/stop/suspend/resume do nothing.
+    """
+
+    # oslo.config object, assigned in __init__
+    __config = None
+
+    def __init__(self, config=None):
+        """Initializing class instance and create the environment
+
+        :param config: oslo.config object
+        :param config.hardware.conf_path: path to devops YAML template
+        :param config.hardware.current_snapshot: name of the snapshot that
+                                                 describes environment status.
+        """
+        self.__config = config
+
+    def lvm_storages(self):
+        """Returns data of lvm_storages on nodes in environment
+
+        It's expected that data of self.__config.lvm_storages will be
+        like this:
+            {
+                "node1": {
+                    "device": "vdb"
+                },
+                "node2": {
+                    "device": "vdb"
+                },
+                "node3": {
+                    "device": "vdb"
+                },
+            }
+        :rtype: dict
+        """
+        return self.__config.underlay.lvm
+
+    def get_ssh_data(self, roles=None):
+        # This manager has no knowledge of the nodes; SSH credentials
+        # must be supplied directly in config.underlay.ssh.
+        raise Exception("EnvironmentManagerEmpty doesn't have SSH details. "
+                        "Please provide SSH details in config.underlay.ssh")
+
+    def create_snapshot(self, name, description=None):
+        """Store environment state into the config object
+
+        - Store the state of the environment <name> to the 'config' object
+        - Save 'config' object to a file 'config_<name>.ini'
+        """
+        self.__config.hardware.current_snapshot = name
+        settings_oslo.save_config(self.__config, name)
+
+    def revert_snapshot(self, name):
+        """Check the current state <name> of the environment
+
+        - Check that the <name> matches the current state of the environment
+          that is stored in the 'self.__config.hardware.current_snapshot'
+        - Try to reload 'config' object from a file 'config_<name>.ini'
+          If the file not found, then pass with defaults.
+        - Set <name> as the current state of the environment after reload
+
+        :param name: string
+        """
+        if self.__config.hardware.current_snapshot != name:
+            raise Exception(
+                "EnvironmentManagerEmpty cannot revert nodes from {} to {}"
+                .format(self.__config.hardware.current_snapshot, name))
+
+    def start(self):
+        """Start environment"""
+        pass
+
+    def resume(self):
+        """Resume environment"""
+        pass
+
+    def suspend(self):
+        """Suspend environment"""
+        pass
+
+    def stop(self):
+        """Stop environment"""
+        pass
+
+    def has_snapshot(self, name):
+        # A "snapshot" exists only if it is the currently recorded state.
+        return self.__config.hardware.current_snapshot == name
+
+    def has_snapshot_config(self, name):
+        # Same check as has_snapshot(): state is tracked in config only.
+        return self.__config.hardware.current_snapshot == name
+
+    def delete_environment(self):
+        """Delete environment"""
+        pass
diff --git a/tcp_tests/managers/rallymanager.py b/tcp_tests/managers/rallymanager.py
new file mode 100644
index 0000000..bd937d5
--- /dev/null
+++ b/tcp_tests/managers/rallymanager.py
@@ -0,0 +1,118 @@
+# Copyright 2016 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tcp_tests import logger
+from tcp_tests import settings
+
+
+LOG = logger.logger
+
+
+class RallyManager(object):
+ """docstring for RallyManager"""
+
+ image_name = 'rallyforge/rally'
+ image_version = '0.5.0'
+
+ def __init__(self, underlay, admin_node_name):
+ super(RallyManager, self).__init__()
+ self._admin_node_name = admin_node_name
+ self._underlay = underlay
+
+ def prepare(self):
+ content = """
+sed -i 's|#swift_operator_role = Member|swift_operator_role=SwiftOperator|g' /etc/rally/rally.conf # noqa
+source /home/rally/openrc
+rally-manage db recreate
+rally deployment create --fromenv --name=tempest
+rally verify install
+rally verify genconfig
+rally verify showconfig"""
+ cmd = "cat > {path} << EOF\n{content}\nEOF".format(
+ path='/home/{user}/rally/install_tempest.sh'.format(
+ user=settings.SSH_LOGIN), content=content)
+ cmd1 = "chmod +x /home/{user}/rally/install_tempest.sh".format(
+ user=settings.SSH_LOGIN)
+ cmd2 = "cp /home/{user}/openrc-* /home/{user}/rally/openrc".format(
+ user=settings.SSH_LOGIN)
+
+ with self._underlay.remote(node_name=self._admin_node_name) as remote:
+ LOG.info("Create rally workdir")
+ remote.check_call('mkdir -p /home/{user}/rally'.format(
+ user=settings.SSH_LOGIN))
+ LOG.info("Create install_tempest.sh")
+ remote.check_call(cmd)
+ LOG.info("Chmod +x install_tempest.sh")
+ remote.check_call(cmd1)
+ LOG.info("Copy openstackrc")
+ remote.check_call(cmd2)
+
+ def pull_image(self, version=None):
+ version = version or self.image_version
+ image = self.image_name
+ cmd = "docker pull {image}:{version}".format(image=image,
+ version=version)
+ with self._underlay.remote(node_name=self._admin_node_name) as remote:
+ LOG.info("Pull {image}:{version}".format(image=image,
+ version=version))
+ remote.check_call(cmd)
+
+ with self._underlay.remote(node_name=self._admin_node_name) as remote:
+ LOG.info("Getting image id")
+ cmd = "docker images | grep 0.5.0| awk '{print $3}'"
+ res = remote.check_call(cmd)
+ self.image_id = res['stdout'][0].strip()
+ LOG.info("Image ID is {}".format(self.image_id))
+
+ def run(self):
+ with self._underlay.remote(node_name=self._admin_node_name) as remote:
+ cmd = ("docker run --net host -v /home/{user}/rally:/home/rally "
+ "-tid -u root {image_id}".format(
+ user=settings.SSH_LOGIN, image_id=self.image_id))
+ LOG.info("Run Rally container")
+ remote.check_call(cmd)
+
+ cmd = ("docker ps | grep {image_id} | "
+ "awk '{{print $1}}'| head -1").format(
+ image_id=self.image_id)
+ LOG.info("Getting container id")
+ res = remote.check_call(cmd)
+ self.docker_id = res['stdout'][0].strip()
+ LOG.info("Container ID is {}".format(self.docker_id))
+
+ def run_tempest(self, test=''):
+ docker_exec = ('source /home/{user}/rally/openrc; '
+ 'docker exec -i {docker_id} bash -c "{cmd}"')
+ commands = [
+ docker_exec.format(cmd="./install_tempest.sh",
+ user=settings.SSH_LOGIN,
+ docker_id=self.docker_id),
+ docker_exec.format(
+ cmd="source /home/rally/openrc && "
+ "rally verify start {test}".format(test=test),
+ user=settings.SSH_LOGIN,
+ docker_id=self.docker_id),
+ docker_exec.format(
+ cmd="rally verify results --json --output-file result.json",
+ user=settings.SSH_LOGIN,
+ docker_id=self.docker_id),
+ docker_exec.format(
+ cmd="rally verify results --html --output-file result.html",
+ user=settings.SSH_LOGIN,
+ docker_id=self.docker_id),
+ ]
+ with self._underlay.remote(node_name=self._admin_node_name) as remote:
+ LOG.info("Run tempest inside Rally container")
+ for cmd in commands:
+ remote.check_call(cmd, verbose=True)
diff --git a/tcp_tests/managers/underlay_ssh_manager.py b/tcp_tests/managers/underlay_ssh_manager.py
new file mode 100644
index 0000000..2880272
--- /dev/null
+++ b/tcp_tests/managers/underlay_ssh_manager.py
@@ -0,0 +1,361 @@
+# Copyright 2016 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import random
+
+from devops.helpers import helpers
+from devops.helpers import ssh_client
+from paramiko import rsakey
+
+from tcp_tests import logger
+from tcp_tests.helpers import utils
+
+LOG = logger.logger
+
+
+class UnderlaySSHManager(object):
+    """Keep the list of SSH access credentials to Underlay nodes.
+
+    This object is initialized using config.underlay.ssh.
+
+    :param config_ssh: JSONList of SSH access credentials for nodes:
+       [
+         {
+            node_name: node1,
+            address_pool: 'public-pool01',
+            host: ,
+            port: ,
+            keys: [],
+            keys_source_host: None,
+            login: ,
+            password: ,
+         },
+         {
+            node_name: node1,
+            address_pool: 'private-pool01',
+            host:
+            port:
+            keys: []
+            keys_source_host: None,
+            login:
+            password:
+         },
+         {
+            node_name: node2,
+            address_pool: 'public-pool01',
+            keys_source_host: node1
+            ...
+         }
+         ,
+         ...
+       ]
+
+    self.node_names(): list of node names registered in underlay.
+    self.remote(): SSHClient object by a node name (w/wo address pool)
+                   or by a hostname.
+    """
+    # Normalized list of SSH credential dicts (see class docstring)
+    config_ssh = None
+    # Copy of the LVM config applied by enable_lvm()
+    config_lvm = None
+
+    def __init__(self, config_ssh):
+        """Read config.underlay.ssh object
+
+        :param config_ssh: dict
+        """
+        if self.config_ssh is None:
+            self.config_ssh = []
+
+        if self.config_lvm is None:
+            self.config_lvm = {}
+
+        self.add_config_ssh(config_ssh)
+
+    def add_config_ssh(self, config_ssh):
+        """Normalize and register SSH credential records.
+
+        For records with 'keys_source_host', the private keys are fetched
+        over SSH from that (already registered) host and appended.
+        """
+        if config_ssh is None:
+            config_ssh = []
+
+        for ssh in config_ssh:
+            ssh_data = {
+                # Required keys:
+                'node_name': ssh['node_name'],
+                'host': ssh['host'],
+                'login': ssh['login'],
+                'password': ssh['password'],
+                # Optional keys:
+                'address_pool': ssh.get('address_pool', None),
+                'port': ssh.get('port', None),
+                'keys': ssh.get('keys', []),
+            }
+
+            if 'keys_source_host' in ssh:
+                node_name = ssh['keys_source_host']
+                remote = self.remote(node_name)
+                keys = self.__get_keys(remote)
+                ssh_data['keys'].extend(keys)
+
+            self.config_ssh.append(ssh_data)
+
+    def remove_config_ssh(self, config_ssh):
+        """Unregister SSH credential records matching the given ones.
+
+        NOTE(review): records registered with 'keys_source_host' had extra
+        keys appended in add_config_ssh(), so the dict rebuilt here may not
+        match them and list.remove() would raise ValueError — confirm.
+        """
+        if config_ssh is None:
+            config_ssh = []
+
+        for ssh in config_ssh:
+            ssh_data = {
+                # Required keys:
+                'node_name': ssh['node_name'],
+                'host': ssh['host'],
+                'login': ssh['login'],
+                'password': ssh['password'],
+                # Optional keys:
+                'address_pool': ssh.get('address_pool', None),
+                'port': ssh.get('port', None),
+                'keys': ssh.get('keys', []),
+            }
+            self.config_ssh.remove(ssh_data)
+
+    def __get_keys(self, remote):
+        """Fetch the remote user's ~/.ssh/id_rsa as a paramiko RSAKey list.
+
+        NOTE(review): the 'cd ~' runs in its own shell session and does not
+        affect the following exists() check; presumably the remote default
+        cwd is already $HOME — confirm.
+        """
+        keys = []
+        remote.execute('cd ~')
+        key_string = './.ssh/id_rsa'
+        if remote.exists(key_string):
+            with remote.open(key_string) as f:
+                keys.append(rsakey.RSAKey.from_private_key(f))
+        return keys
+
+    def __ssh_data(self, node_name=None, host=None, address_pool=None):
+        """Find a registered credentials record.
+
+        'host' takes precedence over 'node_name'; 'address_pool' narrows
+        the node_name match. Raises if nothing matches.
+        """
+        ssh_data = None
+
+        if host is not None:
+            for ssh in self.config_ssh:
+                if host == ssh['host']:
+                    ssh_data = ssh
+                    break
+
+        elif node_name is not None:
+            for ssh in self.config_ssh:
+                if node_name == ssh['node_name']:
+                    if address_pool is not None:
+                        if address_pool == ssh['address_pool']:
+                            ssh_data = ssh
+                            break
+                    else:
+                        ssh_data = ssh
+        if ssh_data is None:
+            raise Exception('Auth data for node was not found using '
+                            'node_name="{}" , host="{}" , address_pool="{}"'
+                            .format(node_name, host, address_pool))
+        return ssh_data
+
+    def node_names(self):
+        """Get list of node names registered in config.underlay.ssh"""
+
+        names = []  # List is used to keep the original order of names
+        for ssh in self.config_ssh:
+            if ssh['node_name'] not in names:
+                names.append(ssh['node_name'])
+        return names
+
+    def enable_lvm(self, lvmconfig):
+        """Method for enabling lvm on hosts in environment
+
+        :param lvmconfig: dict with ids or device' names of lvm storage
+        :raises: devops.error.DevopsCalledProcessError,
+            devops.error.TimeoutError, AssertionError, ValueError
+        """
+        def get_actions(lvm_id):
+            # Shell commands that bring up lvmetad, then create a PV, a
+            # 'default' VG and a 1G thin pool on the given device.
+            return [
+                "systemctl enable lvm2-lvmetad.service",
+                "systemctl enable lvm2-lvmetad.socket",
+                "systemctl start lvm2-lvmetad.service",
+                "systemctl start lvm2-lvmetad.socket",
+                "pvcreate {} && pvs".format(lvm_id),
+                "vgcreate default {} && vgs".format(lvm_id),
+                "lvcreate -L 1G -T default/pool && lvs",
+            ]
+        lvmpackages = ["lvm2", "liblvm2-dev", "thin-provisioning-tools"]
+        for node_name in self.node_names():
+            lvm = lvmconfig.get(node_name, None)
+            if not lvm:
+                continue
+            if 'id' in lvm:
+                lvmdevice = '/dev/disk/by-id/{}'.format(lvm['id'])
+            elif 'device' in lvm:
+                lvmdevice = '/dev/{}'.format(lvm['device'])
+            else:
+                raise ValueError("Unknown LVM device type")
+            if lvmdevice:
+                self.apt_install_package(
+                    packages=lvmpackages, node_name=node_name, verbose=True)
+                for command in get_actions(lvmdevice):
+                    self.sudo_check_call(command, node_name=node_name,
+                                         verbose=True)
+        self.config_lvm = dict(lvmconfig)
+
+    def host_by_node_name(self, node_name, address_pool=None):
+        """Return the registered host/IP for the given node name."""
+        ssh_data = self.__ssh_data(node_name=node_name,
+                                   address_pool=address_pool)
+        return ssh_data['host']
+
+    def remote(self, node_name=None, host=None, address_pool=None):
+        """Get SSHClient by a node name or hostname.
+
+        One of the following arguments should be specified:
+        - host (str): IP address or hostname. If specified, 'node_name' is
+                      ignored.
+        - node_name (str): Name of the node stored to config.underlay.ssh
+        - address_pool (str): optional for node_name.
+                              If None, use the first matched node_name.
+        """
+        ssh_data = self.__ssh_data(node_name=node_name, host=host,
+                                   address_pool=address_pool)
+        return ssh_client.SSHClient(
+            host=ssh_data['host'],
+            port=ssh_data['port'] or 22,
+            username=ssh_data['login'],
+            password=ssh_data['password'],
+            private_keys=ssh_data['keys'])
+
+    def check_call(
+            self, cmd,
+            node_name=None, host=None, address_pool=None,
+            verbose=False, timeout=None,
+            error_info=None,
+            expected=None, raise_on_err=True):
+        """Execute command on the node_name/host and check for exit code
+
+        :type cmd: str
+        :type node_name: str
+        :type host: str
+        :type verbose: bool
+        :type timeout: int
+        :type error_info: str
+        :type expected: list
+        :type raise_on_err: bool
+        :rtype: list stdout
+        :raises: devops.error.DevopsCalledProcessError
+        """
+        remote = self.remote(node_name=node_name, host=host,
+                             address_pool=address_pool)
+        return remote.check_call(
+            command=cmd, verbose=verbose, timeout=timeout,
+            error_info=error_info, expected=expected,
+            raise_on_err=raise_on_err)
+
+    def apt_install_package(self, packages=None, node_name=None, host=None,
+                            **kwargs):
+        """Method to install packages on ubuntu nodes
+
+        :type packages: list
+        :type node_name: str
+        :type host: str
+        :raises: devops.error.DevopsCalledProcessError,
+            devops.error.TimeoutError, AssertionError, ValueError
+
+        Other params of check_call and sudo_check_call are allowed
+        """
+        expected = kwargs.pop('expected', None)
+        if not packages or not isinstance(packages, list):
+            raise ValueError("packages list should be provided!")
+        install = "apt-get install -y {}".format(" ".join(packages))
+        # Should wait until other 'apt' jobs are finished
+        # (pgrep exits 1 when no matching process is found)
+        pgrep_expected = [0, 1]
+        pgrep_command = "pgrep -a -f apt"
+        helpers.wait(
+            lambda: (self.check_call(
+                pgrep_command, expected=pgrep_expected, host=host,
+                node_name=node_name, **kwargs).exit_code == 1
+            ), interval=30, timeout=1200,
+            timeout_msg="Timeout reached while waiting for apt lock"
+        )
+        # Install packages
+        self.sudo_check_call("apt-get update", node_name=node_name, host=host,
+                             **kwargs)
+        self.sudo_check_call(install, expected=expected, node_name=node_name,
+                             host=host, **kwargs)
+
+    def sudo_check_call(
+            self, cmd,
+            node_name=None, host=None, address_pool=None,
+            verbose=False, timeout=None,
+            error_info=None,
+            expected=None, raise_on_err=True):
+        """Execute command with sudo on node_name/host and check for exit code
+
+        :type cmd: str
+        :type node_name: str
+        :type host: str
+        :type verbose: bool
+        :type timeout: int
+        :type error_info: str
+        :type expected: list
+        :type raise_on_err: bool
+        :rtype: list stdout
+        :raises: devops.error.DevopsCalledProcessError
+        """
+        remote = self.remote(node_name=node_name, host=host,
+                             address_pool=address_pool)
+        with remote.get_sudo(remote):
+            return remote.check_call(
+                command=cmd, verbose=verbose, timeout=timeout,
+                error_info=error_info, expected=expected,
+                raise_on_err=raise_on_err)
+
+    def dir_upload(self, host, source, destination):
+        """Upload local directory content to remote host
+
+        :param host: str, remote node name
+        :param source: str, local directory path
+        :param destination: str, remote directory path
+        """
+        with self.remote(node_name=host) as remote:
+            remote.upload(source, destination)
+
+    def get_random_node(self):
+        """Get random node name
+
+        :return: str, name of node
+        """
+        return random.choice(self.node_names())
+
+    def yaml_editor(self, file_path, node_name=None, host=None,
+                    address_pool=None):
+        """Returns an initialized YamlEditor instance for context manager
+
+        Usage (with 'underlay' fixture):
+
+        # Local YAML file
+        with underlay.yaml_editor('/path/to/file') as editor:
+            editor.content[key] = "value"
+
+        # Remote YAML file on TCP host
+        with underlay.yaml_editor('/path/to/file',
+                                  host=config.tcp.tcp_host) as editor:
+            editor.content[key] = "value"
+        """
+        # Local YAML file
+        if node_name is None and host is None:
+            return utils.YamlEditor(file_path=file_path)
+
+        # Remote YAML file
+        ssh_data = self.__ssh_data(node_name=node_name, host=host,
+                                   address_pool=address_pool)
+        return utils.YamlEditor(
+            file_path=file_path,
+            host=ssh_data['host'],
+            port=ssh_data['port'] or 22,
+            username=ssh_data['login'],
+            password=ssh_data['password'],
+            private_keys=ssh_data['keys'])
diff --git a/tcp_tests/requirements.txt b/tcp_tests/requirements.txt
new file mode 100644
index 0000000..f3d037a
--- /dev/null
+++ b/tcp_tests/requirements.txt
@@ -0,0 +1,10 @@
+git+git://github.com/openstack/fuel-devops.git@3.0.3
+paramiko
+six
+requests>=2.2.0
+oslo.config>=3.12.0 # Apache-2.0
+pytest>=2.9
+docker-py
+docker-compose==1.7.1
+urllib3
+junit-xml
diff --git a/tcp_tests/run_test.py b/tcp_tests/run_test.py
new file mode 100644
index 0000000..ea29ded
--- /dev/null
+++ b/tcp_tests/run_test.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+
+import os
+import sys
+
+import pytest
+
+import tcp_tests
+
+
+def shell():
+    """Entry point: run py.test over the tcp_tests package.
+
+    Any command-line arguments are appended to a fixed set of pytest
+    options; with no arguments, pytest's help text is shown instead.
+
+    :return: pytest exit code
+    """
+    if len(sys.argv) > 1:
+        # Run py.test for tcp_tests module folder with specified options
+        testpaths = os.path.dirname(tcp_tests.__file__)
+        opts = ' '.join(sys.argv[1:])
+        addopts = '-vvv -s -p no:django -p no:ipdb --junit-xml=nosetests.xml'
+        # NOTE(review): passing a single string to pytest.main() relies on
+        # legacy pytest behavior; newer pytest expects a list of args —
+        # confirm against the pinned pytest version.
+        return pytest.main('{testpaths} {addopts} {opts}'.format(
+            testpaths=testpaths, addopts=addopts, opts=opts))
+    else:
+        return pytest.main('--help')
+
+
+if __name__ == '__main__':
+    sys.exit(shell())
diff --git a/tcp_tests/settings.py b/tcp_tests/settings.py
new file mode 100644
index 0000000..bf6b5b0
--- /dev/null
+++ b/tcp_tests/settings.py
@@ -0,0 +1,48 @@
+# Copyright 2016 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import os
+import pkg_resources
+import time
+
+# Mapping of environment-variable strings to booleans (case-insensitive)
+_boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
+                   '0': False, 'no': False, 'false': False, 'off': False}
+
+# Default devops template shipped inside the package
+_default_conf = pkg_resources.resource_filename(
+    __name__, 'templates/tcpcloud-default.yaml')
+
+
+def get_var_as_bool(name, default):
+    """Read environment variable *name* as a boolean.
+
+    :param name: str, environment variable name
+    :param default: returned when the variable is unset or unrecognized
+    """
+    value = os.environ.get(name, '')
+    return _boolean_states.get(value.lower(), default)
+
+
+# Where test logs and generated config snapshots are written
+LOGS_DIR = os.environ.get('LOGS_DIR', os.getcwd())
+TIMESTAT_PATH_YAML = os.environ.get(
+    'TIMESTAT_PATH_YAML', os.path.join(
+        LOGS_DIR, 'timestat_{}.yaml'.format(time.strftime("%Y%m%d"))))
+
+SSH_LOGIN = os.environ.get('SSH_LOGIN', 'vagrant')
+SSH_PASSWORD = os.environ.get('SSH_PASSWORD', 'vagrant')
+SSH_NODE_CREDENTIALS = {"login": SSH_LOGIN,
+                        "password": SSH_PASSWORD}
+
+CONF_PATH = os.environ.get('CONF_PATH', os.path.abspath(_default_conf))
+SHUTDOWN_ENV_ON_TEARDOWN = get_var_as_bool('SHUTDOWN_ENV_ON_TEARDOWN', True)
+
+# public_iface = IFACES[0]
+# private_iface = IFACES[1]
+IFACES = [
+    os.environ.get("IFACE_0", "eth0"),
+    os.environ.get("IFACE_1", "eth1"),
+]
diff --git a/tcp_tests/settings_oslo.py b/tcp_tests/settings_oslo.py
new file mode 100644
index 0000000..6c818f2
--- /dev/null
+++ b/tcp_tests/settings_oslo.py
@@ -0,0 +1,175 @@
+# Copyright 2016 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import copy
+import os
+import pkg_resources
+
+from oslo_config import cfg
+from oslo_config import generator
+
+from tcp_tests.helpers import ext
+from tcp_tests.helpers import oslo_cfg_types as ct
+from tcp_tests import settings
+
+
+# Default devops template shipped inside the package
+_default_conf = pkg_resources.resource_filename(
+    __name__, 'templates/default.yaml')
+
+
+# Options describing the hardware (devops) backend and its state
+hardware_opts = [
+    ct.Cfg('manager', ct.String(),
+           help="Hardware manager name", default="devops"),
+    ct.Cfg('conf_path', ct.String(),
+           help="Hardware config file", default=_default_conf),
+    ct.Cfg('current_snapshot', ct.String(),
+           help="Latest environment status name",
+           default=ext.SNAPSHOT.hardware),
+]
+
+
+underlay_opts = [
+ ct.Cfg('ssh', ct.JSONList(),
+ help="""SSH Settings for Underlay: [{
+ 'node_name': node1,
+ 'host': hostname,
+ 'login': login,
+ 'password': password,
+ 'address_pool': (optional),
+ 'port': (optional),
+ 'keys': [(optional)],
+ }, ...]""", default=[]),
+ ct.Cfg('roles', ct.JSONList(),
+ help="Node roles managed by underlay in the environment",
+ default=[ext.UNDERLAY_NODE_ROLE.salt-master,
+ ext.UNDERLAY_NODE_ROLE.salt-minion, ]),
+ ct.Cfg('nameservers', ct.JSONList(),
+ help="IP addresses of DNS servers",
+ default=[]),
+ ct.Cfg('upstream_dns_servers', ct.JSONList(),
+ help="IP addresses of upstream DNS servers (dnsmasq)",
+ default=[]),
+ ct.Cfg('lvm', ct.JSONDict(),
+ help="LVM settings for Underlay", default={}),
+]
+
+# Deploy options for a new TCPCloud deployment
+tcp_deploy_opts = [
+    ct.Cfg('reclass_settings', ct.JSONDict(),
+           help="", default={}),
+]
+
+
+# Access credentials to a ready TCP cluster
+tcp_opts = [
+    ct.Cfg('tcp_host', ct.IPAddress(),
+           help="", default='0.0.0.0'),
+]
+
+
+# Openstack deployment options (currently all disabled)
+os_deploy_opts = [
+    # ct.Cfg('stacklight_enable', ct.Boolean(),
+    #        help="", default=False),
+]
+
+# Access credentials to a deployed Openstack
+os_opts = [
+    ct.Cfg('keystone_endpoint', ct.String(),
+           help="", default=''),
+]
+
+
+# (group name, options) pairs; keep in sync with register_opts()
+_group_opts = [
+    ('hardware', hardware_opts),
+    ('underlay', underlay_opts),
+    ('tcp_deploy', tcp_deploy_opts),
+    ('tcp', tcp_opts),
+    ('os_deploy', os_deploy_opts),
+    ('os', os_opts),
+]
+
+
+def register_opts(config):
+    """Register all option groups from _group_opts on the config object.
+
+    :param config: oslo.config CONF object
+    :return: the same config object, with groups/options registered
+    """
+    config.register_group(cfg.OptGroup(name='hardware',
+                          title="Hardware settings", help=""))
+    config.register_opts(group='hardware', opts=hardware_opts)
+
+    config.register_group(cfg.OptGroup(name='underlay',
+                          title="Underlay configuration", help=""))
+    config.register_opts(group='underlay', opts=underlay_opts)
+
+    config.register_group(cfg.OptGroup(name='tcp_deploy',
+                          title="tcp deploy configuration", help=""))
+    config.register_opts(group='tcp_deploy', opts=tcp_deploy_opts)
+
+    config.register_group(cfg.OptGroup(name='tcp',
+                          title="tcp config and credentials", help=""))
+    config.register_opts(group='tcp', opts=tcp_opts)
+
+    config.register_group(cfg.OptGroup(name='os',
+                          title="Openstack config and credentials", help=""))
+    config.register_opts(group='os', opts=os_opts)
+    config.register_group(
+        cfg.OptGroup(name='os_deploy',
+                     title="Openstack deploy config and credentials",
+                     help=""))
+    config.register_opts(group='os_deploy', opts=os_deploy_opts)
+    return config
+
+
+def load_config(config_files):
+    """Build the global CONF object from the given config files.
+
+    :param config_files: list of config file paths
+    :return: oslo.config CONF object with all groups registered
+    """
+    config = cfg.CONF
+    register_opts(config)
+    config(args=[], default_config_files=config_files)
+    return config
+
+
+def reload_snapshot_config(config, test_config_path):
+    """Reset config to the state from test_config file"""
+    config(args=[], default_config_files=[test_config_path])
+    return config
+
+
+def list_opts():
+    """Return a list of oslo.config options available in the tcp_tests.
+
+    Options are deep-copied so callers may mutate them safely.
+    """
+    return [(group, copy.deepcopy(opts)) for group, opts in _group_opts]
+
+
+def list_current_opts(config):
+    """Return a list of oslo.config options available in the tcp_tests,
+    with each option's default replaced by the current value held in
+    the given config object (used for serializing the live config).
+    """
+    result_opts = []
+    for group, opts in _group_opts:
+        current_opts = copy.deepcopy(opts)
+        for opt in current_opts:
+            if hasattr(config, group):
+                if hasattr(config[group], opt.name):
+                    opt.default = getattr(config[group], opt.name)
+        result_opts.append((group, current_opts))
+    return result_opts
+
+
+def save_config(config, snapshot_name, env_name=None):
+    """Write current config values to LOGS_DIR/<env_name>_<snapshot_name>.ini
+
+    :param config: oslo.config CONF object
+    :param snapshot_name: str, snapshot name embedded in the file name
+    :param env_name: str, optional file-name prefix; defaults to 'config'
+    """
+    if env_name is None:
+        env_name = 'config'
+    test_config_path = os.path.join(
+        settings.LOGS_DIR, '{0}_{1}.ini'.format(env_name, snapshot_name))
+
+    with open(test_config_path, 'w') as output_file:
+        # NOTE(review): generator._OptFormatter is a private oslo.config
+        # API and may change between releases — confirm pinned version.
+        formatter = generator._OptFormatter(output_file=output_file)
+        for group, opts in list_current_opts(config):
+            formatter.format_group(group)
+            for opt in opts:
+                formatter.format(opt, group, minimal=True)
+                formatter.write('\n')
+            formatter.write('\n')
diff --git a/tcp_tests/templates/tcpcloud--meta-data.yaml b/tcp_tests/templates/tcpcloud--meta-data.yaml
new file mode 100644
index 0000000..3699401
--- /dev/null
+++ b/tcp_tests/templates/tcpcloud--meta-data.yaml
@@ -0,0 +1,4 @@
+| # All the data below will be stored as a string object
+ instance-id: iid-local1
+ hostname: {hostname}
+ local-hostname: {hostname}
diff --git a/tcp_tests/templates/tcpcloud--user-data-master-node.yaml b/tcp_tests/templates/tcpcloud--user-data-master-node.yaml
new file mode 100644
index 0000000..891fdb0
--- /dev/null
+++ b/tcp_tests/templates/tcpcloud--user-data-master-node.yaml
@@ -0,0 +1,170 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: vagrant
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDGwjUlYn9UsmWmAGSuEA2sICad7WqxgsJR0HKcMbbxi0tn96h4Cq2iGYmzlJ48egLm5R5pxyWnFvL4b/2zb+kKTPCMwRc9nv7xEGosEFNQEoSDd+gYu2CO0dgS2bX/7m2DXmzvhqPjxWQUXXsb0OYAS1r9Es65FE8y4rLaegz8V35xfH45bTCA0W8VSKh264XtGz12hacqsttE/UvyjJTZe+/XV+xJy3WAWxe8J/MuW1VqbqNewTmpTE/LJU8i6pG4msU6+wH99UvsGAOKQOduynUHKWG3VZg5YCjpbbV/t/pfW/vHB3b3jiifQmNhulyiG/CNnSQ5BahtV/7qPsYt vagrant@cfg01
+
+ chpasswd:
+ list: |
+ vagrant:vagrant
+ expire: False
+
+ bootcmd:
+ # Block access to SSH while node is preparing
+ - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Change owner for .ssh/
+ - chown -R vagrant:vagrant /home/vagrant/.ssh
+
+ # Prepare network connection
+ - sudo ifup {interface_name}
+ - sudo route add default gw {gateway} {interface_name}
+ - sudo ifup eth1
+
+ ############## TCP Cloud cfg01 node ##################
+ - echo "Preparing base OS"
+ - which wget >/dev/null || (apt-get update; apt-get install -y wget)
+
+ - echo "deb [arch=amd64] http://apt.tcpcloud.eu/nightly/ trusty main security extra tcp tcp-salt" > /etc/apt/sources.list
+ - wget -O - http://apt.tcpcloud.eu/public.gpg | apt-key add -
+
+ - apt-get clean
+ - apt-get update
+ - apt-get -y upgrade
+
+ # Install common packages
+ - apt-get install -y python-pip
+ - apt-get install -y curl tmux byobu iputils-ping traceroute htop tree
+
+ - echo "Configuring salt master ..."
+ - apt-get install -y salt-master reclass
+ - apt-get install -y salt-formula-*
+
+ - |
+ cat << 'EOF' >> /etc/salt/master.d/master.conf
+ file_roots:
+ base:
+ - /usr/share/salt-formulas/env
+ pillar_opts: False
+ open_mode: True
+ reclass: &reclass
+ storage_type: yaml_fs
+ inventory_base_uri: /srv/salt/reclass
+ ext_pillar:
+ - reclass: *reclass
+ master_tops:
+ reclass: *reclass
+ EOF
+
+ - echo "Configuring reclass ..."
+ - git clone https://github.com/Mirantis/mk-lab-salt-model.git /srv/salt/reclass -b master
+
+ - mkdir -p /srv/salt/reclass/classes/service
+ - for i in /usr/share/salt-formulas/reclass/service/*; do ln -s $i /srv/salt/reclass/classes/service/; done
+
+ - '[ ! -d /etc/reclass ] && mkdir /etc/reclass'
+ - |
+ cat << 'EOF' >> /etc/reclass/reclass-config.yml
+ storage_type: yaml_fs
+ pretty_print: True
+ output: yaml
+ inventory_base_uri: /srv/salt/reclass
+ EOF
+
+ - echo "Configuring salt minion ..."
+ - apt-get install -y salt-minion
+ - '[ ! -d /etc/salt/minion.d ] && mkdir -p /etc/salt/minion.d'
+
+ - |
+ cat << "EOF" >> /etc/salt/minion.d/minion.conf
+ id: {hostname}
+ master: localhost
+ EOF
+
+ - echo "Restarting services ..."
+ - service salt-master restart
+ - rm -f /etc/salt/pki/minion/minion_master.pub
+ - service salt-minion restart
+
+ - echo "Showing system info and metadata ..."
+ #- salt-call --no-color grains.items
+ #- salt-call --no-color pillar.data
+ - reclass -n {hostname}
+
+ #- echo "Running complete state ..."
+ #- salt-call --no-color state.sls linux openssh salt.minion
+ #- salt-call --no-color state.sls salt.master
+ #- service salt-minion restart
+ #salt-call --no-color state.highstate
+
+ ########################################################
+ # Node is ready, allow SSH access
+ - echo "Allow SSH access ..."
+ - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+ ########################################################
+
+ write_files:
+ - path: /etc/network/interfaces.d/99-tcp-tests.cfg
+ content: |
+ auto eth0
+ iface eth0 inet dhcp
+
+ # 2nd interface should be UP without IP address
+ auto eth1
+ iface eth1 inet manual
+ pre-up ifconfig $IFACE up
+ post-down ifconfig $IFACE down
+
+ - path: /home/vagrant/.ssh/id_rsa
+ owner: vagrant:vagrant
+ permissions: '0600'
+ content: |
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIEpAIBAAKCAQEAxsI1JWJ/VLJlpgBkrhANrCAmne1qsYLCUdBynDG28YtLZ/eo
+ eAqtohmJs5SePHoC5uUeacclpxby+G/9s2/pCkzwjMEXPZ7+8RBqLBBTUBKEg3fo
+ GLtgjtHYEtm1/+5tg15s74aj48VkFF17G9DmAEta/RLOuRRPMuKy2noM/Fd+cXx+
+ OW0wgNFvFUioduuF7Rs9doWnKrLbRP1L8oyU2Xvv11fsSct1gFsXvCfzLltVam6j
+ XsE5qUxPyyVPIuqRuJrFOvsB/fVL7BgDikDnbsp1Bylht1WYOWAo6W21f7f6X1v7
+ xwd2944on0JjYbpcohvwjZ0kOQWobVf+6j7GLQIDAQABAoIBAF0tAAMlmLGY7CQU
+ /R3IctBlRhU1DpZmyTfXc1MbzzqO5Wu44yZbQyjBthcUrdWGEUQy1r4Z2OHq1T54
+ KcPry6DDjuU9Q+rkVXmnC07a3GOmOq7zEEA/3zU01ImJvFNdb8NtCb6ELOKDT7Zo
+ WGUi2h/7M41+OqDzD2m4csYO/3Vvr12sMhn9BfwU4OPpL44A4PJiEryEAw9o5/j/
+ 73eyPvgf6tkC4l0mMtfHB9tg/F++iH8fiEr1SMvHGIc9gZNmFYMrs2XfLkAejPfH
+ XrOyw6eqd+kluqw51gHhdeQYwBx6mfOkbhPHWU79FzpH5M1ikdfImZmPCxVf3Ykj
+ nxLoK9UCgYEA4c9agPb/OFyN00nnUMBxzQt1pErpOf/7QhnvNZThomzSV7PyefxF
+ H6G/VlS3gCcrWBCh7mqOSxGcNQwgudVqzUm7QXruQeg4nWcCGSxg7lGYSEf0MyWL
+ 5wrd+f9MoV/VV8udIPENjp96o5kwQEVRfsTBNwmk54kup2+br5q8re8CgYEA4VT8
+ UeIN+plP6FjZYITI+SO/ou5goKIhfBrqa5gOXXPc2y6sIu9wBWCr+T7FAF/2gGhS
+ rpVx76zcmx05nwkxIlJh58+G3MVyUDFoWnrtL38vdkBSuOGgNfzcBsFpQvFs8WaW
+ otbuTtkPcXbVdYRr32/C620MxXhUO+svo3CLaaMCgYEA1rjlF8NHl+Gy31rkQg5t
+ aIxgFpVBR+zZkNa2d94V3Ozb65fqmALB/D1Dg6VVROB6P+i5AsyCeHHLd0oMCIof
+ YAyfqrlpvHRE+bAM98ESfyxJwVnipYwrh8z2nZYd2UoWxcCRrtRpjtipts2ha0w/
+ HWudS2e5To5NNdxUT9y1VDMCgYEAxkQiE+ZkyGiXv+hVtLCBqX4EA9fdm9msvudr
+ 9qn/kcj9vrntanvlxEWQbCoH61GEsu2YOtdyPiKKpc1sQvwyiHGWhgK7NoxhDiC7
+ IknhYxZ064ajgtu8PWS1MRiDhwypACt1Rej6HNSu2vZl0hZnWF2dU8tLHoHHFEXX
+ T+caNCMCgYBZpD6XBiiEXf0ikXYnXKOmbsyVG80V+yqfLo85qb2RW9TaviOSP43g
+ nB22ReMSHq2cOrs6VTTgfhxefBwzdDFbfKMf6ZU82jCNlpetAZOrhdMHUvcsjSQk
+ XKI6Ldfq6TU3xKujRHfGP+oQ6GLwVCL/kjGxOuSRLFGfRiiqYI3nww==
+ -----END RSA PRIVATE KEY-----
+
+ - path: /home/vagrant/.ssh/config
+ owner: vagrant:vagrant
+ permissions: '0600'
+ content: |
+ Host *
+ ServerAliveInterval 300
+ ServerAliveCountMax 10
+ StrictHostKeyChecking no
+ UserKnownHostsFile /dev/null
diff --git a/tcp_tests/templates/tcpcloud--user-data.yaml b/tcp_tests/templates/tcpcloud--user-data.yaml
new file mode 100644
index 0000000..e7f96f3
--- /dev/null
+++ b/tcp_tests/templates/tcpcloud--user-data.yaml
@@ -0,0 +1,80 @@
+| # All the data below will be stored as a string object
+ #cloud-config, see http://cloudinit.readthedocs.io/en/latest/topics/examples.html
+
+ ssh_pwauth: True
+ users:
+ - name: vagrant
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ shell: /bin/bash
+ ssh_authorized_keys:
+ - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDGwjUlYn9UsmWmAGSuEA2sICad7WqxgsJR0HKcMbbxi0tn96h4Cq2iGYmzlJ48egLm5R5pxyWnFvL4b/2zb+kKTPCMwRc9nv7xEGosEFNQEoSDd+gYu2CO0dgS2bX/7m2DXmzvhqPjxWQUXXsb0OYAS1r9Es65FE8y4rLaegz8V35xfH45bTCA0W8VSKh264XtGz12hacqsttE/UvyjJTZe+/XV+xJy3WAWxe8J/MuW1VqbqNewTmpTE/LJU8i6pG4msU6+wH99UvsGAOKQOduynUHKWG3VZg5YCjpbbV/t/pfW/vHB3b3jiifQmNhulyiG/CNnSQ5BahtV/7qPsYt vagrant@cfg01
+
+ chpasswd:
+ list: |
+ vagrant:vagrant
+ expire: False
+
+ bootcmd:
+ # Block access to SSH while node is preparing
+ - cloud-init-per once sudo iptables -A INPUT -p tcp --dport 22 -j DROP
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log /dev/tty0'
+
+ runcmd:
+ # Configure dhclient
+ - sudo echo "nameserver {gateway}" >> /etc/resolvconf/resolv.conf.d/base
+ - sudo resolvconf -u
+
+ # Prepare network connection
+ - sudo ifup {interface_name}
+ - sudo route add default gw {gateway} {interface_name}
+ - sudo ifup eth1
+
+ ############## TCP Cloud cfg01 node ##################
+ #- sleep 120
+ - echo "Preparing base OS"
+ - which wget >/dev/null || (apt-get update; apt-get install -y wget)
+
+ - echo "deb [arch=amd64] http://apt.tcpcloud.eu/nightly/ trusty main security extra tcp tcp-salt" > /etc/apt/sources.list
+ - wget -O - http://apt.tcpcloud.eu/public.gpg | apt-key add -
+
+ - apt-get clean
+ - apt-get update
+ - apt-get -y upgrade
+
+ # Install common packages
+ - apt-get install -y python-pip git
+ - apt-get install -y curl tmux byobu iputils-ping traceroute htop tree
+
+ - apt-get install -y salt-minion
+
+ # To be configured from inventory/fuel-devops by operator or autotests
+ - 'echo "id: {hostname}" >> /etc/salt/minion'
+ - 'echo "master: 172.16.10.2" >> /etc/salt/minion'
+
+ - rm -f /etc/salt/pki/minion/minion_master.pub
+ - service salt-minion restart
+
+ #- echo "Showing node metadata..."
+ #- salt-call pillar.data
+
+ #- echo "Running complete state ..."
+ #- salt-call state.sls linux,openssh,salt
+
+ ########################################################
+ # Node is ready, allow SSH access
+ - echo "Allow SSH access ..."
+ - sudo iptables -D INPUT -p tcp --dport 22 -j DROP
+ ########################################################
+
+ write_files:
+ - path: /etc/network/interfaces.d/99-tcp-tests.cfg
+ content: |
+ auto eth0
+ iface eth0 inet dhcp
+
+ # 2nd interface should be UP without IP address
+ auto eth1
+ iface eth1 inet manual
+ pre-up ifconfig $IFACE up
+ post-down ifconfig $IFACE down
diff --git a/tcp_tests/templates/tcpcloud-default.yaml b/tcp_tests/templates/tcpcloud-default.yaml
new file mode 100644
index 0000000..a16436e
--- /dev/null
+++ b/tcp_tests/templates/tcpcloud-default.yaml
@@ -0,0 +1,319 @@
+---
+aliases:
+ dynamic_addresses_pool:
+ - &pool_default !os_env POOL_DEFAULT, 172.16.10.0/24:24
+
+ default_interface_model:
+ - &interface_model !os_env INTERFACE_MODEL, virtio
+
+template:
+ devops_settings:
+ env_name: !os_env ENV_NAME, tcpcloud-mk20
+
+ address_pools:
+ public-pool01:
+ net: *pool_default
+ params:
+ ip_reserved:
+ gateway: +1
+ l2_network_device: +1
+ ip_ranges:
+ dhcp: [+2, -2]
+
+ groups:
+ - name: default
+ driver:
+ name: devops.driver.libvirt
+ params:
+ connection_string: !os_env CONNECTION_STRING, qemu:///system
+ storage_pool_name: !os_env STORAGE_POOL_NAME, default
+ stp: False
+ hpet: False
+ enable_acpi: true
+ use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
+
+ network_pools:
+ public: public-pool01
+
+ l2_network_devices:
+ public:
+ address_pool: public-pool01
+ dhcp: true
+ forward:
+ mode: nat
+
+ private:
+ dhcp: false
+
+ group_volumes:
+ - name: cloudimage # This name is used for 'backing_store' option for node volumes.
+ source_image: !os_env IMAGE_PATH # https://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img or
+ # http://apt.tcpcloud.eu/images/ubuntu-14-04-x64-201608231134.qcow2
+ format: qcow2
+
+ nodes:
+ - name: cfg01.mk20-lab-advanced.local
+ role: salt-master
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 2
+ memory: !os_env SLAVE_NODE_MEMORY, 4096
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: eth0
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: !include tcpcloud--meta-data.yaml
+ cloudinit_user_data: !include tcpcloud--user-data-master-node.yaml
+
+ interfaces:
+ - label: eth0
+ l2_network_device: public
+ interface_model: *interface_model
+ - label: eth1
+ l2_network_device: private
+ interface_model: *interface_model
+ network_config:
+ eth0: # Will get an IP from DHCP public-pool01
+ networks:
+ - public
+ eth1:
+ networks:
+ - private
+
+ - name: ctl01.mk20-lab-advanced.local
+ role: salt-minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 12400
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: eth0
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: !include tcpcloud--meta-data.yaml
+ cloudinit_user_data: !include tcpcloud--user-data.yaml
+
+ interfaces:
+ - label: eth0
+ l2_network_device: public
+ interface_model: *interface_model
+ - label: eth1
+ l2_network_device: private
+ interface_model: *interface_model
+ network_config:
+ eth0: # Will get an IP from DHCP public-pool01
+ networks:
+ - public
+ eth1:
+ networks:
+ - private
+
+ - name: ctl02.mk20-lab-advanced.local
+ role: salt-minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 12400
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: eth0
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: !include tcpcloud--meta-data.yaml
+ cloudinit_user_data: !include tcpcloud--user-data.yaml
+
+ interfaces:
+ - label: eth0
+ l2_network_device: public
+ interface_model: *interface_model
+ - label: eth1
+ l2_network_device: private
+ interface_model: *interface_model
+ network_config:
+ eth0: # Will get an IP from DHCP public-pool01
+ networks:
+ - public
+ eth1:
+ networks:
+ - private
+
+ - name: ctl03.mk20-lab-advanced.local
+ role: salt-minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 12400
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: eth0
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: !include tcpcloud--meta-data.yaml
+ cloudinit_user_data: !include tcpcloud--user-data.yaml
+
+ interfaces:
+ - label: eth0
+ l2_network_device: public
+ interface_model: *interface_model
+ - label: eth1
+ l2_network_device: private
+ interface_model: *interface_model
+ network_config:
+ eth0: # Will get an IP from DHCP public-pool01
+ networks:
+ - public
+ eth1:
+ networks:
+ - private
+
+ - name: cmp01.mk20-lab-advanced.local
+ role: salt-minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 4
+ memory: !os_env SLAVE_NODE_MEMORY, 8192
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: eth0
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: !include tcpcloud--meta-data.yaml
+ cloudinit_user_data: !include tcpcloud--user-data.yaml
+
+ interfaces:
+ - label: eth0
+ l2_network_device: public
+ interface_model: *interface_model
+ - label: eth1
+ l2_network_device: private
+ interface_model: *interface_model
+ network_config:
+ eth0: # Will get an IP from DHCP public-pool01
+ networks:
+ - public
+ eth1:
+ networks:
+ - private
+
+ - name: web01.mk20-lab-advanced.local
+ role: salt-minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: eth0
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: !include tcpcloud--meta-data.yaml
+ cloudinit_user_data: !include tcpcloud--user-data.yaml
+
+ interfaces:
+ - label: eth0
+ l2_network_device: public
+ interface_model: *interface_model
+ - label: eth1
+ l2_network_device: private
+ interface_model: *interface_model
+ network_config:
+ eth0: # Will get an IP from DHCP public-pool01
+ networks:
+ - public
+ eth1:
+ networks:
+ - private
+
+ - name: mtr01.mk20-lab-advanced.local
+ role: salt-minion
+ params:
+ vcpu: !os_env SLAVE_NODE_CPU, 1
+ memory: !os_env SLAVE_NODE_MEMORY, 2048
+ boot:
+ - hd
+ cloud_init_volume_name: iso
+ cloud_init_iface_up: eth0
+ volumes:
+ - name: system
+ capacity: !os_env NODE_VOLUME_SIZE, 150
+ backing_store: cloudimage
+ format: qcow2
+ - name: iso # Volume with name 'iso' will be used
+ # for store image with cloud-init metadata.
+ capacity: 1
+ format: raw
+ device: cdrom
+ bus: ide
+ cloudinit_meta_data: !include tcpcloud--meta-data.yaml
+ cloudinit_user_data: !include tcpcloud--user-data.yaml
+
+ interfaces:
+ - label: eth0
+ l2_network_device: public
+ interface_model: *interface_model
+ - label: eth1
+ l2_network_device: private
+ interface_model: *interface_model
+ network_config:
+ eth0: # Will get an IP from DHCP public-pool01
+ networks:
+ - public
+ eth1:
+ networks:
+ - private
diff --git a/tcp_tests/tests/system/test_tcp_install.py b/tcp_tests/tests/system/test_tcp_install.py
new file mode 100644
index 0000000..215aeb5
--- /dev/null
+++ b/tcp_tests/tests/system/test_tcp_install.py
@@ -0,0 +1,42 @@
+# Copyright 2016 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import copy
+
+import pytest
+
+import base_test
+from tcp_tests import settings
+from tcp_tests.helpers import ext
+
+
+@pytest.mark.deploy
+class TestTCPInstaller(object):
+ """Test class for testing TCP deployment"""
+
+ @pytest.mark.snapshot_needed
+ @pytest.mark.revert_snapshot(ext.SNAPSHOT.underlay)
+ @pytest.mark.fail_snapshot
+ def test_tcp_install_default(self, underlay, tcp_actions, show_step):
+        """Test for deploying a tcp environment and checking it
+
+ Preconditions:
+ 1. Environment with installed salt master and salt minions
+ 2. Installed TCP salt formulas on salt master
+
+ Scenario:
+ 1. Show TCP config
+ """
+
+ show_step(1)
+ tcp_actions.show_tcp_config()
diff --git a/tcp_tests/tests/unit/test_env_config_funcs.py b/tcp_tests/tests/unit/test_env_config_funcs.py
new file mode 100644
index 0000000..003c216
--- /dev/null
+++ b/tcp_tests/tests/unit/test_env_config_funcs.py
@@ -0,0 +1,141 @@
+import copy
+
+import pytest
+
+from tcp_tests.helpers import env_config as funcs
+
+# Test data for funcs.return_obj
+testdata1 = [
+ ([], {}),
+ ([0], [{}]),
+ ([1], [None, {}]),
+ ([4, 1], [None, None, None, None, [None, {}]]),
+ (
+ [3, 1, 6],
+ [None, None, None, [None, [None, None, None, None, None, None, {}]]]
+ ),
+ (
+ [-1, -3, 0],
+ [[[{}], None, None]]
+ ),
+ (
+ [-1, 1, -2],
+ [[None, [{}, None]]]
+ ),
+]
+
+# Test data for funcs.set_value_for_dict_by_keypath
+some_dict = {}
+sample1 = {'params': {'settings': {'version': 3}}}
+sample2 = copy.deepcopy(sample1)
+sample2.update({'env_name': 'mcp_test'})
+sample3 = copy.deepcopy(sample2)
+sample3.update(
+ {
+ 'groups': [
+ {
+ 'nodes': [
+ None,
+ {
+ 'volumes': [
+ {'source_image': 'some_path'}
+ ]
+ }
+ ]
+ }
+ ]
+ }
+)
+testdata2 = [
+ (some_dict, 'params.settings.version', 3, sample1),
+ (some_dict, 'env_name', 'mcp_test', sample2),
+ (
+ some_dict,
+ 'groups[0].nodes[1].volumes[0].source_image',
+ 'some_path',
+ sample3
+ )
+]
+
+# Test data for funcs.list_update
+testdata3 = [
+ ([None, None, None], [2], 'Test', [None, None, 'Test']),
+ ([None, None, None], [-1], 'Test', [None, None, 'Test']),
+ ([None, [None, [None]]], [1, 1, 0], 'Test', [None, [None, ['Test']]]),
+ ([None, [None, [None]]], [-1, 1, 0], 'Test', [None, [None, ['Test']]]),
+ ([None, [None, [None]]], [-1, -1, 0], 'Test', [None, [None, ['Test']]]),
+ ([None, [None, [None]]], [-1, -1, -1], 'Test', [None, [None, ['Test']]]),
+]
+
+sample_list = [
+ "string",
+ [
+ "sublist string",
+ ],
+ {"index": 2, "value": "dict"}
+]
+list_update_fail = [
+ (sample_list, [0, 1], "test_fail"),
+ (sample_list, [1, 1], "test_fail"),
+ (sample_list, [1, 1], "test_fail"),
+ (sample_list, [0, [2]], "test_fail"),
+ (sample_list, [0, None], "test_fail"),
+ (sample_list, ["a"], "test_fail")
+]
+
+sample_dict = {"root": {"subroot": {"list": ["Test", "value", [1]]}}}
+keypath_fail = [
+ (sample_dict, "root.subroot.list[2][1]", 3, True),
+ (sample_dict, "root.subroot.list[1][0]", 3, True),
+ (sample_dict, "root.subroot[0]", 3, True),
+ (sample_dict, "root.subroot.undefinedkey", 3, False),
+]
+
+
+@pytest.mark.parametrize("x,exp", testdata1)
+@pytest.mark.unit_tests
+@pytest.mark.return_obj
+def test_return_obj_ok(x, exp):
+ assert funcs.return_obj(x) == exp
+
+
+@pytest.mark.xfail(strict=True)
+@pytest.mark.parametrize("x", ["test_fail", [[-1]], ["test_fail"], [0, [3]]])
+@pytest.mark.unit_tests
+@pytest.mark.return_obj
+def test_return_obj_fail(x):
+ result = funcs.return_obj(x)
+ return result
+
+
+@pytest.mark.parametrize("source,keypath,value,exp", testdata2)
+@pytest.mark.unit_tests
+@pytest.mark.set_value_for_dict_by_keypath
+def test_set_value_for_dict_by_keypath_ok(source, keypath, value, exp):
+ funcs.set_value_for_dict_by_keypath(source, paths=keypath, value=value)
+ assert source == exp
+
+
+@pytest.mark.xfail(strict=True)
+@pytest.mark.parametrize("source,keypath,value,make_new", keypath_fail)
+@pytest.mark.set_value_for_dict_by_keypath
+@pytest.mark.unit_tests
+def test_set_value_for_dict_by_keypath_fail(source, keypath, value, make_new):
+ funcs.set_value_for_dict_by_keypath(source, paths=keypath, value=value,
+ new_on_missing=make_new)
+
+
+@pytest.mark.parametrize('obj,indexes,value,exp', testdata3)
+@pytest.mark.unit_tests
+@pytest.mark.list_update
+def test_list_update_ok(obj, indexes, value, exp):
+ funcs.list_update(obj, indexes, value)
+ assert obj == exp
+
+
+@pytest.mark.xfail(strict=True)
+@pytest.mark.parametrize('obj,indexes,value', list_update_fail)
+@pytest.mark.list_update
+@pytest.mark.unit_tests
+def test_list_update_fail(obj, indexes, value):
+ funcs.list_update(obj, indexes, value)
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 0000000..1ed7e5c
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,34 @@
+# Tox (http://tox.testrun.org/) is a tool for running tests
+# in multiple virtualenvs. This configuration file will run the
+# test suite on all supported python versions. To use it, "pip install tox"
+# and then run "tox" from this directory.
+
+[tox]
+skipsdist = True
+envlist = py35,py34,py27,pypy,pep8
+
+[testenv]
+install_command = pip install -U {opts} {packages}
+deps =
+ setuptools
+ -r{toxinidir}/tcp_tests/requirements.txt
+ -r{toxinidir}/test-requirements.txt
+usedevelop = False
+commands = py.test -s -vvv tcp_tests/tests/unit
+
+[testenv:venv]
+commands = {posargs}
+
+[testenv:pep8]
+deps = flake8
+usedevelop = False
+exclude = .venv,.git,.tox,.cache,.lib,dist,doc,*egg,build,local*
+commands =
+ flake8 {posargs:.}
+
+[flake8]
+ignore = H302,H802
+exclude = .venv,.git,.tox,dist,doc,*egg,build,local,./lib
+show-pep8 = True
+show-source = True
+count = True