Revert "Fix runtestmanager for CICD deployments"
This reverts commit 70cefed6f2de0d31eaeadff55713bec5e3d763d8.
Change-Id: Ie9087cda7102b2bfd4d0e1b5d4dacc66cc4648be
diff --git a/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy b/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
index 6b9370e..f4c8765 100644
--- a/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
+++ b/jobs/pipelines/swarm-bootstrap-salt-cluster-devops.groovy
@@ -50,12 +50,6 @@
""")
}
- if (env.TCP_QA_REFS) {
- stage("Update working dir to patch ${TCP_QA_REFS}") {
- shared.update_working_dir()
- }
- }
-
stage("Create an environment ${ENV_NAME} in disabled state") {
// deploy_hardware.xml
shared.run_cmd("""\
@@ -103,7 +97,7 @@
}
} catch (e) {
- common.printMsg("Job is failed", "red")
+ common.printMsg("Job is failed: " + e.message, "red")
throw e
} finally {
// TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
diff --git a/jobs/pipelines/swarm-deploy-cicd.groovy b/jobs/pipelines/swarm-deploy-cicd.groovy
index 538f5ea..d067e07 100644
--- a/jobs/pipelines/swarm-deploy-cicd.groovy
+++ b/jobs/pipelines/swarm-deploy-cicd.groovy
@@ -35,12 +35,6 @@
error "'STACK_INSTALL' must contain one or more comma separated stack names for [deploy_openstack] pipeline"
}
- if (env.TCP_QA_REFS) {
- stage("Update working dir to patch ${TCP_QA_REFS}") {
- shared.update_working_dir()
- }
- }
-
// Install core and cicd
def stack
def timeout
@@ -66,7 +60,7 @@
}
} catch (e) {
- common.printMsg("Job is failed", "red")
+ common.printMsg("Job is failed: " + e.message, "red")
throw e
} finally {
// TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
diff --git a/jobs/pipelines/swarm-deploy-platform.groovy b/jobs/pipelines/swarm-deploy-platform.groovy
index 78e363f..54bc43d 100644
--- a/jobs/pipelines/swarm-deploy-platform.groovy
+++ b/jobs/pipelines/swarm-deploy-platform.groovy
@@ -35,12 +35,6 @@
error "'STACK_INSTALL' must contain one or more comma separated stack names for [deploy_openstack] pipeline"
}
- if (env.TCP_QA_REFS) {
- stage("Update working dir to patch ${TCP_QA_REFS}") {
- shared.update_working_dir()
- }
- }
-
// Install the cluster
def stack
def timeout
@@ -66,7 +60,7 @@
}
} catch (e) {
- common.printMsg("Job is failed", "red")
+ common.printMsg("Job is failed:" + e.message, "red")
throw e
} finally {
// TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
diff --git a/jobs/pipelines/swarm-run-pytest.groovy b/jobs/pipelines/swarm-run-pytest.groovy
index 5d7bd8d..553b8a2 100644
--- a/jobs/pipelines/swarm-run-pytest.groovy
+++ b/jobs/pipelines/swarm-run-pytest.groovy
@@ -36,16 +36,9 @@
dir("${PARENT_WORKSPACE}") {
try {
- if (env.TCP_QA_REFS) {
- stage("Update working dir to patch ${TCP_QA_REFS}") {
- shared.update_working_dir()
- }
- }
-
stage("Run tests") {
def steps = shared.get_steps_list(PASSED_STEPS)
def sources = """\
- cd ${PARENT_WORKSPACE}
export ENV_NAME=${ENV_NAME}
. ./tcp_tests/utils/env_salt"""
if (steps.contains('k8s')) {
@@ -59,7 +52,7 @@
def installed = steps.collect {"""\
export ${it}_installed=true"""}.join("\n")
- shared.run_sh(sources + installed + """
+ shared.run_cmd(sources + installed + """
export TESTS_CONFIGS=${ENV_NAME}_salt_deployed.ini
export MANAGER=devops # use 'hardware' fixture to manage fuel-devops environment
export salt_master_host=\$SALT_MASTER_IP # skip salt_deployed fixture
@@ -75,7 +68,7 @@
}
} catch (e) {
- common.printMsg("Job is failed", "red")
+ common.printMsg("Job is failed" + e.message, "red")
throw e
} finally {
// TODO(ddmitriev): analyze the "def currentResult = currentBuild.result ?: 'SUCCESS'"
diff --git a/jobs/pipelines/swarm-testrail-report.groovy b/jobs/pipelines/swarm-testrail-report.groovy
index e7ffe92..333547a 100644
--- a/jobs/pipelines/swarm-testrail-report.groovy
+++ b/jobs/pipelines/swarm-testrail-report.groovy
@@ -31,13 +31,6 @@
}
dir("${PARENT_WORKSPACE}") {
try {
-
- if (env.TCP_QA_REFS) {
- stage("Update working dir to patch ${TCP_QA_REFS}") {
- shared.update_working_dir()
- }
- }
-
def report_name = ''
def testSuiteName = ''
def methodname = ''
@@ -118,7 +111,7 @@
}
} catch (e) {
- common.printMsg("Job is failed", "red")
+ common.printMsg("Job is failed: " + e.message, "red")
throw e
} finally {
// reporting is failed for some reason
diff --git a/src/com/mirantis/system_qa/SharedPipeline.groovy b/src/com/mirantis/system_qa/SharedPipeline.groovy
index 108ee0c..e4779cd 100644
--- a/src/com/mirantis/system_qa/SharedPipeline.groovy
+++ b/src/com/mirantis/system_qa/SharedPipeline.groovy
@@ -2,19 +2,6 @@
import groovy.xml.XmlUtil
-def run_sh(String cmd) {
- // run shell script without catching any output
- def common = new com.mirantis.mk.Common()
- common.printMsg("Run shell command:\n" + cmd, "blue")
- def VENV_PATH='/home/jenkins/fuel-devops30'
- script = """\
- set -ex;
- . ${VENV_PATH}/bin/activate;
- bash -c '${cmd.stripIndent()}'
- """
- return sh(script: script)
-}
-
def run_cmd(String cmd, Boolean returnStdout=false) {
def common = new com.mirantis.mk.Common()
common.printMsg("Run shell command:\n" + cmd, "blue")
@@ -128,17 +115,6 @@
""")
}
-def update_working_dir() {
- // Use to fetch a patchset from gerrit to the working dir
- run_cmd("""\
- if [ -n "$TCP_QA_REFS" ]; then
- set -e
- git fetch https://review.gerrithub.io/Mirantis/tcp-qa $TCP_QA_REFS && git checkout FETCH_HEAD || exit \$?
- fi
- pip install -r tcp_tests/requirements.txt
- """)
-}
-
def swarm_bootstrap_salt_cluster_devops() {
def common = new com.mirantis.mk.Common()
def cookiecutter_template_commit = env.COOKIECUTTER_TEMPLATE_COMMIT ?: env.MCP_VERSION
diff --git a/tcp_tests/fixtures/runtest_fixtures.py b/tcp_tests/fixtures/runtest_fixtures.py
index 3e1a45b..5a43b42 100644
--- a/tcp_tests/fixtures/runtest_fixtures.py
+++ b/tcp_tests/fixtures/runtest_fixtures.py
@@ -18,7 +18,7 @@
@pytest.fixture(scope='function')
-def tempest_actions(config, underlay_actions, salt_actions):
+def tempest_actions(underlay_actions, salt_actions):
"""
Run tempest tests
"""
@@ -28,7 +28,6 @@
domain_name = settings.DOMAIN_NAME
target = settings.TEMPEST_TARGET
runtest = RuntestManager(
- config,
underlay_actions, salt_actions,
cluster_name=cluster_name,
domain_name=domain_name,
diff --git a/tcp_tests/helpers/ext.py b/tcp_tests/helpers/ext.py
index 7abc53e..19bdf08 100644
--- a/tcp_tests/helpers/ext.py
+++ b/tcp_tests/helpers/ext.py
@@ -34,11 +34,8 @@
NETWORK_TYPE = enum(
- 'admin',
- 'control',
- 'tenant',
- 'storage',
- 'external',
+ 'private',
+ 'admin'
)
diff --git a/tcp_tests/managers/envmanager_devops.py b/tcp_tests/managers/envmanager_devops.py
index bd4024c..7424a49 100644
--- a/tcp_tests/managers/envmanager_devops.py
+++ b/tcp_tests/managers/envmanager_devops.py
@@ -71,7 +71,6 @@
self._create_environment()
self.set_dns_config()
self.set_address_pools_config()
- self.set_dhcp_ranges_config()
@property
def _devops_config(self):
@@ -523,14 +522,3 @@
"""Store address pools CIDRs in config object"""
for ap in self.__env.get_address_pools():
self.__config.underlay.address_pools[ap.name] = ap.net
-
- def set_dhcp_ranges_config(self):
- """Store DHCP ranges in config object"""
- for ap in self.__env.get_address_pools():
- if "gateway" in ap.ip_reserved and "dhcp" in ap.ip_ranges:
- self.__config.underlay.dhcp_ranges[ap.name] = {
- "cidr": ap.net,
- "start": ap.ip_range_start("dhcp"),
- "end": ap.ip_range_end("dhcp"),
- "gateway": ap.gateway,
- }
diff --git a/tcp_tests/managers/execute_commands.py b/tcp_tests/managers/execute_commands.py
index 193153c..adb76dc 100644
--- a/tcp_tests/managers/execute_commands.py
+++ b/tcp_tests/managers/execute_commands.py
@@ -87,7 +87,6 @@
retry_count = retry.get('count', 1)
retry_delay = retry.get('delay', 1)
skip_fail = step.get('skip_fail', False)
- timeout = step.get('timeout', None)
with self.__underlay.remote(node_name=node_name) as remote:
@@ -102,7 +101,7 @@
LOG.info("\n\n{0}\n{1}".format(
msg + retry_msg, '=' * len(msg + retry_msg)))
- result = remote.execute(cmd, timeout=timeout, verbose=True)
+ result = remote.execute(cmd, verbose=True)
if return_res:
return result
@@ -149,7 +148,6 @@
retry_count = retry.get('count', 1)
retry_delay = retry.get('delay', 1)
skip_fail = step.get('skip_fail', False)
- timeout = step.get('timeout', None)
if not bool(state) ^ bool(states):
raise ValueError("You should use state or states in step")
@@ -167,7 +165,7 @@
method = getattr(self._salt, self._salt._map[do])
command_ret = method(tgt=target, state=state or states,
- args=args, kwargs=kwargs, timeout=timeout)
+ args=args, kwargs=kwargs)
command_ret = command_ret if \
isinstance(command_ret, list) else [command_ret]
results = [(r['return'][0], f) for r, f in command_ret]
diff --git a/tcp_tests/managers/rallymanager.py b/tcp_tests/managers/rallymanager.py
index 589e1ee..5b7fd4c 100644
--- a/tcp_tests/managers/rallymanager.py
+++ b/tcp_tests/managers/rallymanager.py
@@ -97,7 +97,7 @@
" docker pull {image}:{version}".format(image=image,
version=version))
self._underlay.check_call(cmd, node_name=self._node_name)
- except Exception:
+ except Exception as e:
LOG.debug('Cannot install docker-ce')
cmd = ("apt-get -y install docker.io &&"
" docker pull {image}:{version}".format(image=image,
diff --git a/tcp_tests/managers/runtestmanager.py b/tcp_tests/managers/runtestmanager.py
index 036f415..c400556 100644
--- a/tcp_tests/managers/runtestmanager.py
+++ b/tcp_tests/managers/runtestmanager.py
@@ -14,6 +14,9 @@
import json
import os
+import time
+
+from devops.helpers import helpers
from tcp_tests import logger
from tcp_tests import settings
@@ -22,6 +25,71 @@
TEMPEST_CFG_DIR = '/tmp/test'
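+# Default pillar data for the runtest formula; store_runtest_model() writes it
+# into the cluster reclass model so 'state.sls runtest' can render tempest.conf.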
+CONFIG = {
+ 'classes': ['service.runtest.tempest',
+ 'service.runtest.tempest.services.manila.glance'],
+ 'parameters': {
+ '_param': {
+ 'runtest_tempest_cfg_dir': TEMPEST_CFG_DIR,
+ 'runtest_tempest_cfg_name': 'tempest.conf',
+ 'runtest_tempest_public_net': 'net04_ext',
+ 'tempest_test_target': 'gtw01*'
+ },
+ 'neutron': {
+ 'client': {
+ 'enabled': True
+ }
+ },
+ 'runtest': {
+ 'enabled': True,
+ 'keystonerc_node': 'ctl01*',
+ 'tempest': {
+ 'enabled': True,
+ 'cfg_dir': '${_param:runtest_tempest_cfg_dir}',
+ 'cfg_name': '${_param:runtest_tempest_cfg_name}',
+ 'DEFAULT': {
+ 'log_file': 'tempest.log'
+ },
+ 'compute': {
+ 'build_timeout': 600,
+ 'max_microversion': 2.53,
+ 'min_compute_nodes': 2,
+ 'min_microversion': 2.1,
+ 'volume_device_name': 'vdc'
+ },
+ 'convert_to_uuid': {
+ 'network': {
+ 'public_network_id':
+ '${_param:runtest_tempest_public_net}'
+ }
+ },
+ 'dns_feature_enabled': {
+ 'api_admin': False,
+ 'api_v1': False,
+ 'api_v2': True,
+ 'api_v2_quotas': True,
+ 'api_v2_root_recordsets': True,
+ 'bug_1573141_fixed': True
+ },
+ 'heat_plugin': {
+ 'floating_network_name':
+ '${_param:runtest_tempest_public_net}'
+ },
+ 'network': {
+ 'floating_network_name':
+ '${_param:runtest_tempest_public_net}'
+ },
+ 'share': {
+ 'capability_snapshot_support': True,
+ 'run_driver_assisted_migration_tests': False,
+ 'run_manage_unmanage_snapshot_tests': False,
+ 'run_manage_unmanage_tests': False,
+ 'run_migration_with_preserve_snapshots_tests': False,
+ 'run_quota_tests': True,
+ 'run_replication_tests': False,
+ 'run_snapshot_tests': True,
+ }}}}}
+
class RuntestManager(object):
"""Helper manager for execution tempest via runtest-formula"""
@@ -30,117 +98,60 @@
image_version = settings.TEMPEST_IMAGE_VERSION
container_name = 'run-tempest-ci'
master_host = "cfg01"
- control_host = "ctl01"
+ master_tgt = "{}*".format(master_host)
class_name = "runtest"
run_cmd = '/bin/bash -c "run-tempest"'
- def __init__(self, config, underlay, salt_api, cluster_name,
+ def __init__(self, underlay, salt_api, cluster_name,
domain_name, tempest_threads,
tempest_pattern=settings.TEMPEST_PATTERN,
run_cmd=None, target='gtw01'):
- self.__config = config
self.underlay = underlay
self.__salt_api = salt_api
+ self.target = target
self.cluster_name = cluster_name
self.domain_name = domain_name
self.tempest_threads = tempest_threads
self.tempest_pattern = tempest_pattern
self.run_cmd = run_cmd or self.run_cmd
- self.target_name = self.underlay.get_target_node_names(target)[0]
- self.master_name = self.underlay.get_target_node_names(
- self.master_host)[0]
- self.control_name = self.underlay.get_target_node_names(
- self.control_host)[0]
@property
def salt_api(self):
return self.__salt_api
- @property
- def runtest_pillar(self):
- public_net = self.__config.underlay.dhcp_ranges[
- settings.EXTERNAL_ADDRESS_POOL_NAME]
- public_gateway = public_net["gateway"].encode("ascii")
- public_cidr = public_net["cidr"].encode("ascii")
- public_allocation_start = public_net["start"].encode("ascii")
- public_allocation_end = public_net["end"].encode("ascii")
+ def install_python_lib(self):
+ return self.salt_api.local(
+ "{}*".format(self.target),
+ 'pip.install', 'docker'), None
- return {
- 'classes': ['service.runtest.tempest',
- 'service.runtest.tempest.public_net',
- 'service.runtest.tempest.services.manila.glance'],
- 'parameters': {
- '_param': {
- 'runtest_tempest_cfg_dir': TEMPEST_CFG_DIR,
- 'runtest_tempest_cfg_name': 'tempest.conf',
- 'runtest_tempest_public_net': 'public',
- 'openstack_public_neutron_subnet_gateway': public_gateway,
- 'openstack_public_neutron_subnet_cidr': public_cidr,
- 'openstack_public_neutron_subnet_allocation_start':
- public_allocation_start,
- 'openstack_public_neutron_subnet_allocation_end':
- public_allocation_end,
- 'tempest_test_target': self.target_name.encode("ascii"),
- },
- 'neutron': {
- 'client': {
- 'enabled': True
- }
- },
- 'runtest': {
- 'enabled': True,
- 'keystonerc_node': 'ctl01*',
- 'tempest': {
- 'enabled': True,
- 'cfg_dir': '${_param:runtest_tempest_cfg_dir}',
- 'cfg_name': '${_param:runtest_tempest_cfg_name}',
- 'DEFAULT': {
- 'log_file': 'tempest.log'
- },
- 'compute': {
- 'build_timeout': 600,
- 'max_microversion': 2.53,
- 'min_compute_nodes': 2,
- 'min_microversion': 2.1,
- 'volume_device_name': 'vdc'
- },
- 'convert_to_uuid': {
- 'network': {
- 'public_network_id':
- '${_param:runtest_tempest_public_net}'
- }
- },
- 'dns_feature_enabled': {
- 'api_admin': False,
- 'api_v1': False,
- 'api_v2': True,
- 'api_v2_quotas': True,
- 'api_v2_root_recordsets': True,
- 'bug_1573141_fixed': True
- },
- 'heat_plugin': {
- 'floating_network_name':
- '${_param:runtest_tempest_public_net}'
- },
- 'network': {
- 'floating_network_name':
- '${_param:runtest_tempest_public_net}'
- },
- 'share': {
- 'capability_snapshot_support': True,
- 'run_driver_assisted_migration_tests': False,
- 'run_manage_unmanage_snapshot_tests': False,
- 'run_manage_unmanage_tests': False,
- 'run_migration_with_preserve_snapshots_tests':
- False,
- 'run_quota_tests': True,
- 'run_replication_tests': False,
- 'run_snapshot_tests': True,
- }}}}}
+ def run_salt_minion_state(self):
+ return self.salt_api.local('cfg01*', 'state.sls', 'salt.minion')
+
+ def create_networks(self):
+ return self.salt_api.local('cfg01*', 'state.sls', 'neutron.client')
+
+ def create_flavors(self):
+ return self.salt_api.local('cfg01*', 'state.sls', 'nova.client')
+
+ def set_property(self):
+ return self.salt_api.local(
+ tgt='ctl01*',
+ fun='cmd.run',
+ args='. /root/keystonercv3; openstack '
+ 'flavor set m1.tiny_test '
+ '--property hw:mem_page_size=small')
+
+ def create_cirros(self):
+ return self.salt_api.local('cfg01*', 'state.sls', 'glance.client')
+
+ def generate_config(self):
+ return self.salt_api.local('cfg01*', 'state.sls', 'runtest')
def fetch_arficats(self, username=None, file_format='xml'):
- with self.underlay.remote(node_name=self.target_name,
- username=None) as tgt:
+ target_name = next(node_name for node_name
+ in self.underlay.node_names() if
+ self.target in node_name)
+ with self.underlay.remote(node_name=target_name, username=None) as tgt:
result = tgt.execute('find {} -name "report_*.{}"'.format(
TEMPEST_CFG_DIR, file_format))
LOG.debug("Find result {0}".format(result))
@@ -152,38 +163,42 @@
destination=report, # noqa
target=os.getcwd())
- def store_runtest_model(self, runtest_pillar=None):
+ def store_runtest_model(self, config=CONFIG):
+ master_name = next(node_name for node_name
+ in self.underlay.node_names() if
+ self.master_host in node_name)
with self.underlay.yaml_editor(
file_path="/srv/salt/reclass/classes/cluster/"
"{cluster_name}/infra/"
"{class_name}.yml".format(
cluster_name=self.cluster_name,
class_name=self.class_name),
- node_name=self.master_name) as editor:
- editor.content = runtest_pillar or self.runtest_pillar
+ node_name=master_name) as editor:
+ editor.content = config
with self.underlay.yaml_editor(
file_path="/srv/salt/reclass/nodes/_generated/"
"cfg01.{domain_name}.yml".format(
domain_name=self.domain_name),
- node_name=self.master_name) as editor:
+ node_name=master_name) as editor:
editor.content['classes'].append(
'cluster.{cluster_name}.infra.{class_name}'.format(
cluster_name=self.cluster_name,
class_name=self.class_name))
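+ # Propagate the new class to every minion before the runtest state is applied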
+ self.salt_api.local('*', 'saltutil.refresh_pillar')
+ self.salt_api.local('*', 'saltutil.sync_all')
+
def save_runtime_logs(self, logs=None, inspect=None):
if logs:
with open("{path}/{target}_tempest_run.log".format(
- path=settings.LOGS_DIR,
- target=self.target_name), 'w') as f:
+ path=settings.LOGS_DIR, target=self.target), 'w') as f:
LOG.info("Save tempest console log")
container_log = logs
f.write(container_log.encode('ascii', 'ignore'))
if inspect:
with open("{path}/{target}_tempest_container_info.json.log".format(
- path=settings.LOGS_DIR,
- target=self.target_name), 'w') as f:
+ path=settings.LOGS_DIR, target=self.target), 'w') as f:
LOG.info("Save tempest container inspect data")
container_inspect = json.dumps(inspect,
@@ -193,123 +208,104 @@
def prepare(self, dpdk=None):
self.store_runtest_model()
- salt_cmd = "salt -l info --hard-crash --state-output=mixed "
- salt_call_cmd = "salt-call -l info --hard-crash --state-output=mixed "
- commands = [
- {
- 'description': "Sync salt objects for runtest model",
- 'node_name': self.master_name,
- 'cmd': ("set -ex;" +
- salt_cmd + "'*' saltutil.refresh_pillar && " +
- salt_cmd + "'*' saltutil.sync_all")},
- {
- 'description': ("Install docker.io package and "
- "enable packets forwarding"),
- 'node_name': self.target_name,
- 'cmd': ("set -ex;" +
- salt_call_cmd + " pkg.install docker.io && " +
- " iptables --policy FORWARD ACCEPT")},
- {
- 'description': "Install PyPI docker package",
- 'node_name': self.target_name,
- 'cmd': ("set -ex;" +
- salt_call_cmd + " pip.install setuptools && " +
- salt_call_cmd + " pip.install docker")},
- {
- 'description': "Run salt.minion state for runtest formula",
- 'node_name': self.master_name,
- 'cmd': ("set -ex;" +
- salt_call_cmd + " state.sls salt.minion && "
- " sleep 20")},
- {
- 'description': "Enforce keystone state for neutronv2",
- 'node_name': self.master_name,
- 'cmd': ("set -ex;" +
- salt_call_cmd + " state.sls keystone.client")},
- {
- 'description': "Create networks for Tempest tests",
- 'node_name': self.master_name,
- 'cmd': ("set -ex;" +
- salt_call_cmd + " state.sls neutron.client")},
- {
- 'description': "Create flavors for Tempest tests",
- 'node_name': self.master_name,
- 'cmd': ("set -ex;" +
- salt_call_cmd + " state.sls nova.client")},
- {
- 'description': "Create cirros image for Tempest",
- 'node_name': self.master_name,
- 'cmd': ("set -ex;" +
- salt_call_cmd + " state.sls glance.client")},
- {
- 'description': "Generate config for Tempest",
- 'node_name': self.master_name,
- 'cmd': ("set -ex;" +
- salt_call_cmd + " state.sls runtest")},
- ]
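+ # Apply the preparation states one at a time, with fixed 20s pauses between salt runs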
+ res = self.install_python_lib()
+ LOG.info(json.dumps(res, indent=4))
+ res = self.run_salt_minion_state()
+ LOG.info(json.dumps(res, indent=4))
+ time.sleep(20)
+
+ res = self.create_networks()
+ LOG.info(json.dumps(res, indent=4))
+ time.sleep(20)
+
+ res = self.create_flavors()
+ LOG.info(json.dumps(res, indent=4))
+ time.sleep(20)
if dpdk:
- commands.append({
- 'description': "Configure flavor for DPDK",
- 'node_name': self.control_name,
- 'cmd': ("set -ex;" +
- salt_call_cmd + " cmd.run "
- " '. /root/keystonercv3;"
- " openstack flavor set m1.tiny_test"
- " --property hw:mem_page_size=small'")},
- )
+ res = self.set_property()
+ LOG.info('Update flavor property')
+ LOG.info(json.dumps(res, indent=4))
+ time.sleep(20)
- self.__salt_api.execute_commands(commands=commands,
- label="Prepare for Tempest")
+ res = self.create_cirros()
+ LOG.info(json.dumps(res, indent=4))
+ time.sleep(20)
+
+ res = self.generate_config()
+ LOG.info(json.dumps(res, indent=4))
+ time.sleep(20)
def run_tempest(self, timeout=600):
- tgt = self.target_name
- image_nameversion = "{}:{}".format(self.image_name, self.image_version)
+ tgt = "{}*".format(self.target)
+ params = {
+ "name": self.container_name,
+ "image": "{}:{}".format(self.image_name, self.image_version),
+ "environment": {
+ "ARGS": "-r {tempest_pattern} -w "
+ "{tempest_threads} ".format(
+ tempest_pattern=self.tempest_pattern,
+ tempest_threads=self.tempest_threads) # noqa
+ },
+ "binds": [
+ "{cfg_dir}/tempest.conf:/etc/tempest/tempest.conf".format(cfg_dir=TEMPEST_CFG_DIR), # noqa
+ "/tmp/:/tmp/",
+ "{cfg_dir}:/root/tempest".format(cfg_dir=TEMPEST_CFG_DIR),
+ "/etc/ssl/certs/:/etc/ssl/certs/"
+ ],
+ "auto_remove": False,
+ "cmd": self.run_cmd
+ }
- docker_args = (
- " --name {container_name} "
- " -e ARGS=\"-r {tempest_pattern} -w {tempest_threads}\""
- " -v {cfg_dir}/tempest.conf:/etc/tempest/tempest.conf"
- " -v /tmp/:/tmp/"
- " -v {cfg_dir}:/root/tempest"
- " -v /etc/ssl/certs/:/etc/ssl/certs/"
- " --rm"
- " {image_nameversion} {run_cmd}"
- .format(
- container_name=self.container_name,
- image_nameversion=image_nameversion,
- tempest_pattern=self.tempest_pattern,
- tempest_threads=self.tempest_threads,
- cfg_dir=TEMPEST_CFG_DIR,
- run_cmd=self.run_cmd,
- ))
+ res = self.salt_api.local(tgt, 'dockerng.pull', "{}:{}".format(
+ self.image_name, self.image_version))
+ LOG.info("Tempest image has beed pulled- \n{}".format(
+ json.dumps(res, indent=4)))
- commands = [
- {
- 'description': "Run Tempest tests {0}".format(
- image_nameversion),
- 'node_name': self.target_name,
- 'cmd': ("set -ex;" +
- " docker rm --force {container_name} || true;"
- " docker run {docker_args}"
- .format(container_name=self.container_name,
- docker_args=docker_args)),
- 'timeout': timeout},
- ]
+ res = self.salt_api.local(tgt, 'dockerng.create', kwargs=params)
+ LOG.info("Tempest container has been created - \n{}".format(
+ json.dumps(res, indent=4)))
- self.__salt_api.execute_commands(commands=commands,
- label="Run Tempest tests")
+ res = self.salt_api.local(tgt, 'dockerng.start', self.container_name)
+ LOG.info("Tempest container has been started - \n{}".format(
+ json.dumps(res, indent=4)))
+
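+ # Poll the container state via dockerng.inspect until it reports the given status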
+ def wait_status(s):
+ inspect_res = self.salt_api.local(tgt,
+ 'dockerng.inspect',
+ self.container_name)
+ if 'return' in inspect_res:
+ inspect = inspect_res['return']
+ inspect = inspect[0]
+ inspect = next(inspect.iteritems())[1]
+ status = inspect['State']['Status']
+
+ return status.lower() == s.lower()
+
+ return False
+
+ helpers.wait(lambda: wait_status('exited'),
+ timeout=timeout,
+ timeout_msg=('Tempest run did not finish '
+ 'in {}'.format(timeout)))
inspect_res = self.salt_api.local(tgt,
'dockerng.inspect',
self.container_name)
inspect = inspect_res['return'][0]
inspect = next(inspect.iteritems())[1]
+ if inspect['State']['ExitCode'] != 0:
+ LOG.error("Tempest running failed")
+ LOG.info("Tempest tests have been finished - \n{}".format(
+ json.dumps(res, indent=4)))
+
logs_res = self.salt_api.local(tgt,
'dockerng.logs',
self.container_name)
logs = logs_res['return'][0]
logs = next(logs.iteritems())[1]
+ LOG.info("Tempest result - \n{}".format(
+ logs.encode('ascii', 'ignore')))
res = self.salt_api.local(tgt, 'dockerng.rm', self.container_name)
LOG.info("Tempest container was removed".format(
diff --git a/tcp_tests/managers/saltmanager.py b/tcp_tests/managers/saltmanager.py
index 6fad0e4..b5d5f04 100644
--- a/tcp_tests/managers/saltmanager.py
+++ b/tcp_tests/managers/saltmanager.py
@@ -104,12 +104,11 @@
self.__session_start = login()
return self.__api
- def local(self, tgt, fun, args=None, kwargs=None, timeout=None):
- return self.api.local(tgt, fun, args, kwargs, timeout=timeout,
- expr_form='compound')
+ def local(self, tgt, fun, args=None, kwargs=None):
+ return self.api.local(tgt, fun, args, kwargs, expr_form='compound')
- def local_async(self, tgt, fun, args=None, kwargs=None, timeout=None):
- return self.api.local_async(tgt, fun, args, kwargs, timeout=timeout)
+ def local_async(self, tgt, fun, args=None, kwargs=None):
+ return self.api.local_async(tgt, fun, args, kwargs)
def lookup_result(self, jid):
return self.api.lookup_jid(jid)
@@ -139,27 +138,25 @@
return fails if fails else None
- def enforce_state(self, tgt, state, args=None, kwargs=None, timeout=None):
- r = self.local(tgt=tgt, fun='state.sls', args=state, timeout=timeout)
+ def enforce_state(self, tgt, state, args=None, kwargs=None):
+ r = self.local(tgt=tgt, fun='state.sls', args=state)
f = self.check_result(r)
return r, f
- def enforce_states(self, tgt, state, args=None, kwargs=None, timeout=None):
+ def enforce_states(self, tgt, state, args=None, kwargs=None):
rets = []
for s in state:
- r = self.enforce_state(tgt=tgt, state=s, timeout=timeout)
+ r = self.enforce_state(tgt=tgt, state=s)
rets.append(r)
return rets
- def run_state(self, tgt, state, args=None, kwargs=None, timeout=None):
- return self.local(tgt=tgt, fun=state, args=args, kwargs=kwargs,
- timeout=timeout), None
+ def run_state(self, tgt, state, args=None, kwargs=None):
+ return self.local(tgt=tgt, fun=state, args=args, kwargs=kwargs), None
- def run_states(self, tgt, state, args=None, kwargs=None, timeout=None):
+ def run_states(self, tgt, state, args=None, kwargs=None):
rets = []
for s in state:
- r = self.run_state(tgt=tgt, state=s, args=args, kwargs=kwargs,
- timeout=timeout)
+ r = self.run_state(tgt=tgt, state=s, args=args, kwargs=kwargs)
rets.append(r)
return rets
diff --git a/tcp_tests/settings.py b/tcp_tests/settings.py
index 9ebdf22..0d79cc4 100644
--- a/tcp_tests/settings.py
+++ b/tcp_tests/settings.py
@@ -85,6 +85,3 @@
SL_TEST_REPO = os.environ.get('SL_TEST_REPO',
'https://github.com/Mirantis/stacklight-pytest')
SL_TEST_COMMIT = os.environ.get('SL_TEST_COMMIT', 'master')
-
-EXTERNAL_ADDRESS_POOL_NAME = os.environ.get('EXTERNAL_ADDRESS_POOL_NAME',
- 'external-pool01')
diff --git a/tcp_tests/settings_oslo.py b/tcp_tests/settings_oslo.py
index 009eb24..037dbd8 100644
--- a/tcp_tests/settings_oslo.py
+++ b/tcp_tests/settings_oslo.py
@@ -123,22 +123,6 @@
tests or during the deployment process.
{'pool_name1': '<cidr>', 'pool_name2': '<cidr>', ...}""",
default={}),
- ct.Cfg('dhcp_ranges', ct.JSONDict(),
- help="""DHCP ranges allocated for the address pools.
- This is extended object comparing to 'address_pools'.
- May be used to determine DHCP range start/end/gateway for a
- specific network from tests or during the deployment
- process.
- {'pool_name1': {'cidr': 'n.n.n.n/m',
- 'start': 'x.x.x.x',
- 'end': 'y.y.y.y',
- 'gateway': 'z.z.z.z'},
- 'pool_name2': {'cidr': 'n.n.n.n/m',
- 'start': x.x.x.x,
- 'end': 'y.y.y.y',
- 'gateway': 'z.z.z.z'},
- ...}""",
- default={}),
ct.Cfg('ssh_keys', ct.JSONList(), default=[],
help="SSH key pair(s) for root. If the option is left empty, "
"then a key pair will be generated automatically"),
diff --git a/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay.yaml b/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay.yaml
index 084a922..612299f 100644
--- a/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay.yaml
+++ b/tcp_tests/templates/cookied-bm-dpdk-pipeline/underlay.yaml
@@ -73,9 +73,8 @@
net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '172.17.42.128/26:26') }}
params:
ip_reserved:
- gateway: '172.17.42.129'
- ip_ranges:
- dhcp: ['172.17.42.130', '172.17.42.180']
+ gateway: +1
+ l2_network_device: -2
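+ # relative address offsets within the pool CIDR, replacing the hard-coded IPs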
groups:
diff --git a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml
index a7308e9..25c98bc 100644
--- a/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-dvr-vxlan/underlay.yaml
@@ -73,9 +73,8 @@
net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '172.17.42.0/26:26') }}
params:
ip_reserved:
- gateway: '172.17.42.1'
- ip_ranges:
- dhcp: ['172.17.42.10', '172.17.42.60']
+ gateway: +1
+ l2_network_device: -2
groups:
diff --git a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay.yaml b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay.yaml
index 15e22ba..23eb24c 100644
--- a/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay.yaml
+++ b/tcp_tests/templates/cookied-bm-mcp-ovs-dpdk/underlay.yaml
@@ -73,9 +73,9 @@
net: {{ os_env('EXTERNAL_ADDRESS_POOL01', '172.17.42.128/26:26') }}
params:
ip_reserved:
- gateway: '172.17.42.129'
- ip_ranges:
- dhcp: ['172.17.42.130', '172.17.42.180']
+ gateway: +1
+ l2_network_device: -2
+
groups:
- name: virtual
diff --git a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay.yaml b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay.yaml
index 94199a0..a964d2b 100644
--- a/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay.yaml
+++ b/tcp_tests/templates/cookied-cicd-pike-dvr-sl/underlay.yaml
@@ -161,7 +161,8 @@
default_{{ HOSTNAME_GTW01 }}: +110
default_{{ HOSTNAME_PRX01 }}: +121
ip_ranges:
- dhcp: [+180, +220]
+ dhcp: [+10, -10]
+
groups:
- name: default
diff --git a/tcp_tests/tests/unit/test_yaml_templates.py b/tcp_tests/tests/unit/test_yaml_templates.py
index a9f10fb..54eb5b3 100644
--- a/tcp_tests/tests/unit/test_yaml_templates.py
+++ b/tcp_tests/tests/unit/test_yaml_templates.py
@@ -29,16 +29,6 @@
"tenant-pool01": "10.80.0.0/24",
"external-pool01": "10.90.0.0/24"
}
-config.underlay.dhcp_ranges = {
- "admin-pool01": {"cidr": "10.70.0.0/24",
- "start": "10.70.0.10",
- "end": "10.70.0.200",
- "gateway": "10.70.0.1"},
- "external-pool01": {"cidr": "10.90.0.0/24",
- "start": "10.90.0.10",
- "end": "10.90.0.200",
- "gateway": "10.90.0.1"},
-}
config.underlay.ssh_keys = [
{"public": "AAAARRRGGHHHhh", "private": "--- BLABLA-KEY ---"}
]
diff --git a/tcp_tests/utils/get_jenkins_job_stages.py b/tcp_tests/utils/get_jenkins_job_stages.py
index 361b8d1..143e1a2 100755
--- a/tcp_tests/utils/get_jenkins_job_stages.py
+++ b/tcp_tests/utils/get_jenkins_job_stages.py
@@ -15,7 +15,6 @@
import argparse
import os
import sys
-import time
sys.path.append(os.getcwd())
try:
@@ -108,17 +107,8 @@
for line in log["text"].splitlines()))
return res
- for _ in range(3):
- wf = jenkins.get_workflow(opts.job_name, opts.build_number)
- info = jenkins.build_info(opts.job_name, int(wf['id']))
- if info is not None:
- break
- time.sleep(3)
-
- if not info:
- raise("Cannot get info for the job {0}:{1}".format(opts.job_name,
- opts.build_number))
-
+ wf = jenkins.get_workflow(opts.job_name, opts.build_number)
+ info = jenkins.build_info(opts.job_name, int(wf['id']))
build_description = ("[" + info['fullDisplayName'] + "] " +
info['url'] + " : " + info['result'])
stages = get_stages(wf['stages'], 0)