add rally to the run
diff --git a/tcp_tests/managers/common_services_manager.py b/tcp_tests/managers/common_services_manager.py
index add159f..7735f9a 100644
--- a/tcp_tests/managers/common_services_manager.py
+++ b/tcp_tests/managers/common_services_manager.py
@@ -24,5 +24,6 @@
super(CommonServicesManager, self).__init__()
def install(self, commands):
- self.__underlay.execute_commands(commands)
+ self.__underlay.execute_commands(commands,
+ label='Install common services')
self.__config.common_services.installed = True
diff --git a/tcp_tests/managers/openstack_manager.py b/tcp_tests/managers/openstack_manager.py
index 9727ac4..785f100 100644
--- a/tcp_tests/managers/openstack_manager.py
+++ b/tcp_tests/managers/openstack_manager.py
@@ -24,5 +24,6 @@
super(OpenstackManager, self).__init__()
def install(self, commands):
- self.__underlay.execute_commands(commands)
+ self.__underlay.execute_commands(commands=commands,
+ label="Install OpenStack services")
self.__config.openstack.installed = True
diff --git a/tcp_tests/managers/rallymanager.py b/tcp_tests/managers/rallymanager.py
index bd937d5..af3e68d 100644
--- a/tcp_tests/managers/rallymanager.py
+++ b/tcp_tests/managers/rallymanager.py
@@ -11,6 +11,10 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+import datetime
+import json
+
+from junit_xml import TestSuite, TestCase
from tcp_tests import logger
from tcp_tests import settings
@@ -25,9 +29,9 @@
image_name = 'rallyforge/rally'
image_version = '0.5.0'
- def __init__(self, underlay, admin_node_name):
+ def __init__(self, underlay, admin_host):
super(RallyManager, self).__init__()
- self._admin_node_name = admin_node_name
+ self._admin_host = admin_host
self._underlay = underlay
def prepare(self):
@@ -40,17 +44,13 @@
rally verify genconfig
rally verify showconfig"""
cmd = "cat > {path} << EOF\n{content}\nEOF".format(
- path='/home/{user}/rally/install_tempest.sh'.format(
- user=settings.SSH_LOGIN), content=content)
- cmd1 = "chmod +x /home/{user}/rally/install_tempest.sh".format(
- user=settings.SSH_LOGIN)
- cmd2 = "cp /home/{user}/openrc-* /home/{user}/rally/openrc".format(
- user=settings.SSH_LOGIN)
+ path='/root/rally/install_tempest.sh', content=content)
+ cmd1 = "chmod +x /root/rally/install_tempest.sh"
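+        # Reuse the keystone credentials from the ctl01 node as the "openrc"
+        # file that the tempest install/run steps will source.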
+ cmd2 = "scp ctl01:/root/keystonerc /root/rally/openrc"
- with self._underlay.remote(node_name=self._admin_node_name) as remote:
+ with self._underlay.remote(host=self._admin_host) as remote:
LOG.info("Create rally workdir")
- remote.check_call('mkdir -p /home/{user}/rally'.format(
- user=settings.SSH_LOGIN))
+ remote.check_call('mkdir -p /root/rally')
LOG.info("Create install_tempest.sh")
remote.check_call(cmd)
LOG.info("Chmod +x install_tempest.sh")
@@ -61,14 +61,15 @@
def pull_image(self, version=None):
version = version or self.image_version
image = self.image_name
- cmd = "docker pull {image}:{version}".format(image=image,
- version=version)
- with self._underlay.remote(node_name=self._admin_node_name) as remote:
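+        # Install docker via apt (an Ubuntu/Debian admin host is assumed)
+        # before pulling the Rally image.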
+ cmd = ("apt-get -y install docker.io &&"
+ " docker pull {image}:{version}".format(image=image,
+ version=version))
+ with self._underlay.remote(host=self._admin_host) as remote:
LOG.info("Pull {image}:{version}".format(image=image,
version=version))
remote.check_call(cmd)
- with self._underlay.remote(node_name=self._admin_node_name) as remote:
+ with self._underlay.remote(host=self._admin_host) as remote:
LOG.info("Getting image id")
cmd = "docker images | grep 0.5.0| awk '{print $3}'"
res = remote.check_call(cmd)
@@ -76,10 +77,9 @@
LOG.info("Image ID is {}".format(self.image_id))
def run(self):
- with self._underlay.remote(node_name=self._admin_node_name) as remote:
- cmd = ("docker run --net host -v /home/{user}/rally:/home/rally "
- "-tid -u root {image_id}".format(
- user=settings.SSH_LOGIN, image_id=self.image_id))
+ with self._underlay.remote(host=self._admin_host) as remote:
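+            # The host workdir /root/rally is mounted as /home/rally inside
+            # the container, so scripts and results are shared with the host.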
+ cmd = ("docker run --net host -v /root/rally:/home/rally "
+ "-tid -u root {image_id}".format(image_id=self.image_id))
LOG.info("Run Rally container")
remote.check_call(cmd)
@@ -92,27 +92,80 @@
LOG.info("Container ID is {}".format(self.docker_id))
def run_tempest(self, test=''):
- docker_exec = ('source /home/{user}/rally/openrc; '
+ docker_exec = ('source /root/rally/openrc; '
'docker exec -i {docker_id} bash -c "{cmd}"')
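+        # The steps below install tempest inside the Rally container, run the
+        # verification, then export the JSON and HTML reports.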
commands = [
docker_exec.format(cmd="./install_tempest.sh",
- user=settings.SSH_LOGIN,
docker_id=self.docker_id),
docker_exec.format(
cmd="source /home/rally/openrc && "
"rally verify start {test}".format(test=test),
- user=settings.SSH_LOGIN,
docker_id=self.docker_id),
docker_exec.format(
cmd="rally verify results --json --output-file result.json",
- user=settings.SSH_LOGIN,
docker_id=self.docker_id),
docker_exec.format(
cmd="rally verify results --html --output-file result.html",
- user=settings.SSH_LOGIN,
docker_id=self.docker_id),
]
- with self._underlay.remote(node_name=self._admin_node_name) as remote:
+ with self._underlay.remote(host=self._admin_host) as remote:
LOG.info("Run tempest inside Rally container")
for cmd in commands:
remote.check_call(cmd, verbose=True)
+
+ def get_results(self, store=True, store_file='tempest.xml'):
+ LOG.info('Storing tests results...')
+ res_file_name = 'result.json'
+ file_prefix = 'results_' + datetime.datetime.now().strftime(
+ '%Y%m%d_%H%M%S') + '_'
+ file_dst = '{0}/logs/{1}{2}'.format(
+ settings.LOGS_DIR, file_prefix, res_file_name)
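+        # Download the raw rally verify JSON report from the admin host into
+        # the local logs directory under a timestamped file name.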
+ with self._underlay.remote(host=self._admin_host) as remote:
+ remote.download(
+                '/root/rally/{0}'.format(res_file_name),
+ file_dst)
+ res = json.load(remote.open('/root/rally/result.json'))
+ if not store:
+ return res
+
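+        # The report maps test ids to dicts carrying at least 'name' and
+        # 'status' ('fail'/'skip'/'error'/'success'), plus 'traceback' or
+        # 'reason' depending on the status; convert them to junit test cases.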
+ formatted_tc = []
+        failed_cases = [res['test_cases'][case]
+                        for case in res['test_cases']
+                        if res['test_cases'][case]['status'] == 'fail']
+ for case in failed_cases:
+ if case:
+ tc = TestCase(case['name'])
+ tc.add_failure_info(case['traceback'])
+ formatted_tc.append(tc)
+
+ skipped_cases = [res['test_cases'][case]
+ for case in res['test_cases']
+                         if res['test_cases'][case]['status'] == 'skip']
+ for case in skipped_cases:
+ if case:
+ tc = TestCase(case['name'])
+ tc.add_skipped_info(case['reason'])
+ formatted_tc.append(tc)
+
+ error_cases = [res['test_cases'][case] for case in res['test_cases']
+                       if res['test_cases'][case]['status'] == 'error']
+
+ for case in error_cases:
+ if case:
+ tc = TestCase(case['name'])
+ tc.add_error_info(case['traceback'])
+ formatted_tc.append(tc)
+
+ success = [res['test_cases'][case] for case in res['test_cases']
+                   if res['test_cases'][case]['status'] == 'success']
+ for case in success:
+ if case:
+ tc = TestCase(case['name'])
+ formatted_tc.append(tc)
+
+ ts = TestSuite("tempest", formatted_tc)
+ with open(store_file, 'w') as f:
+ ts.to_file(f, [ts], prettyprint=False)
+
+ return res
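+
+# A minimal usage sketch (illustrative only; the actual wiring is done by the
+# test fixtures, and the admin host value here is an assumption):
+#
+#   rally = RallyManager(underlay=underlay,
+#                        admin_host=config.salt.salt_master_host)
+#   rally.prepare()
+#   rally.pull_image()
+#   rally.run()
+#   rally.run_tempest()
+#   rally.get_results(store_file='tempest.xml')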
diff --git a/tcp_tests/managers/saltmanager.py b/tcp_tests/managers/saltmanager.py
index 2e539bc..1a89616 100644
--- a/tcp_tests/managers/saltmanager.py
+++ b/tcp_tests/managers/saltmanager.py
@@ -32,4 +32,5 @@
self.__config.salt.salt_master_host = \
self.__underlay.host_by_node_name(salt_nodes[0])
- self.__underlay.execute_commands(commands)
+ self.__underlay.execute_commands(commands=commands,
+ label="Install and configure salt")
diff --git a/tcp_tests/managers/underlay_ssh_manager.py b/tcp_tests/managers/underlay_ssh_manager.py
index 90f3924..09521cd 100644
--- a/tcp_tests/managers/underlay_ssh_manager.py
+++ b/tcp_tests/managers/underlay_ssh_manager.py
@@ -367,13 +367,20 @@
def ensure_running_service(self, service_name, node_name, check_cmd,
state_running='start/running'):
+        """Check that service_name is running, otherwise try to restart it
+
+        :param service_name: name of the service to check
+        :param node_name: name of the node on which the service is checked
+        :param check_cmd: shell command that ensures the service is running
+        :param state_running: substring expected in the service status output
+                              when the service is running
+        """
cmd = "service {0} status | grep -q '{1}'".format(
service_name, state_running)
with self.remote(node_name=node_name) as remote:
result = remote.execute(cmd)
if result.exit_code != 0:
LOG.info("{0} is not in running state on the node {1},"
- " restarting".format(service_name, node_name))
+ " trying to start".format(service_name, node_name))
cmd = ("service {0} stop;"
" sleep 3; killall -9 {0};"
"service {0} start; sleep 5;"
@@ -383,16 +390,55 @@
remote.execute(check_cmd)
remote.execute(check_cmd)
- def execute_commands(self, commands):
- for n, step in enumerate(commands):
- LOG.info(" ####################################################")
- LOG.info(" *** [ Command #{0} ] {1} ***"
- .format(n+1, step['description']))
+ def execute_commands(self, commands, label="Command"):
+ """Execute a sequence of commands
- with self.remote(node_name=step['node_name']) as remote:
- for x in range(step['retry']['count'], 0, -1):
+        The main purpose is to work around salt formula issues such as:
+        - exit_code == 0 when there are actual failures
+        - salt_master and/or salt_minion stop working after executing a formula
+        - a formula fails on the first run, but completes on subsequent runs
+
+        :param label: label for this sequence of commands, used in the log
+        :param commands: list of dicts with the following data (a concrete
+            example is sketched in the comment below the docstring):
+ commands = [
+ ...
+ {
+ # Required:
+ 'cmd': 'shell command(s) to run',
+ 'node_name': 'name of the node to run the command(s)',
+ # Optional:
+ 'description': 'string with a readable command description',
+ 'retry': {
+                        'count': int,  # How many times the command should
+                                       # be run until it succeeds
+ 'delay': int, # Delay between tries in seconds
+ },
+                    'skip_fail': bool  # If True - continue with the next step
+                                       # even if the retry count is exhausted.
+                                       # If False - raise an exception (default)
+ },
+ ...
+ ]
+ """
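+        # A minimal example of a single step (the node name, command and
+        # retry values below are illustrative only):
+        #
+        # commands = [
+        #     {
+        #         'cmd': 'salt-call state.sls linux',
+        #         'node_name': 'cfg01',
+        #         'description': 'Apply the linux formula',
+        #         'retry': {'count': 3, 'delay': 10},
+        #         'skip_fail': False,
+        #     },
+        # ]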
+ for n, step in enumerate(commands):
+ # Required fields
+ cmd = step.get('cmd')
+ node_name = step.get('node_name')
+ # Optional fields
+ description = step.get('description', cmd)
+ retry = step.get('retry', {'count': 1, 'delay': 1})
+ retry_count = retry.get('count', 1)
+ retry_delay = retry.get('delay', 1)
+            skip_fail = step.get('skip_fail', False)
+
+ LOG.info(" >>> [ {0} #{1} ] {2}".format(label, n+1, description))
+
+ with self.remote(node_name=node_name) as remote:
+
+ for x in range(retry_count, 0, -1):
time.sleep(3)
- result = remote.execute(step['cmd'], verbose=True)
+ result = remote.execute(cmd, verbose=True)
# Workaround of exit code 0 from salt in case of failures
failed = 0
@@ -401,14 +447,14 @@
failed += int(s.split("Failed:")[1])
if result.exit_code != 0:
- time.sleep(step['retry']['delay'])
+ time.sleep(retry_delay)
LOG.info(" === RETRY ({0}/{1}) ========================="
- .format(x-1, step['retry']['count']))
+ .format(x-1, retry_count))
elif failed != 0:
LOG.error(" === SALT returned exit code = 0 while "
"there are failed modules! ===")
LOG.info(" === RETRY ({0}/{1}) ======================="
- .format(x-1, step['retry']['count']))
+ .format(x-1, retry_count))
else:
# Workarounds for crashed services
self.ensure_running_service(
@@ -423,7 +469,7 @@
"active (running)") # Hardcoded for now
break
- if x == 1 and step['skip_fail'] == False:
+                    if x == 1 and not skip_fail:
# In the last retry iteration, raise an exception
raise Exception("Step '{0}' failed"
- .format(step['description']))
+ .format(description))