Merge "Fail tempest if 0 tests are run"
diff --git a/tempest/cli/simple_read_only/orchestration/test_heat.py b/tempest/cli/simple_read_only/orchestration/test_heat.py
index 8e413a9..019818b 100644
--- a/tempest/cli/simple_read_only/orchestration/test_heat.py
+++ b/tempest/cli/simple_read_only/orchestration/test_heat.py
@@ -56,7 +56,7 @@
     def test_heat_resource_template_fmt_arg_long_json(self):
         ret = self.heat('resource-template --format json OS::Nova::Server')
-        self.assertIn('"Type": "OS::Nova::Server",', ret)
+        self.assertIn('"Type": "OS::Nova::Server"', ret)
         self.assertIsInstance(json.loads(ret), dict)
 
     def test_heat_resource_type_list(self):
diff --git a/tempest/cmd/javelin.py b/tempest/cmd/javelin.py
index 87b7cd7..3f8db3d 100755
--- a/tempest/cmd/javelin.py
+++ b/tempest/cmd/javelin.py
@@ -21,7 +21,6 @@
 import argparse
 import datetime
-import logging
 import os
 import sys
 import unittest
@@ -31,6 +30,7 @@
 import tempest.auth
 from tempest import config
 from tempest import exceptions
+from tempest.openstack.common import log as logging
 from tempest.openstack.common import timeutils
 from tempest.services.compute.json import flavors_client
 from tempest.services.compute.json import servers_client
@@ -616,21 +616,10 @@
     config.CONF.set_config_path(OPTS.config_file)
 
 
-def setup_logging(debug=True):
+def setup_logging():
     global LOG
+    logging.setup(__name__)
     LOG = logging.getLogger(__name__)
-    if debug:
-        LOG.setLevel(logging.DEBUG)
-    else:
-        LOG.setLevel(logging.INFO)
-
-    ch = logging.StreamHandler(sys.stdout)
-    ch.setLevel(logging.DEBUG)
-    formatter = logging.Formatter(
-        datefmt='%Y-%m-%d %H:%M:%S',
-        fmt='%(asctime)s.%(msecs).03d - %(levelname)s - %(message)s')
-    ch.setFormatter(formatter)
-    LOG.addHandler(ch)
 
 
 def main():
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index cabefc8..3fb9f93 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -271,14 +271,18 @@
         _, volume = self.volumes_client.create_volume(
             size=size, display_name=name, snapshot_id=snapshot_id,
             imageRef=imageRef, volume_type=volume_type)
+
         if wait_on_delete:
             self.addCleanup(self.volumes_client.wait_for_resource_deletion,
                             volume['id'])
-        self.addCleanup_with_wait(
-            waiter_callable=self.volumes_client.wait_for_resource_deletion,
-            thing_id=volume['id'], thing_id_param='id',
-            cleanup_callable=self.delete_wrapper,
-            cleanup_args=[self.volumes_client.delete_volume, volume['id']])
+            self.addCleanup(self.delete_wrapper,
+                            self.volumes_client.delete_volume, volume['id'])
+        else:
+            self.addCleanup_with_wait(
+                waiter_callable=self.volumes_client.wait_for_resource_deletion,
+                thing_id=volume['id'], thing_id_param='id',
+                cleanup_callable=self.delete_wrapper,
+                cleanup_args=[self.volumes_client.delete_volume, volume['id']])
         self.assertEqual(name, volume['display_name'])
         self.volumes_client.wait_for_volume_status(volume['id'], 'available')
diff --git a/tools/subunit-trace.py b/tools/subunit-trace.py
index 6ed0b00..57e58f2 100755
--- a/tools/subunit-trace.py
+++ b/tools/subunit-trace.py
@@ -23,7 +23,6 @@
 import re
 import sys
 
-import mimeparse
 import subunit
 import testtools
 
@@ -32,55 +31,6 @@
 RESULTS = {}
 
 
-class Starts(testtools.StreamResult):
-
-    def __init__(self, output):
-        super(Starts, self).__init__()
-        self._output = output
-
-    def startTestRun(self):
-        self._neednewline = False
-        self._emitted = set()
-
-    def status(self, test_id=None, test_status=None, test_tags=None,
-               runnable=True, file_name=None, file_bytes=None, eof=False,
-               mime_type=None, route_code=None, timestamp=None):
-        super(Starts, self).status(
-            test_id, test_status,
-            test_tags=test_tags, runnable=runnable, file_name=file_name,
-            file_bytes=file_bytes, eof=eof, mime_type=mime_type,
-            route_code=route_code, timestamp=timestamp)
-        if not test_id:
-            if not file_bytes:
-                return
-            if not mime_type or mime_type == 'test/plain;charset=utf8':
-                mime_type = 'text/plain; charset=utf-8'
-            primary, sub, parameters = mimeparse.parse_mime_type(mime_type)
-            content_type = testtools.content_type.ContentType(
-                primary, sub, parameters)
-            content = testtools.content.Content(
-                content_type, lambda: [file_bytes])
-            text = content.as_text()
-            if text and text[-1] not in '\r\n':
-                self._neednewline = True
-            self._output.write(text)
-        elif test_status == 'inprogress' and test_id not in self._emitted:
-            if self._neednewline:
-                self._neednewline = False
-                self._output.write('\n')
-            worker = ''
-            for tag in test_tags or ():
-                if tag.startswith('worker-'):
-                    worker = '(' + tag[7:] + ') '
-            if timestamp:
-                timestr = timestamp.isoformat()
-            else:
-                timestr = ''
-            self._output.write('%s: %s%s [start]\n' %
-                               (timestr, worker, test_id))
-            self._emitted.add(test_id)
-
-
 def cleanup_test_name(name, strip_tags=True, strip_scenarios=False):
     """Clean up the test name for display.
@@ -274,12 +224,11 @@
     args = parse_args()
     stream = subunit.ByteStreamToStreamResult(
         sys.stdin, non_subunit_name='stdout')
-    starts = Starts(sys.stdout)
     outcomes = testtools.StreamToDict(
         functools.partial(show_outcome, sys.stdout,
                           print_failures=args.print_failures))
     summary = testtools.StreamSummary()
-    result = testtools.CopyStreamResult([starts, outcomes, summary])
+    result = testtools.CopyStreamResult([outcomes, summary])
     result.startTestRun()
     try:
         stream.run(result)