Merge "Make cli test namespace include test files"
diff --git a/tempest/cli/simple_read_only/volume/test_cinder.py b/tempest/cli/simple_read_only/volume/test_cinder.py
index 3a9a7a6..e44a577 100644
--- a/tempest/cli/simple_read_only/volume/test_cinder.py
+++ b/tempest/cli/simple_read_only/volume/test_cinder.py
@@ -121,8 +121,12 @@
         self.assertTableStruct(zone_list, ['Name', 'Status'])
 
     def test_cinder_endpoints(self):
-        endpoints = self.parser.listing(self.cinder('endpoints'))
-        self.assertTableStruct(endpoints, ['nova', 'Value'])
+        out = self.cinder('endpoints')
+        tables = self.parser.tables(out)
+        for table in tables:
+            headers = table['headers']
+            self.assertTrue(2 >= len(headers))
+            self.assertEqual('Value', headers[1])
 
     def test_cinder_service_list(self):
         service_list = self.parser.listing(self.cinder('service-list'))
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 66043d3..7746ed8 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -16,13 +16,10 @@
 
 import logging
 import os
-import re
 import subprocess
-import time
 
 from cinderclient import exceptions as cinder_exceptions
 import glanceclient
-from heatclient import exc as heat_exceptions
 import netaddr
 from neutronclient.common import exceptions as exc
 from novaclient import exceptions as nova_exceptions
@@ -38,7 +35,6 @@
 from tempest import config
 from tempest import exceptions
 from tempest.openstack.common import log
-from tempest.openstack.common import timeutils
 from tempest.services.network import resources as net_resources
 import tempest.test
 
@@ -92,13 +88,8 @@
         cls.interface_client = cls.manager.interfaces_client
         # Neutron network client
         cls.network_client = cls.manager.network_client
-
-    @classmethod
-    def tearDownClass(cls):
-        # Isolated creds also manages network resources, which should
-        # be cleaned up at the end of the test case
-        cls.isolated_creds.clear_isolated_creds()
-        super(ScenarioTest, cls).tearDownClass()
+        # Heat client
+        cls.orchestration_client = cls.manager.orchestration_client
 
     @classmethod
     def _get_credentials(cls, get_creds, ctype):
@@ -1015,11 +1006,6 @@
         cls.ceilometer_client = cls.manager.ceilometer_client
 
     @classmethod
-    def tearDownClass(cls):
-        cls.isolated_creds.clear_isolated_creds()
-        super(OfficialClientTest, cls).tearDownClass()
-
-    @classmethod
     def _get_credentials(cls, get_creds, ctype):
         if CONF.compute.allow_tenant_isolation:
             creds = get_creds()
@@ -2253,7 +2239,7 @@
         return network, subnet, router
 
 
-class OrchestrationScenarioTest(OfficialClientTest):
+class OrchestrationScenarioTest(ScenarioTest):
     """
     Base class for orchestration scenario tests
     """
@@ -2283,15 +2269,15 @@
 
     @classmethod
     def _get_default_network(cls):
-        networks = cls.network_client.list_networks()
-        for net in networks['networks']:
-            if net['name'] == CONF.compute.fixed_network_name:
+        _, networks = cls.networks_client.list_networks()
+        for net in networks:
+            if net['label'] == CONF.compute.fixed_network_name:
                 return net
 
     @staticmethod
     def _stack_output(stack, output_key):
         """Return a stack output value for a given key."""
-        return next((o['output_value'] for o in stack.outputs
+        return next((o['output_value'] for o in stack['outputs']
                     if o['output_key'] == output_key), None)
 
     def _ping_ip_address(self, ip_address, should_succeed=True):
@@ -2307,82 +2293,6 @@
         return tempest.test.call_until_true(
             ping, CONF.orchestration.build_timeout, 1)
 
-    def _wait_for_resource_status(self, stack_identifier, resource_name,
-                                  status, failure_pattern='^.*_FAILED$'):
-        """Waits for a Resource to reach a given status."""
-        fail_regexp = re.compile(failure_pattern)
-        build_timeout = CONF.orchestration.build_timeout
-        build_interval = CONF.orchestration.build_interval
-
-        start = timeutils.utcnow()
-        while timeutils.delta_seconds(start,
-                                      timeutils.utcnow()) < build_timeout:
-            try:
-                res = self.client.resources.get(
-                    stack_identifier, resource_name)
-            except heat_exceptions.HTTPNotFound:
-                # ignore this, as the resource may not have
-                # been created yet
-                pass
-            else:
-                if res.resource_status == status:
-                    return
-                if fail_regexp.search(res.resource_status):
-                    raise exceptions.StackResourceBuildErrorException(
-                        resource_name=res.resource_name,
-                        stack_identifier=stack_identifier,
-                        resource_status=res.resource_status,
-                        resource_status_reason=res.resource_status_reason)
-            time.sleep(build_interval)
-
-        message = ('Resource %s failed to reach %s status within '
-                   'the required time (%s s).' %
-                   (res.resource_name, status, build_timeout))
-        raise exceptions.TimeoutException(message)
-
-    def _wait_for_stack_status(self, stack_identifier, status,
-                               failure_pattern='^.*_FAILED$'):
-        """
-        Waits for a Stack to reach a given status.
-
-        Note this compares the full $action_$status, e.g
-        CREATE_COMPLETE, not just COMPLETE which is exposed
-        via the status property of Stack in heatclient
-        """
-        fail_regexp = re.compile(failure_pattern)
-        build_timeout = CONF.orchestration.build_timeout
-        build_interval = CONF.orchestration.build_interval
-
-        start = timeutils.utcnow()
-        while timeutils.delta_seconds(start,
-                                      timeutils.utcnow()) < build_timeout:
-            try:
-                stack = self.client.stacks.get(stack_identifier)
-            except heat_exceptions.HTTPNotFound:
-                # ignore this, as the stackource may not have
-                # been created yet
-                pass
-            else:
-                if stack.stack_status == status:
-                    return
-                if fail_regexp.search(stack.stack_status):
-                    raise exceptions.StackBuildErrorException(
-                        stack_identifier=stack_identifier,
-                        stack_status=stack.stack_status,
-                        stack_status_reason=stack.stack_status_reason)
-            time.sleep(build_interval)
-
-        message = ('Stack %s failed to reach %s status within '
-                   'the required time (%s s).' %
-                   (stack.stack_name, status, build_timeout))
-        raise exceptions.TimeoutException(message)
-
-    def _stack_delete(self, stack_identifier):
-        try:
-            self.client.stacks.delete(stack_identifier)
-        except heat_exceptions.HTTPNotFound:
-            pass
-
 
 class SwiftScenarioTest(ScenarioTest):
     """
diff --git a/tempest/scenario/orchestration/test_server_cfn_init.py b/tempest/scenario/orchestration/test_server_cfn_init.py
index 36e6126..4e85429 100644
--- a/tempest/scenario/orchestration/test_server_cfn_init.py
+++ b/tempest/scenario/orchestration/test_server_cfn_init.py
@@ -38,7 +38,7 @@
             self.keypair_name = CONF.orchestration.keypair_name
         else:
             self.keypair = self.create_keypair()
-            self.keypair_name = self.keypair.id
+            self.keypair_name = self.keypair['name']
 
     def launch_stack(self):
         net = self._get_default_network()
@@ -52,32 +52,36 @@
 
         # create the stack
         self.template = self._load_template(__file__, self.template_name)
-        self.client.stacks.create(
-            stack_name=self.stack_name,
+        _, stack = self.client.create_stack(
+            name=self.stack_name,
             template=self.template,
             parameters=self.parameters)
+        stack = stack['stack']
 
-        self.stack = self.client.stacks.get(self.stack_name)
-        self.stack_identifier = '%s/%s' % (self.stack_name, self.stack.id)
-        self.addCleanup(self._stack_delete, self.stack_identifier)
+        _, self.stack = self.client.get_stack(stack['id'])
+        self.stack_identifier = '%s/%s' % (self.stack_name, self.stack['id'])
+        self.addCleanup(self.delete_wrapper,
+                        self.orchestration_client.delete_stack,
+                        self.stack_identifier)
 
     def check_stack(self):
         sid = self.stack_identifier
-        self._wait_for_resource_status(
+        self.client.wait_for_resource_status(
             sid, 'WaitHandle', 'CREATE_COMPLETE')
-        self._wait_for_resource_status(
+        self.client.wait_for_resource_status(
             sid, 'SmokeSecurityGroup', 'CREATE_COMPLETE')
-        self._wait_for_resource_status(
+        self.client.wait_for_resource_status(
             sid, 'SmokeKeys', 'CREATE_COMPLETE')
-        self._wait_for_resource_status(
+        self.client.wait_for_resource_status(
             sid, 'CfnUser', 'CREATE_COMPLETE')
-        self._wait_for_resource_status(
+        self.client.wait_for_resource_status(
             sid, 'SmokeServer', 'CREATE_COMPLETE')
 
-        server_resource = self.client.resources.get(sid, 'SmokeServer')
-        server_id = server_resource.physical_resource_id
-        server = self.compute_client.servers.get(server_id)
-        server_ip = server.networks[CONF.compute.network_for_ssh][0]
+        _, server_resource = self.client.get_resource(sid, 'SmokeServer')
+        server_id = server_resource['physical_resource_id']
+        _, server = self.servers_client.get_server(server_id)
+        server_ip = \
+            server['addresses'][CONF.compute.network_for_ssh][0]['addr']
 
         if not self._ping_ip_address(server_ip):
             self._log_console_output(servers=[server])
@@ -85,7 +89,7 @@
                 "Timed out waiting for %s to become reachable" % server_ip)
 
         try:
-            self._wait_for_resource_status(
+            self.client.wait_for_resource_status(
                 sid, 'WaitCondition', 'CREATE_COMPLETE')
         except (exceptions.StackResourceBuildErrorException,
                 exceptions.TimeoutException) as e:
@@ -96,9 +100,9 @@
             # logs to be compared
             self._log_console_output(servers=[server])
 
-        self._wait_for_stack_status(sid, 'CREATE_COMPLETE')
+        self.client.wait_for_stack_status(sid, 'CREATE_COMPLETE')
 
-        stack = self.client.stacks.get(sid)
+        _, stack = self.client.get_stack(sid)
 
         # This is an assert of great significance, as it means the following
         # has happened:
diff --git a/tempest/test.py b/tempest/test.py
index d2b32d4..4a22b1b 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -69,6 +69,9 @@
 def safe_setup(f):
     """A decorator used to wrap the setUpClass for cleaning up resources
        when setUpClass failed.
+
+    Deprecated, see:
+    http://specs.openstack.org/openstack/qa-specs/specs/resource-cleanup.html
     """
     @functools.wraps(f)
     def decorator(cls):
@@ -279,15 +282,51 @@
 
     @classmethod
     def setUpClass(cls):
+        # It should never be overridden by descendants
         if hasattr(super(BaseTestCase, cls), 'setUpClass'):
             super(BaseTestCase, cls).setUpClass()
         cls.setUpClassCalled = True
+        # No test resources have been allocated up to this point
+        try:
+            # TODO(andreaf) Split up resource_setup into stages:
+            # skip checks, pre-hook, credentials, clients, resources, post-hook
+            cls.resource_setup()
+        except Exception:
+            etype, value, trace = sys.exc_info()
+            LOG.info("%s in resource setup. Invoking tearDownClass." % etype)
+            # Catch any exception raised by tearDownClass so we can re-raise
+            # the original exception at the end
+            try:
+                cls.tearDownClass()
+            except Exception as te:
+                LOG.exception("tearDownClass failed: %s" % te)
+            try:
+                raise etype(value), None, trace
+            finally:
+                del trace  # for avoiding circular refs
 
     @classmethod
     def tearDownClass(cls):
         at_exit_set.discard(cls)
+        # It should never be overridden by descendants
         if hasattr(super(BaseTestCase, cls), 'tearDownClass'):
             super(BaseTestCase, cls).tearDownClass()
+        try:
+            cls.resource_cleanup()
+        finally:
+            cls.clear_isolated_creds()
+
+    @classmethod
+    def resource_setup(cls):
+        """Class level setup steps for test cases.
+        Recommended order: skip checks, credentials, clients, resources.
+        """
+        pass
+
+    @classmethod
+    def resource_cleanup(cls):
+        """Class level resource cleanup for test cases. """
+        pass
 
     def setUp(self):
         super(BaseTestCase, self).setUp()
@@ -355,7 +394,7 @@
         """
         Clears isolated creds if set
         """
-        if getattr(cls, 'isolated_creds'):
+        if hasattr(cls, 'isolated_creds'):
             cls.isolated_creds.clear_isolated_creds()
 
     @classmethod
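
As a rough illustration of the resource_setup/resource_cleanup hooks introduced in the tempest/test.py change above, a minimal sketch of a migrated test class follows (the class name and helper methods are hypothetical, not part of this change):

import tempest.test


class ExampleScenarioTest(tempest.test.BaseTestCase):
    """Hypothetical test class sketching the new resource hooks."""

    @classmethod
    def resource_setup(cls):
        # Invoked once by BaseTestCase.setUpClass; if it raises,
        # tearDownClass (and thus resource_cleanup) runs before the
        # original exception is re-raised.
        super(ExampleScenarioTest, cls).resource_setup()
        cls.example_server = cls._boot_example_server()  # hypothetical helper

    @classmethod
    def resource_cleanup(cls):
        # Invoked by BaseTestCase.tearDownClass before isolated
        # credentials are cleared; guard against a partial setup.
        if getattr(cls, 'example_server', None):
            cls._delete_example_server(cls.example_server)  # hypothetical helper
        super(ExampleScenarioTest, cls).resource_cleanup()
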
diff --git a/tempest/thirdparty/boto/test.py b/tempest/thirdparty/boto/test.py
index 4bf71f3..f94d880 100644
--- a/tempest/thirdparty/boto/test.py
+++ b/tempest/thirdparty/boto/test.py
@@ -108,8 +108,8 @@
     CODE_RE = '.*'  # regexp makes sense in group match
 
     def match(self, exc):
-        """:returns: Retruns with an error string if not matches,
-               returns with None when matches.
+        """:returns: Returns with an error string if it does not match,
+               returns with None when it matches.
         """
         if not isinstance(exc, exception.BotoServerError):
             return "%r not an BotoServerError instance" % exc
@@ -485,7 +485,7 @@
 
     @classmethod
     def destroy_volume_wait(cls, volume):
-        """Delete volume, tryies to detach first.
+        """Delete volume, tries to detach first.
            Use just for teardown!
         """
         exc_num = 0
@@ -518,7 +518,7 @@
 
     @classmethod
     def destroy_snapshot_wait(cls, snapshot):
-        """delete snaphot, wait until not exists."""
+        """delete snapshot, wait until it ceases to exist."""
         snapshot.delete()
 
         def _update():
diff --git a/tools/check_logs.py b/tools/check_logs.py
index 917aaaf..7cf9d85 100755
--- a/tools/check_logs.py
+++ b/tools/check_logs.py
@@ -80,7 +80,6 @@
 
 def scan_content(name, content, regexp, whitelist):
     had_errors = False
-    print_log_name = True
     for line in content:
         if not line.startswith("Stderr:") and regexp.match(line):
             whitelisted = False
@@ -91,13 +90,8 @@
                     whitelisted = True
                     break
             if not whitelisted or dump_all_errors:
-                if print_log_name:
-                    print("\nLog File Has Errors: %s" % name)
-                    print_log_name = False
                 if not whitelisted:
                     had_errors = True
-                    print("*** Not Whitelisted ***"),
-                print(line.rstrip())
     return had_errors
 
 
@@ -151,17 +145,21 @@
             whitelists = loaded
     logs_with_errors = process_files(files_to_process, urls_to_process,
                                      whitelists)
-    if logs_with_errors:
-        print("Logs have errors")
-    if is_grenade:
-        print("Currently not failing grenade runs with errors")
-        return 0
+
     failed = False
-    for log in logs_with_errors:
-        if log not in allowed_dirty:
-            print("Log: %s not allowed to have ERRORS or TRACES" % log)
-            failed = True
+    if logs_with_errors:
+        log_files = set(logs_with_errors)
+        for log in log_files:
+            msg = '%s log file has errors' % log
+            if log not in allowed_dirty:
+                msg += ' and is not allowed to have them'
+                failed = True
+            print(msg)
+        print("\nPlease check the respective log files to see the errors")
     if failed:
+        if is_grenade:
+            print("Currently not failing grenade runs with errors")
+            return 0
         return 1
     print("ok")
     return 0
diff --git a/tox.ini b/tox.ini
index 6ec0b2c..492c4f6 100644
--- a/tox.ini
+++ b/tox.ini
@@ -6,28 +6,23 @@
 [testenv]
 setenv = VIRTUAL_ENV={envdir}
          OS_TEST_PATH=./tempest/test_discover
-         PYTHONHASHSEED=0
 usedevelop = True
 install_command = pip install -U {opts} {packages}
 
 [testenv:py26]
 setenv = OS_TEST_PATH=./tempest/tests
-         PYTHONHASHSEED=0
 commands = python setup.py test --slowest --testr-arg='tempest\.tests {posargs}'
 
 [testenv:py33]
 setenv = OS_TEST_PATH=./tempest/tests
-         PYTHONHASHSEED=0
 commands = python setup.py test --slowest --testr-arg='tempest\.tests {posargs}'
 
 [testenv:py27]
 setenv = OS_TEST_PATH=./tempest/tests
-         PYTHONHASHSEED=0
 commands = python setup.py test --slowest --testr-arg='tempest\.tests {posargs}'
 
 [testenv:cover]
 setenv = OS_TEST_PATH=./tempest/tests
-         PYTHONHASHSEED=0
 commands = python setup.py testr --coverage --testr-arg='tempest\.tests {posargs}'
 deps = -r{toxinidir}/requirements.txt
        -r{toxinidir}/test-requirements.txt
@@ -96,6 +91,7 @@
        -r{toxinidir}/test-requirements.txt
 
 [testenv:pep8]
+setenv = PYTHONHASHSEED=0
 commands =
    flake8 {posargs}
    {toxinidir}/tools/config/check_uptodate.sh