Merge "Add Aggregate XML client and tests."
diff --git a/tempest/README.rst b/tempest/README.rst
new file mode 100644
index 0000000..c41ef96
--- /dev/null
+++ b/tempest/README.rst
@@ -0,0 +1,98 @@
+Tempest Field Guide
+-------------------
+
+Tempest is designed to be useful in a large number of different
+environments. This includes gating commits to OpenStack core
+projects, validating OpenStack cloud implementations for
+correctness, and serving as a burn-in tool for OpenStack
+clouds.
+
+As such, Tempest tests come in many flavors, each with its own
+rules and guidelines. Below is the proposed Havana restructuring
+for Tempest to make this clear.
+
+tempest/
+   3rdparty/ - third-party API tests
+   api/ - API tests
+   cli/ - CLI tests
+   scenario/ - complex scenario tests
+   stress/ - stress tests
+   whitebox/ - white box testing
+
+Each of these directories contains a different type of test. What
+belongs in each directory, along with the rules and examples of good
+tests, is documented in a README.rst file in that directory.
+
+
+3rdparty
+------------
+
+Many OpenStack components include third-party API support. It is
+completely legitimate for Tempest to include tests of third-party
+APIs, but those should be kept separate from the normal OpenStack
+validation.
+
+TODO: tempest/tests/boto should become tempest/3rdparty/boto
+
+
+api
+------------
+
+API tests are validation tests for the OpenStack API. They should not
+use the existing Python clients for OpenStack, but should instead use
+the Tempest implementations of the clients. This allows us to test
+both XML and JSON. Having raw clients also lets us pass invalid JSON
+and XML to the APIs and see the results, something we could not do
+with the native clients.
+
+When it makes sense, API testing should be moved closer to the
+projects themselves, possibly as functional tests in their unit test
+frameworks.
+
+TODO: The bulk of tempest/tests should move to tempest/api
+
+
+cli
+------------
+
+CLI tests use the OpenStack CLI to interact with the OpenStack
+cloud. CLI testing in unit tests is somewhat difficult because,
+unlike server testing, there is no server code to instantiate.
+Tempest seems like a logical place for this, as it already requires
+a running OpenStack cloud.
+
+TODO: the top level cli directory moves to tempest/cli
+
+
+scenario
+------------
+
+Scenario tests are complex "through path" tests for OpenStack
+functionality. They are typically a series of steps in which complicated
+state requiring multiple services is set up, exercised, and torn down.
+
+Scenario tests can and should use the OpenStack Python clients.
+
+TODO: tests/network/test_network_basic_ops.py and
+tests/compute/servers/*_ops.py (among others) should move to tempest/scenario
+
+
+stress
+-----------
+
+Stress tests are designed to stress an OpenStack environment by
+running a high workload against it and seeing what breaks. Tools may
+be provided to help detect breakage (e.g. stack traces in the logs).
+
+TODO: the old stress tests are deleted; the new_stress work that
+David is developing moves in here.
+
+
+whitebox
+----------
+
+Whitebox tests are tests which require access to the database of the
+target OpenStack machine to verify internal state after operations
+are performed. Whitebox tests are allowed to use the Python clients.
+
+TODO: collect the existing whitebox tests into this location.
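
The api section above argues for raw Tempest clients precisely
because they can send malformed payloads. A minimal sketch of such a
negative test (illustrative only: the client attribute, post()
signature, endpoint, and expected code are assumptions, not Tempest's
actual API):

    def test_create_server_with_malformed_json(self):
        # Deliberately truncated JSON; a native client could never
        # produce this.
        broken_body = '{"server": {"name": "x", '
        resp, body = self.client.post('servers', broken_body,
                                      self.client.headers)
        # A well-behaved API should reject the request outright.
        self.assertEqual(400, resp.status)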
diff --git a/tempest/test.py b/tempest/test.py
index 4db9827..11d8f4e 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -62,6 +62,29 @@
             super(BaseTestCase, cls).setUpClass()
 
 
+def call_until_true(func, duration, sleep_for):
+    """
+    Call the given function until it returns True (and return True) or
+    until the specified duration (in seconds) elapses (and return
+    False).
+
+    :param func: A zero argument callable that returns True on success.
+    :param duration: The number of seconds for which to attempt a
+        successful call of the function.
+    :param sleep_for: The number of seconds to sleep after an unsuccessful
+                      invocation of the function.
+    """
+    now = time.time()
+    timeout = now + duration
+    while now < timeout:
+        if func():
+            return True
+        LOG.debug("Sleeping for %d seconds", sleep_for)
+        time.sleep(sleep_for)
+        now = time.time()
+    return False
+
+
 class TestCase(BaseTestCase):
     """Base test case class for all Tempest tests
 
@@ -96,57 +119,33 @@
         self.os_resources.remove(thing)
         del self.resource_keys[key]
 
-
-def call_until_true(func, duration, sleep_for):
-    """
-    Call the given function until it returns True (and return True) or
-    until the specified duration (in seconds) elapses (and return
-    False).
-
-    :param func: A zero argument callable that returns True on success.
-    :param duration: The number of seconds for which to attempt a successful
-                     call of the function.
-    :param sleep_for: The number of seconds to sleep after an unsuccessful
-                      invocation of the function.
-    """
-    now = time.time()
-    timeout = now + duration
-    while now < timeout:
-        if func():
-            return True
-        LOG.debug("Sleeping for %d seconds", sleep_for)
-        time.sleep(sleep_for)
-        now = time.time()
-    return False
-
-
-def status_timeout(testcase, things, thing_id, expected_status):
-    """
-    Given a thing and an expected status, do a loop, sleeping
-    for a configurable amount of time, checking for the
-    expected status to show. At any time, if the returned
-    status of the thing is ERROR, fail out.
-    """
-    def check_status():
-        # python-novaclient has resources available to its client
-        # that all implement a get() method taking an identifier
-        # for the singular resource to retrieve.
-        thing = things.get(thing_id)
-        new_status = thing.status
-        if new_status == 'ERROR':
-            testcase.fail("%s failed to get to expected status."
+    def status_timeout(self, things, thing_id, expected_status):
+        """
+        Given a thing and an expected status, do a loop, sleeping
+        for a configurable amount of time, checking for the
+        expected status to show. At any time, if the returned
+        status of the thing is ERROR, fail out.
+        """
+        def check_status():
+            # python-novaclient has resources available to its client
+            # that all implement a get() method taking an identifier
+            # for the singular resource to retrieve.
+            thing = things.get(thing_id)
+            new_status = thing.status
+            if new_status == 'ERROR':
+                self.fail("%s failed to get to expected status. "
                           "In ERROR state."
                           % thing)
-        elif new_status == expected_status:
-            return True  # All good.
-        LOG.debug("Waiting for %s to get to %s status. "
-                  "Currently in %s status",
-                  thing, expected_status, new_status)
-    conf = config.TempestConfig()
-    if not call_until_true(check_status,
-                           conf.compute.build_timeout,
-                           conf.compute.build_interval):
-        testcase.fail("Timed out waiting for thing %s to become %s"
+            elif new_status == expected_status:
+                return True  # All good.
+            LOG.debug("Waiting for %s to get to %s status. "
+                      "Currently in %s status",
+                      thing, expected_status, new_status)
+        conf = config.TempestConfig()
+        if not call_until_true(check_status,
+                               conf.compute.build_timeout,
+                               conf.compute.build_interval):
+            self.fail("Timed out waiting for thing %s to become %s"
                       % (thing_id, expected_status))
 
 
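The test.py hunk above relocates call_until_true and turns
status_timeout into an instance method, so callers no longer pass the
test case in explicitly; the helper now fails the running test itself
on ERROR or timeout. A minimal sketch of the new calling convention,
assuming a TestCase subclass whose setup provides a novaclient handle
as self.compute_client (as the scenario tests below do); the boot
arguments and server_is_ready() are placeholders, not real Tempest
fixtures:

    from tempest.test import TestCase, call_until_true

    class ServerOpsSketch(TestCase):
        def test_boot_and_wait(self):
            # Hypothetical boot; the image/flavor values are placeholders.
            server = self.compute_client.servers.create(
                name='sketch', image='image-id', flavor='flavor-id')
            # Polls self.compute_client.servers.get(server.id).status and
            # fails the test on ERROR, or when conf.compute.build_timeout
            # elapses before 'ACTIVE' shows up.
            self.status_timeout(self.compute_client.servers,
                                server.id, 'ACTIVE')

    def server_is_ready():
        # Placeholder predicate; a real check would query the service.
        return True

    # call_until_true remains module-level for ad-hoc polling: it returns
    # True as soon as the callable does, or False once 60 seconds elapse,
    # sleeping 2 seconds between attempts.
    done = call_until_true(server_is_ready, 60, 2)
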
diff --git a/tempest/tests/boto/test_ec2_instance_run.py b/tempest/tests/boto/test_ec2_instance_run.py
index 08dc330..b6b93d8 100644
--- a/tempest/tests/boto/test_ec2_instance_run.py
+++ b/tempest/tests/boto/test_ec2_instance_run.py
@@ -202,14 +202,13 @@
 
         re_search_wait(_output, text)
         part_lines = ssh.get_partitions().split('\n')
-        # "attaching" invalid EC2 state ! #1074901
         volume.attach(instance.id, "/dev/vdh")
 
         def _volume_state():
             volume.update(validate=True)
             return volume.status
 
-        #self.assertVolumeStatusWait(_volume_state, "in-use")  # #1074901
+        self.assertVolumeStatusWait(_volume_state, "in-use")
         re_search_wait(_volume_state, "in-use")
 
         #NOTE(afazekas):  Different Hypervisor backends names
@@ -229,9 +228,9 @@
 
         #TODO(afazekas): Resource compare to the flavor settings
 
-        volume.detach()  # "detaching" invalid EC2 status #1074901
+        volume.detach()
 
-        #self.assertVolumeStatusWait(_volume_state, "available")
+        self.assertVolumeStatusWait(_volume_state, "available")
         re_search_wait(_volume_state, "available")
         LOG.info("Volume %s state: %s", volume.id, volume.status)
 
diff --git a/tempest/tests/boto/test_ec2_volumes.py b/tempest/tests/boto/test_ec2_volumes.py
index dc8ff31..37a913e 100644
--- a/tempest/tests/boto/test_ec2_volumes.py
+++ b/tempest/tests/boto/test_ec2_volumes.py
@@ -39,7 +39,6 @@
         cls.client = cls.os.ec2api_client
         cls.zone = cls.client.get_good_zone()
 
-#NOTE(afazekas): as admin it can trigger the Bug #1074901
     @attr(type='smoke')
     def test_create_get_delete(self):
         # EC2 Create, get, delete Volume
diff --git a/tempest/tests/compute/floating_ips/test_floating_ips_actions.py b/tempest/tests/compute/floating_ips/test_floating_ips_actions.py
index d800fb5..5fe911f 100644
--- a/tempest/tests/compute/floating_ips/test_floating_ips_actions.py
+++ b/tempest/tests/compute/floating_ips/test_floating_ips_actions.py
@@ -53,8 +53,8 @@
     @classmethod
     def tearDownClass(cls):
         #Deleting the floating IP which is created in this method
-        super(FloatingIPsTestJSON, cls).tearDownClass()
         resp, body = cls.client.delete_floating_ip(cls.floating_ip_id)
+        super(FloatingIPsTestJSON, cls).tearDownClass()
 
     @attr(type='positive')
     def test_allocate_floating_ip(self):
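
This reordering recurs in the teardown fixes throughout this change:
class-level resources are deleted first, and super().tearDownClass()
runs last, presumably so the cleanup executes while the clients built
in setUpClass are still valid. The general shape, where MyTest,
cls.client, and the delete call stand in for whichever class is being
fixed:

    from tempest.test import BaseTestCase

    class MyTest(BaseTestCase):
        @classmethod
        def tearDownClass(cls):
            # Release the resources this class created while its
            # clients are still usable...
            cls.client.delete_floating_ip(cls.floating_ip_id)
            # ...and only then let the base class tear down the
            # client/credential state.
            super(MyTest, cls).tearDownClass()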
diff --git a/tempest/tests/compute/images/test_images_whitebox.py b/tempest/tests/compute/images/test_images_whitebox.py
index 105a38a..9ec05dd 100644
--- a/tempest/tests/compute/images/test_images_whitebox.py
+++ b/tempest/tests/compute/images/test_images_whitebox.py
@@ -37,10 +37,10 @@
     @classmethod
     def tearDownClass(cls):
         """Delete images after a test is executed."""
-        super(ImagesWhiteboxTest, cls).tearDownClass()
         for image_id in cls.image_ids:
             cls.client.delete_image(image_id)
             cls.image_ids.remove(image_id)
+        super(ImagesWhiteboxTest, cls).tearDownClass()
 
     @classmethod
     def update_state(self, server_id, vm_state, task_state, deleted=0):
diff --git a/tempest/tests/compute/servers/test_server_advanced_ops.py b/tempest/tests/compute/servers/test_server_advanced_ops.py
index 8be9c54..ad859d0 100644
--- a/tempest/tests/compute/servers/test_server_advanced_ops.py
+++ b/tempest/tests/compute/servers/test_server_advanced_ops.py
@@ -66,18 +66,18 @@
 
         self.assertEqual(self.instance.status, 'BUILD')
         instance_id = self.get_resource('instance').id
-        test.status_timeout(
-            self, self.compute_client.servers, instance_id, 'ACTIVE')
+        self.status_timeout(
+            self.compute_client.servers, instance_id, 'ACTIVE')
         instance = self.get_resource('instance')
         instance_id = instance.id
         resize_flavor = self.config.compute.flavor_ref_alt
         LOG.debug("Resizing instance %s from flavor %s to flavor %s",
                   instance.id, instance.flavor, resize_flavor)
         instance.resize(resize_flavor)
-        test.status_timeout(self, self.compute_client.servers, instance_id,
+        self.status_timeout(self.compute_client.servers, instance_id,
                             'VERIFY_RESIZE')
 
         LOG.debug("Confirming resize of instance %s", instance_id)
         instance.confirm_resize()
-        test.status_timeout(
-            self, self.compute_client.servers, instance_id, 'ACTIVE')
+        self.status_timeout(
+            self.compute_client.servers, instance_id, 'ACTIVE')
diff --git a/tempest/tests/compute/servers/test_server_basic_ops.py b/tempest/tests/compute/servers/test_server_basic_ops.py
index e4e246a..fdbbd3c 100644
--- a/tempest/tests/compute/servers/test_server_basic_ops.py
+++ b/tempest/tests/compute/servers/test_server_basic_ops.py
@@ -101,8 +101,8 @@
 
     def wait_on_active(self):
         instance_id = self.get_resource('instance').id
-        test.status_timeout(
-            self, self.compute_client.servers, instance_id, 'ACTIVE')
+        self.status_timeout(
+            self.compute_client.servers, instance_id, 'ACTIVE')
 
     def pause_server(self):
         instance = self.get_resource('instance')
@@ -110,8 +110,8 @@
         LOG.debug("Pausing instance %s. Current status: %s",
                   instance_id, instance.status)
         instance.pause()
-        test.status_timeout(
-            self, self.compute_client.servers, instance_id, 'PAUSED')
+        self.status_timeout(
+            self.compute_client.servers, instance_id, 'PAUSED')
 
     def unpause_server(self):
         instance = self.get_resource('instance')
@@ -119,8 +119,8 @@
         LOG.debug("Unpausing instance %s. Current status: %s",
                   instance_id, instance.status)
         instance.unpause()
-        test.status_timeout(
-            self, self.compute_client.servers, instance_id, 'ACTIVE')
+        self.status_timeout(
+            self.compute_client.servers, instance_id, 'ACTIVE')
 
     def suspend_server(self):
         instance = self.get_resource('instance')
@@ -128,7 +128,7 @@
         LOG.debug("Suspending instance %s. Current status: %s",
                   instance_id, instance.status)
         instance.suspend()
-        test.status_timeout(self, self.compute_client.servers,
+        self.status_timeout(self.compute_client.servers,
                             instance_id, 'SUSPENDED')
 
     def resume_server(self):
@@ -137,8 +137,8 @@
         LOG.debug("Resuming instance %s. Current status: %s",
                   instance_id, instance.status)
         instance.resume()
-        test.status_timeout(
-            self, self.compute_client.servers, instance_id, 'ACTIVE')
+        self.status_timeout(
+            self.compute_client.servers, instance_id, 'ACTIVE')
 
     def terminate_instance(self):
         instance = self.get_resource('instance')
diff --git a/tempest/tests/compute/servers/test_server_rescue.py b/tempest/tests/compute/servers/test_server_rescue.py
index 04c5b27..862a86a 100644
--- a/tempest/tests/compute/servers/test_server_rescue.py
+++ b/tempest/tests/compute/servers/test_server_rescue.py
@@ -85,7 +85,6 @@
 
     @classmethod
     def tearDownClass(cls):
-        super(ServerRescueTestJSON, cls).tearDownClass()
         #Deleting the floating IP which is created in this method
         cls.floating_ips_client.delete_floating_ip(cls.floating_ip_id)
         client = cls.volumes_extensions_client
@@ -93,6 +92,7 @@
         client.delete_volume(str(cls.volume_to_detach['id']).strip())
         resp, cls.sg = cls.security_groups_client.delete_security_group(
             cls.sg_id)
+        super(ServerRescueTestJSON, cls).tearDownClass()
 
     def tearDown(self):
         super(ServerRescueTestJSON, self).tearDown()
diff --git a/tempest/tests/network/common.py b/tempest/tests/network/common.py
index 6246f54..6811acf 100644
--- a/tempest/tests/network/common.py
+++ b/tempest/tests/network/common.py
@@ -269,7 +269,7 @@
             self.set_resource(name, server)
         except AttributeError:
             self.fail("Server not successfully created.")
-        test.status_timeout(self, client.servers, server.id, 'ACTIVE')
+        self.status_timeout(client.servers, server.id, 'ACTIVE')
         # The instance retrieved on creation is missing network
         # details, necessitating retrieval after it becomes active to
         # ensure correct details.
diff --git a/tempest/tests/volume/admin/test_multi_backend.py b/tempest/tests/volume/admin/test_multi_backend.py
index 04007c9..3d5fae4 100644
--- a/tempest/tests/volume/admin/test_multi_backend.py
+++ b/tempest/tests/volume/admin/test_multi_backend.py
@@ -97,8 +97,6 @@
 
     @classmethod
     def tearDownClass(cls):
-        super(VolumeMultiBackendTest, cls).tearDownClass()
-
         ## volumes deletion
         for volume_id in cls.volume_id_list:
             cls.client.delete_volume(volume_id)
@@ -108,6 +106,8 @@
         for volume_type in cls.volume_type_list:
             cls.client2.delete_volume_type(volume_type)
 
+        super(VolumeMultiBackendTest, cls).tearDownClass()
+
     def test_multi_backend_enabled(self):
         # this test checks that multi backend is enabled for at least the
         # computes where the volumes created in setUp were made
diff --git a/tempest/tests/volume/admin/test_volume_types_extra_specs.py b/tempest/tests/volume/admin/test_volume_types_extra_specs.py
index c8cf8d9..1cd7653 100644
--- a/tempest/tests/volume/admin/test_volume_types_extra_specs.py
+++ b/tempest/tests/volume/admin/test_volume_types_extra_specs.py
@@ -30,8 +30,8 @@
 
     @classmethod
     def tearDownClass(cls):
-        super(VolumeTypesExtraSpecsTest, cls).tearDownClass()
         cls.client.delete_volume_type(cls.volume_type['id'])
+        super(VolumeTypesExtraSpecsTest, cls).tearDownClass()
 
     def test_volume_type_extra_specs_list(self):
         # List Volume types extra specs.
diff --git a/tempest/tests/volume/admin/test_volume_types_extra_specs_negative.py b/tempest/tests/volume/admin/test_volume_types_extra_specs_negative.py
index 13fcbbf..bd6e279 100644
--- a/tempest/tests/volume/admin/test_volume_types_extra_specs_negative.py
+++ b/tempest/tests/volume/admin/test_volume_types_extra_specs_negative.py
@@ -36,8 +36,8 @@
 
     @classmethod
     def tearDownClass(cls):
-        super(ExtraSpecsNegativeTest, cls).tearDownClass()
         cls.client.delete_volume_type(cls.volume_type['id'])
+        super(ExtraSpecsNegativeTest, cls).tearDownClass()
 
     def test_update_no_body(self):
         # Should not update volume type extra specs with no body
diff --git a/tempest/tests/volume/test_volumes_actions.py b/tempest/tests/volume/test_volumes_actions.py
index fb9b975..e6eb8d8 100644
--- a/tempest/tests/volume/test_volumes_actions.py
+++ b/tempest/tests/volume/test_volumes_actions.py
@@ -43,7 +43,6 @@
 
     @classmethod
     def tearDownClass(cls):
-        super(VolumesActionsTest, cls).tearDownClass()
         # Delete the test instance and volume
         cls.client.delete_volume(cls.volume['id'])
         cls.client.wait_for_resource_deletion(cls.volume['id'])
@@ -51,6 +50,8 @@
         cls.servers_client.delete_server(cls.server['id'])
         cls.client.wait_for_resource_deletion(cls.server['id'])
 
+        super(VolumesActionsTest, cls).tearDownClass()
+
     @attr(type='smoke')
     def test_attach_detach_volume_to_instance(self):
         # Volume is attached and detached successfully from an instance
diff --git a/tox.ini b/tox.ini
index 4a2f80e..565a9ad 100644
--- a/tox.ini
+++ b/tox.ini
@@ -9,17 +9,52 @@
          NOSE_OPENSTACK_YELLOW=3
          NOSE_OPENSTACK_SHOW_ELAPSED=1
          NOSE_OPENSTACK_STDOUT=1
-deps = -r{toxinidir}/tools/pip-requires
-       -r{toxinidir}/tools/test-requires
-commands = nosetests {posargs}
+
+[testenv:full]
+sitepackages = True
+setenv = VIRTUAL_ENV={envdir}
+         NOSE_WITH_OPENSTACK=1
+         NOSE_OPENSTACK_COLOR=1
+         NOSE_OPENSTACK_RED=15
+         NOSE_OPENSTACK_YELLOW=3
+         NOSE_OPENSTACK_SHOW_ELAPSED=1
+         NOSE_OPENSTACK_STDOUT=1
+commands =
+  nosetests --logging-format '%(asctime)-15s %(message)s' --with-xunit --xunit-file=nosetests-full.xml -sv tempest
+  nosetests --logging-format '%(asctime)-15s %(message)s' --with-xunit --xunit-file=nosetests-cli.xml -sv cli
+
+[testenv:smoke]
+sitepackages = True
+setenv = VIRTUAL_ENV={envdir}
+         NOSE_WITH_OPENSTACK=1
+         NOSE_OPENSTACK_COLOR=1
+         NOSE_OPENSTACK_RED=15
+         NOSE_OPENSTACK_YELLOW=3
+         NOSE_OPENSTACK_SHOW_ELAPSED=1
+         NOSE_OPENSTACK_STDOUT=1
+commands =
+   nosetests --logging-format '%(asctime)-15s %(message)s' --with-xunit -sv --attr=type=smoke --xunit-file=nosetests-smoke.xml tempest
+
 
 [testenv:coverage]
-commands = python -m tools/tempest_coverage -c start --combine
-           nosetests {posargs}
-           python -m tools/tempest_coverage -c report --html
+sitepackages = True
+setenv = VIRTUAL_ENV={envdir}
+         NOSE_WITH_OPENSTACK=1
+         NOSE_OPENSTACK_COLOR=1
+         NOSE_OPENSTACK_RED=15
+         NOSE_OPENSTACK_YELLOW=3
+         NOSE_OPENSTACK_SHOW_ELAPSED=1
+         NOSE_OPENSTACK_STDOUT=1
+commands =
+   python -m tools/tempest_coverage -c start --combine
+   nosetests --logging-format '%(asctime)-15s %(message)s' --with-xunit --xunit-file=nosetests-full.xml -sv tempest
+   nosetests --logging-format '%(asctime)-15s %(message)s' --with-xunit --xunit-file=nosetests-cli.xml -sv cli
+   python -m tools/tempest_coverage -c report --html
 
 [testenv:pep8]
 commands = flake8
+deps = -r{toxinidir}/tools/pip-requires
+       -r{toxinidir}/tools/test-requires
 
 [flake8]
 ignore = E125,H302,H404
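
With these tox changes the base [testenv] no longer carries its own
deps and commands; runs go through the named environments instead:
"tox -e full" executes the whole tempest tree plus the cli tests with
per-run XUnit result files, "tox -e smoke" restricts nose to tests
tagged with attr type=smoke, and "tox -e coverage" wraps the full run
between the tempest_coverage start and report steps. The pep8
environment now declares the pip-requires/test-requires deps it needs
to run flake8.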