Addition of base Smoke and Whitebox tests

* Splits the "manager" class out into its own file (for now,
  to keep the initial code change small)
* Adds base manager classes for Fuzz and Default clients
* Adds base test case class and a derived base SmokeTest class
* Adds smoke test for basic server operations in compute
* Adds non-smoke test for advanced server operations in compute
* Adds Whitebox base test case class
* Adds basic database-checking whitebox tests for servers and images

This change builds on the proposed refactoring of the Tempest Manager
and base test classes as per https://review.openstack.org/#/c/7069

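For reference, a usage sketch (assuming tempest.conf has been populated
from the sample below): the whitebox tests are skipped unless
whitebox_enabled is set to true in the compute section, and they are
selected through the nose attribute filter wired into run_tests.sh:

    ./run_tests.sh --whitebox
    # or directly with nose:
    nosetests --attr=type=whitebox tempest/tests/compute
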
Change-Id: I12125fffb725cad3a4fef3134c83e55437529252
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index 31dd902..ecd1e2a 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -99,6 +99,28 @@
 # Level to log Compute API request/response details.
 log_level = ERROR
 
+# Whitebox options for Compute. These options enable the whitebox
+# test cases, which inspect internal Nova database state, SSH into
+# VMs to check instance state, etc.
+
+# Should we run whitebox tests for Compute?
+whitebox_enabled = true
+
+# Path of nova source directory
+source_dir = /opt/stack/nova
+
+# Path of nova configuration file
+config_path = /etc/nova/nova.conf
+
+# Directory containing nova binaries such as nova-manage
+bin_dir = /usr/local/bin
+
+# Path to a private key file for SSH access to remote hosts
+path_to_private_key = /home/user/.ssh/id_rsa
+
+# Connection string for the Compute service database
+db_uri = mysql://user:pass@localhost/nova
+
 [image]
 # This section contains configuration options used when executing tests
 # against the OpenStack Images API
diff --git a/etc/tempest.conf.tpl b/etc/tempest.conf.tpl
index 480d74b..f268de2 100644
--- a/etc/tempest.conf.tpl
+++ b/etc/tempest.conf.tpl
@@ -78,6 +78,28 @@
 # Level to log Compute API request/response details.
 log_level = %COMPUTE_LOG_LEVEL%
 
+# Whitebox options for Compute. These options enable the whitebox
+# test cases, which inspect internal Nova database state, SSH into
+# VMs to check instance state, etc.
+
+# Should we run whitebox tests for Compute?
+whitebox_enabled = %COMPUTE_WHITEBOX_ENABLED%
+
+# Path of nova source directory
+source_dir = %COMPUTE_SOURCE_DIR%
+
+# Path of nova configuration file
+config_path = %COMPUTE_CONFIG_PATH%
+
+# Directory containing nova binaries such as nova-manage
+bin_dir = %COMPUTE_BIN_DIR%
+
+# Path to a private key file for SSH access to remote hosts
+path_to_private_key = %COMPUTE_PATH_TO_PRIVATE_KEY%
+
+# Connection string for the Compute service database
+db_uri = %COMPUTE_DB_URI%
+
 [image]
 # This section contains configuration options used when executing tests
 # against the OpenStack Images API
diff --git a/run_tests.sh b/run_tests.sh
index 30325fe..f62667e 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -5,6 +5,7 @@
   echo "Run Tempest test suite"
   echo ""
   echo "  -s, --smoke              Only run smoke tests"
+  echo "  -w, --whitebox           Only run whitebox tests"
   echo "  -p, --pep8               Just run pep8"
   echo "  -h, --help               Print this usage message"
   echo "  -d. --debug              Debug this script -- set -o xtrace"
@@ -17,6 +18,7 @@
     -d|--debug) set -o xtrace;;
     -p|--pep8) let just_pep8=1;;
     -s|--smoke) noseargs="$noseargs --attr=type=smoke";;
+    -w|--whitebox) noseargs="$noseargs --attr=type=whitebox";;
     *) noseargs="$noseargs $1"
   esac
 }
diff --git a/tempest/common/ssh.py b/tempest/common/ssh.py
index f43ebd9..085fce3 100644
--- a/tempest/common/ssh.py
+++ b/tempest/common/ssh.py
@@ -1,8 +1,27 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 OpenStack, LLC
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
 import time
 import socket
 import warnings
+
 from tempest import exceptions
 
+
 with warnings.catch_warnings():
     warnings.simplefilter("ignore")
     import paramiko
@@ -10,11 +29,15 @@
 
 class Client(object):
 
-    def __init__(self, host, username, password, timeout=60):
+    def __init__(self, host, username, password=None, timeout=300,
+                 channel_timeout=10, look_for_keys=False, key_filename=None):
         self.host = host
         self.username = username
         self.password = password
+        self.look_for_keys = look_for_keys
+        self.key_filename = key_filename
         self.timeout = int(timeout)
+        self.channel_timeout = int(channel_timeout)
 
     def _get_ssh_connection(self):
         """Returns an ssh connection to the specified host"""
@@ -27,13 +50,16 @@
         while not self._is_timed_out(self.timeout, _start_time):
             try:
                 ssh.connect(self.host, username=self.username,
-                    password=self.password, timeout=20)
+                            password=self.password,
+                            look_for_keys=self.look_for_keys,
+                            key_filename=self.key_filename,
+                            timeout=self.timeout)
                 _timeout = False
                 break
             except socket.error:
                 continue
             except paramiko.AuthenticationException:
-                time.sleep(15)
+                time.sleep(5)
                 continue
         if _timeout:
             raise exceptions.SSHTimeout(host=self.host,
@@ -66,9 +92,17 @@
         """
         ssh = self._get_ssh_connection()
         stdin, stdout, stderr = ssh.exec_command(cmd)
-        output = stdout.read()
+        stdin.flush()
+        stdin.channel.shutdown_write()
+        stdout.channel.settimeout(self.channel_timeout)
+        status = stdout.channel.recv_exit_status()
+        try:
+            output = stdout.read()
+        except socket.timeout:
+            # Reading the output timed out; treat it as unavailable
+            output = None
         ssh.close()
-        return output
+        return status, output
 
     def test_connection_auth(self):
         """ Returns true if ssh can connect to server"""
diff --git a/tempest/config.py b/tempest/config.py
index 7d18974..28a0c5e 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -235,6 +235,36 @@
         """Level for logging compute API calls."""
         return self.get("log_level", 'ERROR')
 
+    @property
+    def whitebox_enabled(self):
+        """Does the test environment support whitebox tests for Compute?"""
+        return self.get("whitebox_enabled", 'false').lower() != 'false'
+
+    @property
+    def db_uri(self):
+        """Connection string for the Compute service database"""
+        return self.get("db_uri", None)
+
+    @property
+    def source_dir(self):
+        """Path of nova source directory"""
+        return self.get("source_dir", "/opt/stack/nova")
+
+    @property
+    def config_path(self):
+        """Path of nova configuration file"""
+        return self.get("config_path", "/etc/nova/nova.conf")
+
+    @property
+    def bin_dir(self):
+        """Directory containing nova binaries such as nova-manage"""
+        return self.get("bin_dir", "/usr/local/bin/")
+
+    @property
+    def path_to_private_key(self):
+        """Path to a private key file for SSH access to remote hosts"""
+        return self.get("path_to_private_key")
+
 
 class ComputeAdminConfig(BaseConfig):
 
@@ -314,7 +344,6 @@
         return self.get("api_version", "v1.1")
 
 
-# TODO(jaypipes): Move this to a common utils (not data_utils...)
 def singleton(cls):
     """Simple wrapper for classes that should only have a single instance"""
     instances = {}
diff --git a/tempest/exceptions.py b/tempest/exceptions.py
index 39463d7..03cf163 100644
--- a/tempest/exceptions.py
+++ b/tempest/exceptions.py
@@ -96,3 +96,7 @@
 
 class ServerUnreachable(TempestException):
     message = "The server is not reachable via the configured network"
+
+
+class SQLException(TempestException):
+    message = "SQL error: %(message)s"
diff --git a/tempest/manager.py b/tempest/manager.py
new file mode 100644
index 0000000..b960edb
--- /dev/null
+++ b/tempest/manager.py
@@ -0,0 +1,233 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 OpenStack, LLC
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import logging
+
+# Default client libs
+import novaclient.client
+import glance.client
+
+import tempest.config
+from tempest import exceptions
+# Tempest REST Fuzz testing client libs
+from tempest.services.network.json import network_client
+from tempest.services.nova.json import images_client
+from tempest.services.nova.json import flavors_client
+from tempest.services.nova.json import servers_client
+from tempest.services.nova.json import limits_client
+from tempest.services.nova.json import extensions_client
+from tempest.services.nova.json import security_groups_client
+from tempest.services.nova.json import floating_ips_client
+from tempest.services.nova.json import keypairs_client
+from tempest.services.nova.json import volumes_client
+from tempest.services.nova.json import console_output_client
+
+NetworkClient = network_client.NetworkClient
+ImagesClient = images_client.ImagesClient
+FlavorsClient = flavors_client.FlavorsClient
+ServersClient = servers_client.ServersClient
+LimitsClient = limits_client.LimitsClient
+ExtensionsClient = extensions_client.ExtensionsClient
+SecurityGroupsClient = security_groups_client.SecurityGroupsClient
+FloatingIPsClient = floating_ips_client.FloatingIPsClient
+KeyPairsClient = keypairs_client.KeyPairsClient
+VolumesClient = volumes_client.VolumesClient
+ConsoleOutputsClient = console_output_client.ConsoleOutputsClient
+
+LOG = logging.getLogger(__name__)
+
+
+class Manager(object):
+
+    """
+    Base manager class
+
+    Manager objects are responsible for providing a configuration object
+    and a client object for a test case to use in performing actions.
+    """
+
+    def __init__(self):
+        self.config = tempest.config.TempestConfig()
+        self.client = None
+
+
+class DefaultClientManager(Manager):
+
+    """
+    Manager class that indicates the client provided by the manager
+    is the default Python client that an OpenStack API provides.
+    """
+    pass
+
+
+class FuzzClientManager(Manager):
+
+    """
+    Manager class that indicates the client provided by the manager
+    is one of the fuzz-testing clients that Tempest contains. These
+    clients are used to throw random or invalid data at an endpoint
+    and to check that the endpoint returns an appropriate error
+    response.
+    """
+    pass
+
+
+class ComputeDefaultClientManager(DefaultClientManager):
+
+    """
+    Manager that provides the default python-novaclient client object
+    to access the OpenStack Compute API.
+    """
+
+    NOVACLIENT_VERSION = '2'
+
+    def __init__(self):
+        super(ComputeDefaultClientManager, self).__init__()
+        username = self.config.compute.username
+        password = self.config.compute.password
+        tenant_name = self.config.compute.tenant_name
+
+        if None in (username, password, tenant_name):
+            msg = ("Missing required credentials. "
+                   "username: %(username)s, password: %(password)s, "
+                   "tenant_name: %(tenant_name)s") % locals()
+            raise exceptions.InvalidConfiguration(msg)
+
+        # Novaclient adds a /tokens/ part to the auth URL automatically
+        auth_url = self.config.identity.auth_url.rstrip('tokens')
+
+        client_args = (username, password, tenant_name, auth_url)
+
+        # Create our default Nova client to use in testing
+        self.client = novaclient.client.Client(self.NOVACLIENT_VERSION,
+                        *client_args,
+                        service_type=self.config.compute.catalog_type)
+
+
+class GlanceDefaultClientManager(DefaultClientManager):
+    """
+    Manager that provides the default glance client object to access
+    the OpenStack Images API
+    """
+    def __init__(self):
+        super(GlanceDefaultClientManager, self).__init__()
+        host = self.config.images.host
+        port = self.config.images.port
+        strategy = self.config.identity.strategy
+        auth_url = self.config.identity.auth_url
+        username = self.config.images.username
+        password = self.config.images.password
+        tenant_name = self.config.images.tenant_name
+
+        if None in (host, port, username, password, tenant_name):
+            msg = ("Missing required credentials. "
+                    "host:%(host)s, port: %(port)s username: %(username)s, "
+                    "password: %(password)s, "
+                    "tenant_name: %(tenant_name)s") % locals()
+            raise exceptions.InvalidConfiguration(msg)
+        auth_url = self.config.identity.auth_url.rstrip('tokens')
+
+        creds = {'strategy': strategy,
+                 'username': username,
+                 'password': password,
+                 'tenant': tenant_name,
+                 'auth_url': auth_url}
+
+        # Create our default Glance client to use in testing
+        self.client = glance.client.Client(host, port, creds=creds)
+
+
+class ComputeFuzzClientManager(FuzzClientManager):
+
+    """
+    Manager that uses the Tempest REST client that can send
+    random or invalid data at the OpenStack Compute API
+    """
+
+    def __init__(self, username=None, password=None, tenant_name=None):
+        """
+        We allow overriding of the credentials used within the various
+        client classes managed by the Manager object. Left as None, the
+        standard username/password/tenant_name is used.
+
+        :param username: Override of the username
+        :param password: Override of the password
+        :param tenant_name: Override of the tenant name
+        """
+        super(ComputeFuzzClientManager, self).__init__()
+
+        # If no creds are provided, we fall back on the defaults
+        # in the config file for the Compute API.
+        username = username or self.config.compute.username
+        password = password or self.config.compute.password
+        tenant_name = tenant_name or self.config.compute.tenant_name
+
+        if None in (username, password, tenant_name):
+            msg = ("Missing required credentials. "
+                   "username: %(username)s, password: %(password)s, "
+                   "tenant_name: %(tenant_name)s") % locals()
+            raise exceptions.InvalidConfiguration(msg)
+
+        auth_url = self.config.identity.auth_url
+
+        if self.config.identity.strategy == 'keystone':
+            client_args = (self.config, username, password, auth_url,
+                           tenant_name)
+        else:
+            client_args = (self.config, username, password, auth_url)
+
+        self.servers_client = ServersClient(*client_args)
+        self.flavors_client = FlavorsClient(*client_args)
+        self.images_client = ImagesClient(*client_args)
+        self.limits_client = LimitsClient(*client_args)
+        self.extensions_client = ExtensionsClient(*client_args)
+        self.keypairs_client = KeyPairsClient(*client_args)
+        self.security_groups_client = SecurityGroupsClient(*client_args)
+        self.floating_ips_client = FloatingIPsClient(*client_args)
+        self.volumes_client = VolumesClient(*client_args)
+        self.console_outputs_client = ConsoleOutputsClient(*client_args)
+        self.network_client = NetworkClient(*client_args)
+
+
+class ComputeFuzzClientAltManager(ComputeFuzzClientManager):
+
+    """
+    Manager object that uses the alt_XXX credentials for its
+    managed client objects
+    """
+
+    def __init__(self):
+        conf = tempest.config.TempestConfig()
+        super(ComputeFuzzClientAltManager, self).__init__(
+                conf.compute.alt_username,
+                conf.compute.alt_password,
+                conf.compute.alt_tenant_name)
+
+
+class ComputeFuzzClientAdminManager(ComputeFuzzClientManager):
+
+    """
+    Manager object that uses the admin credentials for its
+    managed client objects
+    """
+
+    def __init__(self):
+        conf = tempest.config.TempestConfig()
+        super(ComputeFuzzClientAdminManager, self).__init__(
+                conf.compute_admin.username,
+                conf.compute_admin.password,
+                conf.compute_admin.tenant_name)
diff --git a/tempest/services/nova/json/servers_client.py b/tempest/services/nova/json/servers_client.py
index b4ea973..78a81e0 100644
--- a/tempest/services/nova/json/servers_client.py
+++ b/tempest/services/nova/json/servers_client.py
@@ -153,7 +153,7 @@
                 message += ' Current status: %s.' % server_status
                 raise exceptions.TimeoutException(message)
 
-    def wait_for_server_termination(self, server_id):
+    def wait_for_server_termination(self, server_id, ignore_error=False):
         """Waits for server to reach termination"""
         start_time = int(time.time())
         while True:
@@ -163,7 +163,7 @@
                 return
 
             server_status = body['status']
-            if server_status == 'ERROR':
+            if server_status == 'ERROR' and not ignore_error:
                 raise exceptions.BuildErrorException
 
             if int(time.time()) - start_time >= self.build_timeout:
diff --git a/tempest/smoke.py b/tempest/smoke.py
new file mode 100644
index 0000000..9383559
--- /dev/null
+++ b/tempest/smoke.py
@@ -0,0 +1,64 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 OpenStack, LLC
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import logging
+
+from tempest import test
+
+LOG = logging.getLogger(__name__)
+
+
+class SmokeTest(object):
+
+    """
+    Base test case class mixin for "smoke tests"
+
+    Smoke tests are tests that have the following characteristics:
+
+     * Test basic operations of an API, typically in an order that
+       a regular user would perform those operations
+     * Test only the correct inputs and action paths -- no fuzz or
+       random input data is sent, only valid inputs.
+     * Use only the default client tool for calling an API
+    """
+    pass
+
+
+class ComputeSmokeTest(test.ComputeDefaultClientTest, SmokeTest):
+
+    """
+    Base smoke test case class for OpenStack Compute API (Nova)
+    """
+
+    @classmethod
+    def tearDownClass(cls):
+        # NOTE(jaypipes): Because smoke tests are typically run in a specific
+        # order, and because test methods in smoke tests generally create
+        # resources in a particular order, we destroy resources in the reverse
+        # order in which resources are added to the smoke test class object
+        if not cls.resources:
+            return
+        thing = cls.resources.pop()
+        while True:
+            LOG.debug("Deleting %r from shared resources of %s" %
+                      (thing, cls.__name__))
+            # Resources in novaclient all have a delete() method
+            # which destroys the resource...
+            thing.delete()
+            if not cls.resources:
+                return
+            thing = cls.resources.pop()
diff --git a/tempest/test.py b/tempest/test.py
new file mode 100644
index 0000000..33bb3fb
--- /dev/null
+++ b/tempest/test.py
@@ -0,0 +1,154 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 OpenStack, LLC
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import logging
+import time
+
+import unittest2 as unittest
+
+from tempest import manager
+
+LOG = logging.getLogger(__name__)
+
+
+class TestCase(unittest.TestCase):
+
+    """
+    Base test case class for all Tempest tests
+
+    Contains basic setup and convenience methods
+    """
+    manager_class = None
+
+    @classmethod
+    def setUpClass(cls):
+        cls.manager = cls.manager_class()
+        cls.config = cls.manager.config
+        cls.client = cls.manager.client
+        cls.resource_keys = {}
+        cls.resources = []
+
+    def set_resource(self, key, thing):
+        LOG.debug("Adding %r to shared resources of %s" %
+                  (thing, self.__class__.__name__))
+        self.resource_keys[key] = thing
+        self.resources.append(thing)
+
+    def get_resource(self, key):
+        return self.resource_keys[key]
+
+    def remove_resource(self, key):
+        thing = self.resource_keys[key]
+        self.resources.remove(thing)
+        del self.resource_keys[key]
+
+
+class ComputeDefaultClientTest(TestCase):
+
+    """
+    Base test case class for OpenStack Compute API (Nova)
+    that uses the novaclient libs for calling the API.
+    """
+
+    manager_class = manager.ComputeDefaultClientManager
+
+    def status_timeout(self, things, thing_id, expected_status):
+        """
+        Given a thing and an expected status, do a loop, sleeping
+        for a configurable amount of time, checking for the
+        expected status to show. At any time, if the returned
+        status of the thing is ERROR, fail out.
+        """
+        now = time.time()
+        timeout = now + self.config.compute.build_timeout
+        sleep_for = self.config.compute.build_interval
+        while now < timeout:
+            # python-novaclient has resources available to its client
+            # that all implement a get() method taking an identifier
+            # for the singular resource to retrieve.
+            thing = things.get(thing_id)
+            new_status = thing.status
+            if new_status == 'ERROR':
+                self.fail("%s failed to get to expected status. "
+                          "In ERROR state."
+                          % thing)
+            elif new_status == expected_status:
+                return  # All good.
+            LOG.debug("Waiting for %s to get to %s status. "
+                      "Currently in %s status",
+                      thing, expected_status, new_status)
+            LOG.debug("Sleeping for %d seconds", sleep_for)
+            time.sleep(sleep_for)
+            now = time.time()
+        self.fail("Timed out waiting for thing %s to become %s"
+                  % (thing_id, expected_status))
+
+
+class ComputeFuzzClientTest(TestCase):
+
+    """
+    Base test case class for OpenStack Compute API (Nova)
+    that uses the Tempest REST fuzz client libs for calling the API.
+    """
+
+    manager_class = manager.ComputeFuzzClientManager
+
+    def status_timeout(self, client_get_method, thing_id, expected_status):
+        """
+        Given a method to get a resource and an expected status, do a loop,
+        sleeping for a configurable amount of time, checking for the
+        expected status to show. At any time, if the returned
+        status of the thing is ERROR, fail out.
+
+        :param client_get_method: The callable that will retrieve the thing
+                                  with ID :param:thing_id
+        :param thing_id: The ID of the thing to get
+        :param expected_status: String value of the expected status of the
+                                thing that we are looking for.
+
+        Example usage:
+
+        ::
+
+            def test_some_server_action(self):
+                client = self.servers_client
+                resp, server = client.create_server('random_server')
+                self.status_timeout(client.get_server, server['id'], 'ACTIVE')
+        """
+        now = time.time()
+        timeout = now + self.config.compute.build_timeout
+        sleep_for = self.config.compute.build_interval
+        while now < timeout:
+            # Tempest REST client has resources available to its client
+            # that all implement a various get_$resource() methods taking
+            # an identifier for the singular resource to retrieve.
+            thing = client_get_method(thing_id)
+            new_status = thing['status']
+            if new_status == 'ERROR':
+                self.fail("%s failed to get to expected status. "
+                          "In ERROR state."
+                          % thing)
+            elif new_status == expected_status:
+                return  # All good.
+            LOG.debug("Waiting for %s to get to %s status. "
+                      "Currently in %s status",
+                      thing, expected_status, new_status)
+            LOG.debug("Sleeping for %d seconds", sleep_for)
+            time.sleep(sleep_for)
+            now = time.time()
+        self.fail("Timed out waiting for thing %s to become %s"
+                  % (thing_id, expected_status))
diff --git a/tempest/tests/__init__.py b/tempest/tests/__init__.py
index e69de29..6a0b9cd 100644
--- a/tempest/tests/__init__.py
+++ b/tempest/tests/__init__.py
@@ -0,0 +1,16 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 OpenStack, LLC
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
diff --git a/tempest/tests/compute/__init__.py b/tempest/tests/compute/__init__.py
index 2fe8c21..7396833 100644
--- a/tempest/tests/compute/__init__.py
+++ b/tempest/tests/compute/__init__.py
@@ -28,6 +28,7 @@
 CREATE_IMAGE_ENABLED = CONFIG.compute.create_image_enabled
 RESIZE_AVAILABLE = CONFIG.compute.resize_available
 CHANGE_PASSWORD_AVAILABLE = CONFIG.compute.change_password_available
+WHITEBOX_ENABLED = CONFIG.compute.whitebox_enabled
 DISK_CONFIG_ENABLED = False
 FLAVOR_EXTRA_DATA_ENABLED = False
 MULTI_USER = False
diff --git a/tempest/tests/compute/test_images_whitebox.py b/tempest/tests/compute/test_images_whitebox.py
new file mode 100644
index 0000000..f371fe4
--- /dev/null
+++ b/tempest/tests/compute/test_images_whitebox.py
@@ -0,0 +1,168 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 OpenStack, LLC
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from nose.plugins.attrib import attr
+
+from tempest.common.utils.data_utils import rand_name
+from tempest import exceptions
+from tempest.tests.compute import base
+from tempest import whitebox
+
+
+@attr(type='whitebox')
+class ImagesWhiteboxTest(whitebox.ComputeWhiteboxTest, base.BaseComputeTest):
+
+    @classmethod
+    def setUpClass(cls):
+        super(ImagesWhiteboxTest, cls).setUpClass()
+        cls.client = cls.images_client
+        cls.connection, cls.meta = cls.get_db_handle_and_meta()
+        cls.shared_server = cls.create_server()
+        cls.image_ids = []
+
+    @classmethod
+    def tearDownClass(cls):
+        """Terminate instances and delete images created by the tests"""
+
+        for server in list(cls.servers):
+            cls.update_state(server['id'], "active", None)
+            resp, body = cls.servers_client.delete_server(server['id'])
+            if resp['status'] == '204':
+                cls.servers.remove(server)
+                cls.servers_client.wait_for_server_termination(server['id'])
+
+        for image_id in list(cls.image_ids):
+            cls.client.delete_image(image_id)
+            cls.image_ids.remove(image_id)
+
+    @classmethod
+    def update_state(cls, server_id, vm_state, task_state, deleted=0):
+        """Update states of an instance in database for validation"""
+        if not task_state:
+            task_state = "NULL"
+
+        instances = cls.meta.tables['instances']
+        stmt = instances.update().where(instances.c.uuid == server_id).values(
+                                                               deleted=deleted,
+                                                             vm_state=vm_state,
+                                                         task_state=task_state)
+
+        cls.connection.execute(stmt, autocommit=True)
+
+    def _test_create_image_409_base(self, vm_state, task_state, deleted=0):
+        """Base method for create image tests based on vm and task states"""
+        try:
+            self.update_state(self.shared_server['id'], vm_state,
+                              task_state, deleted)
+
+            image_name = rand_name('snap-')
+            self.assertRaises(exceptions.Duplicate,
+                              self.client.create_image,
+                              self.shared_server['id'], image_name)
+        except Exception:
+            self.fail("Should not allow create image when vm_state=%s and "
+                      "task_state=%s" % (vm_state, task_state))
+        finally:
+            self.update_state(self.shared_server['id'], 'active', None)
+
+    def test_create_image_when_vm_eq_building_task_eq_scheduling(self):
+        """409 error when instance states are building,scheduling"""
+        self._test_create_image_409_base("building", "scheduling")
+
+    def test_create_image_when_vm_eq_building_task_eq_networking(self):
+        """409 error when instance states are building,networking"""
+        self._test_create_image_409_base("building", "networking")
+
+    def test_create_image_when_vm_eq_building_task_eq_bdm(self):
+        """409 error when instance states are building,block_device_mapping"""
+        self._test_create_image_409_base("building", "block_device_mapping")
+
+    def test_create_image_when_vm_eq_building_task_eq_spawning(self):
+        """409 error when instance states are building,spawning"""
+        self._test_create_image_409_base("building", "spawning")
+
+    def test_create_image_when_vm_eq_active_task_eq_image_backup(self):
+        """409 error when instance states are active,image_backup"""
+        self._test_create_image_409_base("active", "image_backup")
+
+    def test_create_image_when_vm_eq_resized_task_eq_resize_prep(self):
+        """409 error when instance states are resized,resize_prep"""
+        self._test_create_image_409_base("resized", "resize_prep")
+
+    def test_create_image_when_vm_eq_resized_task_eq_resize_migrating(self):
+        """409 error when instance states are resized,resize_migrating"""
+        self._test_create_image_409_base("resized", "resize_migrating")
+
+    def test_create_image_when_vm_eq_resized_task_eq_resize_migrated(self):
+        """409 error when instance states are resized,resize_migrated"""
+        self._test_create_image_409_base("resized", "resize_migrated")
+
+    def test_create_image_when_vm_eq_resized_task_eq_resize_finish(self):
+        """409 error when instance states are resized,resize_finish"""
+        self._test_create_image_409_base("resized", "resize_finish")
+
+    def test_create_image_when_vm_eq_resized_task_eq_resize_reverting(self):
+        """409 error when instance states are resized,resize_reverting"""
+        self._test_create_image_409_base("resized", "resize_reverting")
+
+    def test_create_image_when_vm_eq_resized_task_eq_resize_confirming(self):
+        """409 error when instance states are resized,resize_confirming"""
+        self._test_create_image_409_base("resized", "resize_confirming")
+
+    def test_create_image_when_vm_eq_active_task_eq_resize_verify(self):
+        """409 error when instance states are active,resize_verify"""
+        self._test_create_image_409_base("active", "resize_verify")
+
+    def test_create_image_when_vm_eq_active_task_eq_updating_password(self):
+        """409 error when instance states are active,updating_password"""
+        self._test_create_image_409_base("active", "updating_password")
+
+    def test_create_image_when_vm_eq_active_task_eq_rebuilding(self):
+        """409 error when instance states are active,rebuilding"""
+        self._test_create_image_409_base("active", "rebuilding")
+
+    def test_create_image_when_vm_eq_active_task_eq_rebooting(self):
+        """409 error when instance states are active,rebooting"""
+        self._test_create_image_409_base("active", "rebooting")
+
+    def test_create_image_when_vm_eq_building_task_eq_deleting(self):
+        """409 error when instance states are building,deleting"""
+        self._test_create_image_409_base("building", "deleting")
+
+    def test_create_image_when_vm_eq_active_task_eq_deleting(self):
+        """409 error when instance states are active,deleting"""
+        self._test_create_image_409_base("active", "deleting")
+
+    def test_create_image_when_vm_eq_error_task_eq_building(self):
+        """409 error when instance states are error,building"""
+        self._test_create_image_409_base("error", "building")
+
+    def test_create_image_when_vm_eq_error_task_eq_none(self):
+        """409 error when instance states are error,None"""
+        self._test_create_image_409_base("error", None)
+
+    def test_create_image_when_vm_eq_deleted_task_eq_none(self):
+        """409 error when instance states are deleted,None"""
+        self._test_create_image_409_base("deleted", None)
+
+    def test_create_image_when_vm_eq_resized_task_eq_none(self):
+        """409 error when instance states are resized,None"""
+        self._test_create_image_409_base("resized", None)
+
+    def test_create_image_when_vm_eq_error_task_eq_resize_prep(self):
+        """409 error when instance states are error,resize_prep"""
+        self._test_create_image_409_base("error", "resize_prep")
diff --git a/tempest/tests/compute/test_server_advanced_ops.py b/tempest/tests/compute/test_server_advanced_ops.py
new file mode 100644
index 0000000..d6962f4
--- /dev/null
+++ b/tempest/tests/compute/test_server_advanced_ops.py
@@ -0,0 +1,82 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 OpenStack, LLC
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import logging
+
+import nose
+
+from tempest.common.utils.data_utils import rand_name
+from tempest import test
+
+LOG = logging.getLogger(__name__)
+
+
+class TestServerAdvancedOps(test.ComputeDefaultClientTest):
+
+    """
+    This test case stresses some advanced server instance operations:
+
+     * Resizing an instance
+    """
+
+    @classmethod
+    def setUpClass(cls):
+        super(TestServerAdvancedOps, cls).setUpClass()
+
+        if not cls.config.compute.resize_available:
+            msg = "Skipping test - resize not available on this host"
+            raise nose.SkipTest(msg)
+
+        resize_flavor = cls.config.compute.flavor_ref_alt
+
+        if resize_flavor == cls.config.compute.flavor_ref:
+            msg = "Skipping test - flavor_ref and flavor_ref_alt are identical"
+            raise nose.SkipTest(msg)
+
+    @classmethod
+    def tearDownClass(cls):
+        for thing in cls.resources:
+            thing.delete()
+
+    def test_resize_server_confirm(self):
+        # We create an instance for use in this test
+        i_name = rand_name('instance')
+        flavor_id = self.config.compute.flavor_ref
+        base_image_id = self.config.compute.image_ref
+        self.instance = self.client.servers.create(
+                i_name, base_image_id, flavor_id)
+        try:
+            self.assertEqual(self.instance.name, i_name)
+            self.set_resource('instance', self.instance)
+        except AttributeError:
+            self.fail("Instance not successfully created.")
+
+        self.assertEqual(self.instance.status, 'BUILD')
+        instance_id = self.get_resource('instance').id
+        self.status_timeout(self.client.servers, instance_id, 'ACTIVE')
+        instance = self.get_resource('instance')
+        instance_id = instance.id
+        resize_flavor = self.config.compute.flavor_ref_alt
+
+        LOG.debug("Resizing instance %s from flavor %s to flavor %s",
+                  instance.id, instance.flavor, resize_flavor)
+        instance.resize(resize_flavor)
+        self.status_timeout(self.client.servers, instance_id, 'VERIFY_RESIZE')
+
+        LOG.debug("Confirming resize of instance %s", instance_id)
+        instance.confirm_resize()
+        self.status_timeout(self.client.servers, instance_id, 'ACTIVE')
diff --git a/tempest/tests/compute/test_server_basic_ops.py b/tempest/tests/compute/test_server_basic_ops.py
new file mode 100644
index 0000000..8af5212
--- /dev/null
+++ b/tempest/tests/compute/test_server_basic_ops.py
@@ -0,0 +1,140 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 OpenStack, LLC
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import logging
+
+from tempest.common.utils.data_utils import rand_name
+from tempest import smoke
+
+LOG = logging.getLogger(__name__)
+
+
+class TestServerBasicOps(smoke.ComputeSmokeTest):
+
+    """
+    This smoke test case follows this basic set of operations:
+
+     * Create a keypair for use in launching an instance
+     * Create a security group to control network access to the instance
+     * Add simple permissive rules to the security group
+     * Launch an instance
+     * Pause/unpause the instance
+     * Suspend/resume the instance
+     * Terminate the instance
+    """
+
+    def test_001_create_keypair(self):
+        kp_name = rand_name('keypair-smoke')
+        self.keypair = self.client.keypairs.create(kp_name)
+        try:
+            self.assertEqual(self.keypair.id, kp_name)
+            self.set_resource('keypair', self.keypair)
+        except AttributeError:
+            self.fail("Keypair object not successfully created.")
+
+    def test_002_create_security_group(self):
+        sg_name = rand_name('secgroup-smoke')
+        sg_desc = sg_name + " description"
+        self.secgroup = self.client.security_groups.create(sg_name, sg_desc)
+        try:
+            self.assertEqual(self.secgroup.name, sg_name)
+            self.assertEqual(self.secgroup.description, sg_desc)
+            self.set_resource('secgroup', self.secgroup)
+        except AttributeError:
+            self.fail("SecurityGroup object not successfully created.")
+
+        # Add rules to the security group
+        rulesets = [
+            {
+                'ip_protocol': 'tcp',
+                'from_port': 1,
+                'to_port': 65535,
+                'cidr': '0.0.0.0/0',
+                'group_id': self.secgroup.id
+            },
+            {
+                'ip_protocol': 'icmp',
+                'from_port': -1,
+                'to_port': -1,
+                'cidr': '0.0.0.0/0',
+                'group_id': self.secgroup.id
+            }
+        ]
+        for ruleset in rulesets:
+            try:
+                self.client.security_group_rules.create(
+                                                   self.secgroup.id, **ruleset)
+            except Exception:
+                self.fail("Failed to create rule in security group.")
+
+    def test_003_boot_instance(self):
+        i_name = rand_name('instance')
+        flavor_id = self.config.compute.flavor_ref
+        base_image_id = self.config.compute.image_ref
+        create_kwargs = {
+            'key_name': self.get_resource('keypair').id
+        }
+        self.instance = self.client.servers.create(
+                i_name, base_image_id, flavor_id, **create_kwargs)
+        try:
+            self.assertEqual(self.instance.name, i_name)
+            self.set_resource('instance', self.instance)
+        except AttributeError:
+            self.fail("Instance not successfully created.")
+
+        self.assertEqual(self.instance.status, 'BUILD')
+
+    def test_004_wait_on_active(self):
+        instance_id = self.get_resource('instance').id
+        self.status_timeout(self.client.servers, instance_id, 'ACTIVE')
+
+    def test_005_pause_server(self):
+        instance = self.get_resource('instance')
+        instance_id = instance.id
+        LOG.debug("Pausing instance %s. Current status: %s",
+                  instance_id, instance.status)
+        instance.pause()
+        self.status_timeout(self.client.servers, instance_id, 'PAUSED')
+
+    def test_006_unpause_server(self):
+        instance = self.get_resource('instance')
+        instance_id = instance.id
+        LOG.debug("Unpausing instance %s. Current status: %s",
+                  instance_id, instance.status)
+        instance.unpause()
+        self.status_timeout(self.client.servers, instance_id, 'ACTIVE')
+
+    def test_007_suspend_server(self):
+        instance = self.get_resource('instance')
+        instance_id = instance.id
+        LOG.debug("Suspending instance %s. Current status: %s",
+                  instance_id, instance.status)
+        instance.suspend()
+        self.status_timeout(self.client.servers, instance_id, 'SUSPENDED')
+
+    def test_008_resume_server(self):
+        instance = self.get_resource('instance')
+        instance_id = instance.id
+        LOG.debug("Resuming instance %s. Current status: %s",
+                  instance_id, instance.status)
+        instance.resume()
+        self.status_timeout(self.client.servers, instance_id, 'ACTIVE')
+
+    def test_099_terminate_instance(self):
+        instance = self.get_resource('instance')
+        instance.delete()
+        self.remove_resource('instance')
diff --git a/tempest/tests/compute/test_servers_whitebox.py b/tempest/tests/compute/test_servers_whitebox.py
new file mode 100644
index 0000000..b4edab6
--- /dev/null
+++ b/tempest/tests/compute/test_servers_whitebox.py
@@ -0,0 +1,245 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 OpenStack, LLC
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest import exceptions
+from tempest import whitebox
+from tempest.tests.identity.base import BaseIdentityAdminTest
+from nose.plugins.attrib import attr
+
+
+@attr(type='whitebox')
+class ServersWhiteboxTest(whitebox.ComputeWhiteboxTest):
+
+    @classmethod
+    def setUpClass(cls):
+        super(ServersWhiteboxTest, cls).setUpClass()
+        BaseIdentityAdminTest.setUpClass()
+        cls.client = cls.servers_client
+        cls.img_client = cls.images_client
+        cls.admin_client = BaseIdentityAdminTest.client
+
+        cls.connection, cls.meta = cls.get_db_handle_and_meta()
+
+        resp, tenants = cls.admin_client.list_tenants()
+        cls.tenant_id = [tnt['id'] for tnt in tenants if tnt['name'] ==
+                        cls.config.compute.tenant_name][0]
+
+        cls.shared_server = cls.create_server()
+
+    def tearDown(self):
+        for server in self.servers:
+            try:
+                self.client.delete_server(server['id'])
+            except exceptions.NotFound:
+                continue
+
+    def test_create_server_vcpu_quota_full(self):
+        """Disallow server creation when tenant's vcpu quota is full"""
+        quotas = self.meta.tables['quotas']
+        stmt = quotas.select().where(
+                              quotas.c.project_id == self.tenant_id).where(
+                              quotas.c.resource == 'cores')
+        result = self.connection.execute(stmt).first()
+
+        # Set vcpu quota for tenant if not already set
+        if not result:
+            cores_hard_limit = 2
+            stmt = quotas.insert().values(deleted=0,
+                                project_id=self.tenant_id, resource='cores',
+                                hard_limit=cores_hard_limit)
+
+            self.connection.execute(stmt, autocommit=True)
+        else:
+            cores_hard_limit = result.hard_limit
+
+        # Create servers assuming 1 VCPU per instance i.e flavor_id=1
+        try:
+            for count in range(cores_hard_limit + 1):
+                self.create_server()
+        except exceptions.OverLimit:
+            pass
+        else:
+            self.fail("Could create servers over the VCPU quota limit")
+        finally:
+            stmt = quotas.delete()
+            self.connection.execute(stmt, autocommit=True)
+
+    def test_create_server_memory_quota_full(self):
+        """Disallow server creation when tenant's memory quota is full"""
+        quotas = self.meta.tables['quotas']
+        stmt = quotas.select().where(
+                              quotas.c.project_id == self.tenant_id).where(
+                              quotas.c.resource == 'ram')
+        result = self.connection.execute(stmt).first()
+
+        # Set memory quota for tenant if not already set
+        if not result:
+            ram_hard_limit = 1024
+            stmt = quotas.insert().values(deleted=0,
+                                project_id=self.tenant_id, resource='ram',
+                                hard_limit=ram_hard_limit)
+
+            self.connection.execute(stmt, autocommit=True)
+        else:
+            ram_hard_limit = result.hard_limit
+
+        try:
+            # Set a hard range of 3 servers for reaching the RAM quota
+            for count in range(3):
+                self.create_server()
+        except exceptions.OverLimit:
+            pass
+        else:
+            self.fail("Could create servers over the RAM quota limit")
+        finally:
+            stmt = quotas.delete()
+            self.connection.execute(stmt, autocommit=True)
+
+    def update_state(self, server_id, vm_state, task_state, deleted=0):
+        """Update states of an instance in database for validation"""
+        if not task_state:
+            task_state = 'NULL'
+
+        instances = self.meta.tables['instances']
+        stmt = instances.update().where(instances.c.uuid == server_id).values(
+                                                               deleted=deleted,
+                                                             vm_state=vm_state,
+                                                         task_state=task_state)
+        self.connection.execute(stmt, autocommit=True)
+
+    def _test_delete_server_base(self, vm_state, task_state):
+        """
+        Base method for delete server tests based on vm and task states.
+        Validates that the server terminates successfully.
+        """
+        try:
+            server = self.create_server()
+            self.update_state(server['id'], vm_state, task_state)
+
+            resp, body = self.client.delete_server(server['id'])
+            self.assertEqual('204', resp['status'])
+            self.client.wait_for_server_termination(server['id'],
+                                                    ignore_error=True)
+
+            instances = self.meta.tables['instances']
+            stmt = instances.select().where(instances.c.uuid == server['id'])
+            result = self.connection.execute(stmt).first()
+
+            self.assertEqual(1, result.deleted)
+            self.assertEqual('deleted', result.vm_state)
+            self.assertEqual(None, result.task_state)
+        except Exception:
+            self.fail("Should be able to delete a server when vm_state=%s and "
+                      "task_state=%s" % (vm_state, task_state))
+
+    def _test_delete_server_403_base(self, vm_state, task_state):
+        """
+        Base method for delete server tests based on vm and task states.
+        Validates for 403 error code.
+        """
+        try:
+            self.update_state(self.shared_server['id'], vm_state, task_state)
+
+            self.assertRaises(exceptions.Unauthorized,
+                           self.client.delete_server, self.shared_server['id'])
+        except Exception:
+            self.fail("Should not allow delete server when vm_state=%s and "
+                    "task_state=%s" % (vm_state, task_state))
+        finally:
+            self.update_state(self.shared_server['id'], 'active', None)
+
+    def test_delete_server_when_vm_eq_building_task_eq_networking(self):
+        """Delete server when instance states are building,networking"""
+        self._test_delete_server_base('building', 'networking')
+
+    def test_delete_server_when_vm_eq_building_task_eq_bdm(self):
+        """
+        Delete server when instance states are building,block device mapping
+        """
+        self._test_delete_server_base('building', 'block_device_mapping')
+
+    def test_delete_server_when_vm_eq_building_task_eq_spawning(self):
+        """Delete server when instance states are building,spawning"""
+        self._test_delete_server_base('building', 'spawning')
+
+    def test_delete_server_when_vm_eq_active_task_eq_image_backup(self):
+        """Delete server when instance states are active,image_backup"""
+        self._test_delete_server_base('active', 'image_backup')
+
+    def test_delete_server_when_vm_eq_active_task_eq_rebuilding(self):
+        """Delete server when instance states are active,rebuilding"""
+        self._test_delete_server_base('active', 'rebuilding')
+
+    def test_delete_server_when_vm_eq_error_task_eq_spawning(self):
+        """Delete server when instance states are error,spawning"""
+        self._test_delete_server_base('error', 'spawning')
+
+    def test_delete_server_when_vm_eq_resized_task_eq_resize_prep(self):
+        """Delete server when instance states are resized,resize_prep"""
+        self._test_delete_server_403_base('resized', 'resize_prep')
+
+    def test_delete_server_when_vm_eq_resized_task_eq_resize_migrating(self):
+        """Delete server when instance states are resized,resize_migrating"""
+        self._test_delete_server_403_base('resized', 'resize_migrating')
+
+    def test_delete_server_when_vm_eq_resized_task_eq_resize_migrated(self):
+        """Delete server when instance states are resized,resize_migrated"""
+        self._test_delete_server_403_base('resized', 'resize_migrated')
+
+    def test_delete_server_when_vm_eq_resized_task_eq_resize_finish(self):
+        """Delete server when instance states are resized,resize_finish"""
+        self._test_delete_server_403_base('resized', 'resize_finish')
+
+    def test_delete_server_when_vm_eq_resized_task_eq_resize_reverting(self):
+        """Delete server when instance states are resized,resize_reverting"""
+        self._test_delete_server_403_base('resized', 'resize_reverting')
+
+    def test_delete_server_when_vm_eq_resized_task_eq_resize_confirming(self):
+        """Delete server when instance states are resized,resize_confirming"""
+        self._test_delete_server_403_base('resized', 'resize_confirming')
+
+    def test_delete_server_when_vm_eq_active_task_eq_resize_verify(self):
+        """Delete server when instance states are active,resize_verify"""
+        self._test_delete_server_base('active', 'resize_verify')
+
+    def test_delete_server_when_vm_eq_active_task_eq_rebooting(self):
+        """Delete server when instance states are active,rebooting"""
+        self._test_delete_server_base('active', 'rebooting')
+
+    def test_delete_server_when_vm_eq_building_task_eq_deleting(self):
+        """Delete server when instance states are building,deleting"""
+        self._test_delete_server_base('building', 'deleting')
+
+    def test_delete_server_when_vm_eq_active_task_eq_deleting(self):
+        """Delete server when instance states are active,deleting"""
+        self._test_delete_server_base('active', 'deleting')
+
+    def test_delete_server_when_vm_eq_error_task_eq_none(self):
+        """Delete server when instance states are error,None"""
+        self._test_delete_server_base('error', None)
+
+    def test_delete_server_when_vm_eq_resized_task_eq_none(self):
+        """Delete server when instance states are resized,None"""
+        self._test_delete_server_403_base('resized', None)
+
+    def test_delete_server_when_vm_eq_error_task_eq_resize_prep(self):
+        """Delete server when instance states are error,resize_prep"""
+        self._test_delete_server_base('error', 'resize_prep')
+
+    def test_delete_server_when_vm_eq_error_task_eq_error(self):
+        """Delete server when instance states are error,error"""
+        self._test_delete_server_base('error', 'error')
diff --git a/tempest/whitebox.py b/tempest/whitebox.py
new file mode 100644
index 0000000..5978e92
--- /dev/null
+++ b/tempest/whitebox.py
@@ -0,0 +1,170 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 OpenStack, LLC
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import logging
+import os
+import sys
+import shlex
+import subprocess
+
+import nose
+from sqlalchemy import create_engine, MetaData
+
+from tempest.common.utils.data_utils import rand_name
+from tempest.common.ssh import Client
+from tempest import exceptions
+from tempest import test
+from tempest.tests import compute
+
+LOG = logging.getLogger(__name__)
+
+
+class WhiteboxTest(object):
+
+    """
+    Base test case class mixin for "whitebox tests"
+
+    Whitebox tests are tests that have the following characteristics:
+
+     * Test common and advanced operations against a set of servers
+     * Use a client that can send random or invalid data to the API
+     * SSH into either a host or a guest in order to validate server state
+     * May execute SQL queries directly against internal databases to verify
+       the state of data records
+    """
+    pass
+
+
+class ComputeWhiteboxTest(test.ComputeFuzzClientTest, WhiteboxTest):
+
+    """
+    Base whitebox test case class for OpenStack Compute API (Nova)
+    """
+
+    @classmethod
+    def setUpClass(cls):
+        if not compute.WHITEBOX_ENABLED:
+            msg = "Whitebox testing disabled"
+            raise nose.SkipTest(msg)
+
+        super(ComputeWhiteboxTest, cls).setUpClass()
+
+        # Add some convenience attributes that tests use...
+        cls.nova_dir = cls.config.compute.source_dir
+        cls.compute_bin_dir = cls.config.compute.bin_dir
+        cls.compute_config_path = cls.config.compute.config_path
+        cls.servers_client = cls.manager.servers_client
+        cls.images_client = cls.manager.images_client
+        cls.flavors_client = cls.manager.flavors_client
+        cls.extensions_client = cls.manager.extensions_client
+        cls.floating_ips_client = cls.manager.floating_ips_client
+        cls.keypairs_client = cls.manager.keypairs_client
+        cls.security_groups_client = cls.manager.security_groups_client
+        cls.console_outputs_client = cls.manager.console_outputs_client
+        cls.limits_client = cls.manager.limits_client
+        cls.volumes_client = cls.manager.volumes_client
+        cls.build_interval = cls.config.compute.build_interval
+        cls.build_timeout = cls.config.compute.build_timeout
+        cls.ssh_user = cls.config.compute.ssh_user
+        cls.image_ref = cls.config.compute.image_ref
+        cls.image_ref_alt = cls.config.compute.image_ref_alt
+        cls.flavor_ref = cls.config.compute.flavor_ref
+        cls.flavor_ref_alt = cls.config.compute.flavor_ref_alt
+        cls.servers = []
+
+    @classmethod
+    def tearDownClass(cls):
+        # NOTE(jaypipes): Tests often add things in a particular order
+        # so we destroy resources in the reverse order in which resources
+        # are added to the test class object
+        if not cls.resources:
+            return
+        thing = cls.resources.pop()
+        while True:
+            LOG.debug("Deleting %r from shared resources of %s" %
+                      (thing, cls.__name__))
+            # Resources in novaclient all have a delete() method
+            # which destroys the resource...
+            thing.delete()
+            if not cls.resources:
+                return
+            thing = cls.resources.pop()
+
+    @classmethod
+    def create_server(cls, image_id=None):
+        """Wrapper utility that returns a test server"""
+        server_name = rand_name(cls.__name__ + "-instance")
+        flavor = cls.flavor_ref
+        if not image_id:
+            image_id = cls.image_ref
+
+        resp, server = cls.servers_client.create_server(
+                                                server_name, image_id, flavor)
+        cls.servers_client.wait_for_server_status(server['id'], 'ACTIVE')
+        cls.servers.append(server)
+        return server
+
+    @classmethod
+    def get_db_handle_and_meta(cls, database='nova'):
+        """Return a connection handle and metadata of an OpenStack database"""
+        engine_args = {"echo": False,
+                       "convert_unicode": True,
+                       "pool_recycle": 3600
+                       }
+
+        try:
+            engine = create_engine(cls.config.compute.db_uri, **engine_args)
+            connection = engine.connect()
+            meta = MetaData()
+            meta.reflect(bind=engine)
+
+        except Exception, e:
+            raise exceptions.SQLException(message=e)
+
+        return connection, meta
+
+    def nova_manage(self, category, action, params):
+        """Executes nova-manage command for the given action"""
+
+        nova_manage_path = os.path.join(self.compute_bin_dir, 'nova-manage')
+        cmd = ' '.join([nova_manage_path, category, action, params])
+
+        if self.deploy_mode == 'devstack-local':
+            if not os.path.isdir(self.nova_dir):
+                sys.exit("Cannot find Nova source directory: %s" %
+                         self.nova_dir)
+
+            cmd = shlex.split(cmd)
+            result = subprocess.Popen(cmd, stdout=subprocess.PIPE)
+
+        # TODO(rohitk): Need to define host connection parameters in config
+        else:
+            client = self.get_ssh_connection(self.config.whitebox.api_host,
+                                            self.config.whitebox.api_user,
+                                            self.config.whitebox.api_passwd)
+            result = client.exec_command(cmd)
+
+        return result
+
+    def get_ssh_connection(self, host, username, password):
+        """Create an SSH connection object to a host"""
+        ssh_timeout = self.config.compute.ssh_timeout
+        ssh_client = Client(host, username, password, ssh_timeout)
+        if not ssh_client.test_connection_auth():
+            raise exceptions.SSHTimeout()
+        else:
+            return ssh_client
diff --git a/tools/pip-requires b/tools/pip-requires
index e3ee18b..2e1dc96 100644
--- a/tools/pip-requires
+++ b/tools/pip-requires
@@ -1,9 +1,5 @@
 anyjson
 nose
-argparse
 httplib2>=0.7.0
 pika
-dnspython
-ipython
 unittest2
-paramiko
diff --git a/tools/test-requires b/tools/test-requires
index 77a6614..ecf7c3e 100644
--- a/tools/test-requires
+++ b/tools/test-requires
@@ -1,2 +1,5 @@
 pep8>=0.5.0
 pylint==0.19
+# Needed for whitebox testing
+paramiko
+sqlalchemy