Merge "Permits a list of values for the "type=" tests attribute"
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index b64b047..7920ab5 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -79,6 +79,9 @@
 # Name of a user used to authenticated to an instance
 ssh_user = cirros
 
+# Visible fixed network name
+fixed_network_name = private
+
 # Network id used for SSH (public, private, etc)
 network_for_ssh = private
 
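A minimal sketch of reading the new option outside of Tempest's own config machinery, assuming it sits in the [compute] section next to the surrounding entries; the section name and file path are assumptions for illustration only::

  # Hypothetical Python 2 snippet; the "compute" section name is assumed
  # from the neighbouring options in tempest.conf.sample.
  import ConfigParser

  parser = ConfigParser.SafeConfigParser()
  parser.read('etc/tempest.conf')
  fixed_network_name = parser.get('compute', 'fixed_network_name')
  print('Fixed network: %s' % fixed_network_name)  # e.g. "private"
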
diff --git a/stress/README.rst b/stress/README.rst
deleted file mode 100644
index d935289..0000000
--- a/stress/README.rst
+++ /dev/null
@@ -1,68 +0,0 @@
-Quanta Research Cambridge OpenStack Stress Test System
-======================================================
-
-Nova is a distributed, asynchronous system that is prone to race condition
-bugs. These bugs will not be easily found during
-functional testing but will be encountered by users in large deployments in a
-way that is hard to debug. The stress test tries to cause these bugs to happen
-in a more controlled environment.
-
-The basic idea of the test is that there are a number of actions, roughly
-corresponding to the Compute API, that are fired pseudo-randomly at a nova
-cluster as fast as possible. These actions consist of what to do, how to
-verify success, and a state filter to make sure that the operation makes sense.
-For example, if the action is to reboot a server and none are active, nothing
-should be done. A test case is a set of actions to be performed and the
-probability that each action should be selected. There are also parameters
-controlling rate of fire and stuff like that.
-
-This test framework is designed to stress test a Nova cluster. Hence,
-you must have a working Nova cluster with rate limiting turned off.
-
-Environment
-------------
-This particular framework assumes your working Nova cluster understands Nova
-API 2.0. The stress tests can read the logs from the cluster. To enable this
-you have to provide the hostname to call 'nova-manage' and
-the private key and user name for ssh to the cluster in the
-[stress] section of tempest.conf. You also need to provide the
-value of --logdir in nova.conf:
-
-  host_private_key_path=<path to private ssh key>
-  host_admin_user=<name of user for ssh command>
-  nova_logdir=<value of --logdir in nova.conf>
-  controller=<hostname for calling nova-manage>
-  max_instances=<limit on instances that will be created>
-
-Also, make sure to set
-
-log_level=CRITICAL
-
-so that the API client does not log failed calls which are expected while
-running stress tests.
-
-The stress test needs the top-level tempest directory to be on PYTHONPATH
-if you are not using nosetests to run.
-
-
-Running the sample test
------------------------
-
-To test your installation, do the following (from the tempest directory):
-
-  PYTHONPATH=. python stress/tests/user_script_sample.py
-
-This sample test tries to create a few VMs and kill a few VMs.
-
-
-Additional Tools
-----------------
-
-Sometimes the tests don't finish, or there are failures. In these
-cases, you may want to clean out the nova cluster. We have provided
-some scripts to do this in the ``tools`` subdirectory. To use these
-tools, you will need to install python-novaclient.
-You can then use the following script to destroy any keypairs,
-floating ips, and servers::
-
-stress/tools/nova_destroy_all.py
diff --git a/stress/basher.py b/stress/basher.py
deleted file mode 100644
index e34738f..0000000
--- a/stress/basher.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright 2011 Quanta Research Cambridge, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License");
-#    you may not use this file except in compliance with the License.
-#    You may obtain a copy of the License at
-#
-#        http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS,
-#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#    See the License for the specific language governing permissions and
-#    limitations under the License.
-"""Class to describe actions to be included in a stress test."""
-
-
-class BasherAction(object):
-    """
-    Used to describe each action that you would like to include in a test run.
-    """
-
-    def __init__(self, test_case, probability, pargs=[], kargs={}):
-        """
-        `test_case`  : the name of the class that implements the action
-        `pargs`      : positional arguments to the constructor of `test_case`
-        `kargs`      : keyword arguments to the constructor of `test_case`
-        `probability`: frequency that each action
-        """
-        self.test_case = test_case
-        self.pargs = pargs
-        self.kargs = kargs
-        self.probability = probability
-
-    def invoke(self, manager, state):
-        """
-        Calls the `run` method of the `test_case`.
-        """
-        return self.test_case.run(manager, state, *self.pargs, **self.kargs)
-
-    def __str__(self):
-        return self.test_case.__class__.__name__
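A workload specification is simply a list of BasherAction instances whose probabilities sum to 100 (the deleted stress/driver.py asserts this in _create_cases); a minimal sketch mirroring the deleted sample scripts shown later in this diff::

  # Hypothetical workload mix; probabilities must total 100.
  from stress.basher import BasherAction
  from stress.test_server_actions import TestRebootVM
  from stress.test_servers import TestCreateVM, TestKillActiveVM

  choice_spec = [
      BasherAction(TestCreateVM(), 40),
      BasherAction(TestKillActiveVM(), 40),
      BasherAction(TestRebootVM(), 20, kargs={'type': 'HARD'}),
  ]
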
diff --git a/stress/config.py b/stress/config.py
deleted file mode 100755
index 25cb910..0000000
--- a/stress/config.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# Copyright 2011 Quanta Research Cambridge, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License");
-#    you may not use this file except in compliance with the License.
-#    You may obtain a copy of the License at
-#
-#        http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS,
-#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#    See the License for the specific language governing permissions and
-#    limitations under the License.
-
-
-class StressConfig(object):
-    """Provides configuration information for whitebox stress tests."""
-
-    def __init__(self, conf):
-        self.conf = conf
-
-    @property
-    def host_private_key_path(self):
-        """Path to ssh key for logging into compute nodes."""
-        return self.conf.compute.path_to_private_key
-
-    @property
-    def host_admin_user(self):
-        """Username for logging into compute nodes."""
-        return self.conf.compute.ssh_user
-
-    @property
-    def nova_logdir(self):
-        """Directory containing log files on the compute nodes."""
-        return self.conf.stress.nova_logdir
-
-    @property
-    def controller(self):
-        """Controller host."""
-        return self.conf.stress.controller
-
-    @property
-    def max_instances(self):
-        """Maximum number of instances to create during test."""
-        return self.conf.stress.max_instances
diff --git a/stress/driver.py b/stress/driver.py
deleted file mode 100644
index 533c000..0000000
--- a/stress/driver.py
+++ /dev/null
@@ -1,274 +0,0 @@
-# Copyright 2011 Quanta Research Cambridge, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License");
-#    you may not use this file except in compliance with the License.
-#    You may obtain a copy of the License at
-#
-#        http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS,
-#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#    See the License for the specific language governing permissions and
-#    limitations under the License.
-"""The entry point for the execution of a workloadTo execute a workload.
-Users pass in a description of the workload and a nova manager object
-to the bash_openstack function call"""
-
-import datetime
-import random
-import time
-import urlparse
-
-from config import StressConfig
-from state import ClusterState
-from state import FloatingIpState
-from state import KeyPairState
-from state import VolumeState
-import stress.utils
-from test_case import logging
-
-from tempest.common.utils.data_utils import rand_name
-
-# setup logging to file
-logging.basicConfig(
-    format='%(asctime)s %(name)-20s %(levelname)-8s %(message)s',
-    datefmt='%m-%d %H:%M:%S',
-    filename="stress.debug.log",
-    filemode="w",
-    level=logging.DEBUG,
-)
-
-# define a Handler which writes INFO messages or higher to the sys.stdout
-_console = logging.StreamHandler()
-_console.setLevel(logging.INFO)
-# set a format which is simpler for console use
-_formatter = logging.Formatter('%(name)-20s: %(levelname)-8s %(message)s')
-# tell the handler to use this format
-_console.setFormatter(_formatter)
-# add the handler to the root logger
-logging.getLogger('').addHandler(_console)
-
-
-def _create_cases(choice_spec):
-    """
-    Generate a workload of tests from workload description
-    """
-    cases = []
-    count = 0
-    for choice in choice_spec:
-        p = choice.probability
-        for i in range(p):
-            cases.append(choice)
-        i = i + p
-        count = count + p
-    assert(count == 100)
-    return cases
-
-
-def _get_compute_nodes(keypath, user, controller):
-    """
-    Returns a list of active compute nodes. List is generated by running
-    nova-manage on the controller.
-    """
-    nodes = []
-    if keypath is None or user is None:
-        return nodes
-    cmd = "nova-manage service list | grep ^nova-compute"
-    lines = stress.utils.ssh(keypath, user, controller, cmd).split('\n')
-    # For example: nova-compute xg11eth0 nova enabled :-) 2011-10-31 18:57:46
-    # This is fragile but there is, at present, no other way to get this info.
-    for line in lines:
-        words = line.split()
-        if len(words) > 0 and words[4] == ":-)":
-            nodes.append(words[1])
-    return nodes
-
-
-def _error_in_logs(keypath, logdir, user, nodes):
-    """
-    Detect errors in the nova log files on the controller and compute nodes.
-    """
-    grep = 'egrep "ERROR\|TRACE" %s/*.log' % logdir
-    for node in nodes:
-        errors = stress.utils.ssh(keypath, user, node, grep, check=False)
-        if len(errors) > 0:
-            logging.error('%s: %s' % (node, errors))
-            return True
-    return False
-
-
-def create_initial_vms(manager, state, count):
-    image = manager.config.compute.image_ref
-    flavor = manager.config.compute.flavor_ref
-    servers = []
-    logging.info('Creating %d vms' % count)
-    for _ in xrange(count):
-        name = rand_name('initial_vm-')
-        _, server = manager.servers_client.create_server(name, image, flavor)
-        servers.append(server)
-    for server in servers:
-        manager.servers_client.wait_for_server_status(server['id'], 'ACTIVE')
-        logging.info('Server Name: %s Id: %s' % (name, server['id']))
-        state.set_instance_state(server['id'], (server, 'ACTIVE'))
-
-
-def create_initial_floating_ips(manager, state, count):
-    logging.info('Creating %d floating ips' % count)
-    for _ in xrange(count):
-        _, ip = manager.floating_ips_client.create_floating_ip()
-        logging.info('Ip: %s' % ip['ip'])
-        state.add_floating_ip(FloatingIpState(ip))
-
-
-def create_initial_keypairs(manager, state, count):
-    logging.info('Creating %d keypairs' % count)
-    for _ in xrange(count):
-        name = rand_name('keypair-')
-        _, keypair = manager.keypairs_client.create_keypair(name)
-        logging.info('Keypair: %s' % name)
-        state.add_keypair(KeyPairState(keypair))
-
-
-def create_initial_volumes(manager, state, count):
-    volumes = []
-    logging.info('Creating %d volumes' % count)
-    for _ in xrange(count):
-        name = rand_name('volume-')
-        _, volume = manager.volumes_client.create_volume(size=1,
-                                                         display_name=name)
-        volumes.append(volume)
-    for volume in volumes:
-        manager.volumes_client.wait_for_volume_status(volume['id'],
-                                                      'available')
-        logging.info('Volume Name: %s Id: %s' % (name, volume['id']))
-        state.add_volume(VolumeState(volume))
-
-
-def bash_openstack(manager,
-                   choice_spec,
-                   **kwargs):
-    """
-    Workload driver. Executes a workload as specified by the `choice_spec`
-    parameter against a nova-cluster.
-
-    `manager`  : Manager object
-    `choice_spec` : list of BasherChoice actions to run on the cluster
-    `kargs`       : keyword arguments to the constructor of `test_case`
-                    `duration`   = how long this test should last (3 sec)
-                    `sleep_time` = time to sleep between actions (in msec)
-                    `test_name`  = human readable workload description
-                                   (default: unnamed test)
-                    `max_vms`    = maximum number of instances to launch
-                                   (default: 32)
-                    `seed`       = random seed (default: None)
-    """
-    stress_config = StressConfig(manager.config)
-    # get keyword arguments
-    duration = kwargs.get('duration', datetime.timedelta(seconds=10))
-    seed = kwargs.get('seed', None)
-    sleep_time = float(kwargs.get('sleep_time', 3000)) / 1000
-    max_vms = int(kwargs.get('max_vms', stress_config.max_instances))
-    test_name = kwargs.get('test_name', 'unamed test')
-
-    keypath = stress_config.host_private_key_path
-    user = stress_config.host_admin_user
-    logdir = stress_config.nova_logdir
-    host = urlparse.urlparse(manager.config.identity.uri).hostname
-    computes = _get_compute_nodes(keypath, user, host)
-    stress.utils.execute_on_all(keypath, user, computes,
-                                "rm -f %s/*.log" % logdir)
-    random.seed(seed)
-    cases = _create_cases(choice_spec)
-    state = ClusterState(max_vms=max_vms)
-    create_initial_keypairs(manager, state,
-                            int(kwargs.get('initial_keypairs', 0)))
-    create_initial_vms(manager, state,
-                       int(kwargs.get('initial_vms', 0)))
-    create_initial_floating_ips(manager, state,
-                                int(kwargs.get('initial_floating_ips', 0)))
-    create_initial_volumes(manager, state,
-                           int(kwargs.get('initial_volumes', 0)))
-    test_end_time = time.time() + duration.seconds
-
-    retry_list = []
-    last_retry = time.time()
-    cooldown = False
-    logcheck_count = 0
-    test_succeeded = True
-    logging.debug('=== Test \"%s\" on %s ===' %
-                  (test_name, time.asctime(time.localtime())))
-    for kw in kwargs:
-        logging.debug('\t%s = %s', kw, kwargs[kw])
-
-    while True:
-        if not cooldown:
-            if time.time() < test_end_time:
-                case = random.choice(cases)
-                logging.debug('Chose %s' % case)
-                retry = case.invoke(manager, state)
-                if retry is not None:
-                    retry_list.append(retry)
-            else:
-                logging.info('Cooling down...')
-                cooldown = True
-        if cooldown and len(retry_list) == 0:
-            if _error_in_logs(keypath, logdir, user, computes):
-                test_succeeded = False
-            break
-        # Retry verifications every 5 seconds.
-        if time.time() - last_retry > 5:
-            logging.debug('retry verifications for %d tasks', len(retry_list))
-            new_retry_list = []
-            for v in retry_list:
-                v.check_timeout()
-                if not v.retry():
-                    new_retry_list.append(v)
-            retry_list = new_retry_list
-            last_retry = time.time()
-        time.sleep(sleep_time)
-        # Check error logs after 100 actions
-        if logcheck_count > 100:
-            if _error_in_logs(keypath, logdir, user, computes):
-                test_succeeded = False
-                break
-            else:
-                logcheck_count = 0
-        else:
-            logcheck_count = logcheck_count + 1
-    # Cleanup
-    logging.info('Cleaning up: terminating virtual machines...')
-    vms = state.get_instances()
-    active_vms = [v for _k, v in vms.iteritems()
-                  if v and v[1] != 'TERMINATING']
-    for target in active_vms:
-        manager.servers_client.delete_server(target[0]['id'])
-        # check to see that the server was actually killed
-    for target in active_vms:
-        kill_id = target[0]['id']
-        i = 0
-        while True:
-            try:
-                manager.servers_client.get_server(kill_id)
-            except Exception:
-                break
-            i += 1
-            if i > 60:
-                _error_in_logs(keypath, logdir, user, computes)
-                raise Exception("Cleanup timed out")
-            time.sleep(1)
-        logging.info('killed %s' % kill_id)
-        state.delete_instance_state(kill_id)
-    for floating_ip_state in state.get_floating_ips():
-        manager.floating_ips_client.delete_floating_ip(
-            floating_ip_state.resource_id)
-    for keypair_state in state.get_keypairs():
-        manager.keypairs_client.delete_keypair(keypair_state.name)
-    for volume_state in state.get_volumes():
-        manager.volumes_client.delete_volume(volume_state.resource_id)
-
-    if test_succeeded:
-        logging.info('*** Test succeeded ***')
-    else:
-        logging.info('*** Test had errors ***')
-    return test_succeeded
diff --git a/stress/pending_action.py b/stress/pending_action.py
deleted file mode 100644
index abfa74d..0000000
--- a/stress/pending_action.py
+++ /dev/null
@@ -1,91 +0,0 @@
-# Copyright 2011 Quanta Research Cambridge, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License");
-#    you may not use this file except in compliance with the License.
-#    You may obtain a copy of the License at
-#
-#        http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS,
-#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#    See the License for the specific language governing permissions and
-#    limitations under the License.
-"""Describe follow-up actions using `PendingAction` class to verify
-that nova API calls such as create/delete are completed"""
-
-import logging
-import time
-
-from tempest.exceptions import TimeoutException
-
-
-class PendingAction(object):
-    """
-    Initialize and describe actions to verify that a Nova API call
-    is successful.
-    """
-
-    def __init__(self, nova_manager, timeout=None):
-        """
-        `nova_manager` : Manager object.
-        `timeout`   : time before we declare a TimeoutException
-        """
-        if timeout is None:
-            timeout = nova_manager.config.compute.build_timeout
-        self._manager = nova_manager
-        self._logger = logging.getLogger(self.__class__.__name__)
-        self._start_time = time.time()
-        self._timeout = timeout
-
-    def retry(self):
-        """
-        Invoked by user of this class to verify completion of
-        previous TestCase actions
-        """
-        return False
-
-    def check_timeout(self):
-        """Check for timeouts of TestCase actions."""
-        time_diff = time.time() - self._start_time
-        if time_diff > self._timeout:
-            self._logger.error('%s exceeded timeout of %d' %
-                               (self.__class__.__name__, self._timeout))
-            raise TimeoutException
-
-    def elapsed(self):
-        return time.time() - self._start_time
-
-
-class PendingServerAction(PendingAction):
-    """
-    Initialize and describe actions to verify that a Nova API call that
-    changes server state is successful.
-    """
-
-    def __init__(self, nova_manager, state, target_server, timeout=None):
-        """
-        `state`           : externally maintained data structure about
-                            state of VMs or other persistent objects in
-                            the nova cluster
-        `target_server`   : server that actions were performed on
-        """
-        super(PendingServerAction, self).__init__(nova_manager,
-                                                  timeout=timeout)
-        self._state = state
-        self._target = target_server
-
-    def _check_for_status(self, state_string):
-        """Check to see if the machine has transitioned states."""
-        t = time.time()  # for debugging
-        target = self._target
-        _resp, body = self._manager.servers_client.get_server(target['id'])
-        if body['status'] != state_string:
-            # grab the actual state as we think it is
-            temp_obj = self._state.get_instances()[target['id']]
-            self._logger.debug("machine %s in state %s" %
-                               (target['id'], temp_obj[1]))
-            self._logger.debug('%s, time: %d' % (temp_obj[1], time.time() - t))
-            return temp_obj[1]
-        self._logger.debug('%s, time: %d' % (state_string, time.time() - t))
-        return state_string
diff --git a/stress/state.py b/stress/state.py
deleted file mode 100644
index 9c31b76..0000000
--- a/stress/state.py
+++ /dev/null
@@ -1,117 +0,0 @@
-# Copyright 2011 Quanta Research Cambridge, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License");
-#    you may not use this file except in compliance with the License.
-#    You may obtain a copy of the License at
-#
-#        http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS,
-#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#    See the License for the specific language governing permissions and
-#    limitations under the License.
-
-
-class ClusterState(object):
-    """A class to store the state of various persistent objects in the Nova
-    cluster, e.g. instances, volumes.  Use methods to query to state which than
-    can be compared to the current state of the objects in Nova.
-    """
-
-    def __init__(self, **kwargs):
-        self._max_vms = kwargs.get('max_vms', 32)
-        self._instances = {}
-        self._floating_ips = []
-        self._keypairs = []
-        self._volumes = []
-
-    # instance state methods
-    def get_instances(self):
-        """return the instances dictionary that we believe are in cluster."""
-        return self._instances
-
-    def get_max_instances(self):
-        """return the maximum number of instances we can create."""
-        return self._max_vms
-
-    def set_instance_state(self, key, val):
-        """Store `val` in the dictionary indexed at `key`."""
-        self._instances[key] = val
-
-    def delete_instance_state(self, key):
-        """Delete state indexed at `key`."""
-        del self._instances[key]
-
-    #floating_ip state methods
-    def get_floating_ips(self):
-        """return the floating ips list for the cluster."""
-        return self._floating_ips
-
-    def add_floating_ip(self, floating_ip_state):
-        """Add floating ip."""
-        self._floating_ips.append(floating_ip_state)
-
-    def remove_floating_ip(self, floating_ip_state):
-        """Remove floating ip."""
-        self._floating_ips.remove(floating_ip_state)
-
-    # keypair methods
-    def get_keypairs(self):
-        """return the keypairs list for the cluster."""
-        return self._keypairs
-
-    def add_keypair(self, keypair_state):
-        """Add keypair."""
-        self._keypairs.append(keypair_state)
-
-    def remove_keypair(self, keypair_state):
-        """Remove keypair."""
-        self._keypairs.remove(keypair_state)
-
-    # volume methods
-    def get_volumes(self):
-        """return the volumes list for the cluster."""
-        return self._volumes
-
-    def add_volume(self, volume_state):
-        """Add volume."""
-        self._volumes.append(volume_state)
-
-    def remove_volume(self, volume_state):
-        """Remove volume."""
-        self._volumes.remove(volume_state)
-
-
-class ServerAssociatedState(object):
-    """Class that tracks resources that are associated with a particular server
-    such as a volume or floating ip.
-    """
-
-    def __init__(self, resource_id):
-        # The id of the server.
-        self.server_id = None
-        # The id of the resource that is attached to the server.
-        self.resource_id = resource_id
-        # True if in the process of attaching/detaching the resource.
-        self.change_pending = False
-
-
-class FloatingIpState(ServerAssociatedState):
-
-    def __init__(self, ip_desc):
-        super(FloatingIpState, self).__init__(ip_desc['id'])
-        self.address = ip_desc['ip']
-
-
-class VolumeState(ServerAssociatedState):
-
-    def __init__(self, volume_desc):
-        super(VolumeState, self).__init__(volume_desc['id'])
-
-
-class KeyPairState(object):
-
-    def __init__(self, keypair_spec):
-        self.name = keypair_spec['name']
-        self.private_key = keypair_spec['private_key']
diff --git a/stress/test_case.py b/stress/test_case.py
deleted file mode 100644
index d04ace0..0000000
--- a/stress/test_case.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright 2011 Quanta Research Cambridge, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License");
-#    you may not use this file except in compliance with the License.
-#    You may obtain a copy of the License at
-#
-#        http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS,
-#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#    See the License for the specific language governing permissions and
-#    limitations under the License.
-"""Abstract class for implementing an action. You only need to override
-the `run` method which specifies all the actual nova API class you wish
-to make."""
-
-
-import logging
-
-
-class StressTestCase(object):
-
-    def __init__(self):
-        self._logger = logging.getLogger(self.__class__.__name__)
-
-    def run(self, nova_manager, state_obj, *pargs, **kargs):
-        """Nova API methods to call that would modify state of the cluster."""
-        return
diff --git a/stress/test_floating_ips.py b/stress/test_floating_ips.py
deleted file mode 100755
index c5bad95..0000000
--- a/stress/test_floating_ips.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# Copyright 2011 Quanta Research Cambridge, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License");
-#    you may not use this file except in compliance with the License.
-#    You may obtain a copy of the License at
-#
-#        http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS,
-#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#    See the License for the specific language governing permissions and
-#    limitations under the License.
-
-import random
-import telnetlib
-
-from stress import pending_action
-from stress import test_case
-
-
-class TestChangeFloatingIp(test_case.StressTestCase):
-    """Add or remove a floating ip from a vm."""
-
-    def __init__(self):
-        super(TestChangeFloatingIp, self).__init__()
-        self.server_ids = None
-
-    def run(self, manager, state, *pargs, **kwargs):
-        if self.server_ids is None:
-            vms = state.get_instances()
-            self.server_ids = [k for k, v in vms.iteritems()]
-        floating_ip = random.choice(state.get_floating_ips())
-        if floating_ip.change_pending:
-            return None
-        floating_ip.change_pending = True
-        timeout = int(kwargs.get('timeout', 60))
-        cli = manager.floating_ips_client
-        if floating_ip.server_id is None:
-            server = random.choice(self.server_ids)
-            address = floating_ip.address
-            self._logger.info('Adding %s to server %s' % (address, server))
-            resp, body = cli.associate_floating_ip_to_server(address,
-                                                             server)
-            if resp.status != 202:
-                raise Exception("response: %s body: %s" % (resp, body))
-            floating_ip.server_id = server
-            return VerifyChangeFloatingIp(manager, floating_ip,
-                                          timeout, add=True)
-        else:
-            server = floating_ip.server_id
-            address = floating_ip.address
-            self._logger.info('Removing %s from server %s' % (address, server))
-            resp, body = cli.disassociate_floating_ip_from_server(address,
-                                                                  server)
-            if resp.status != 202:
-                raise Exception("response: %s body: %s" % (resp, body))
-            return VerifyChangeFloatingIp(manager, floating_ip,
-                                          timeout, add=False)
-
-
-class VerifyChangeFloatingIp(pending_action.PendingAction):
-    """Verify that floating ip was changed."""
-    def __init__(self, manager, floating_ip, timeout, add=None):
-        super(VerifyChangeFloatingIp, self).__init__(manager, timeout=timeout)
-        self.floating_ip = floating_ip
-        self.add = add
-
-    def retry(self):
-        """
-        Check to see that we can contact the server at its new address.
-        """
-        try:
-            conn = telnetlib.Telnet(self.floating_ip.address, 22, timeout=0.5)
-            conn.close()
-            if self.add:
-                self._logger.info('%s added [%.1f secs elapsed]' %
-                                  (self.floating_ip.address, self.elapsed()))
-                self.floating_ip.change_pending = False
-                return True
-        except Exception:
-            if not self.add:
-                self._logger.info('%s removed [%.1f secs elapsed]' %
-                                  (self.floating_ip.address, self.elapsed()))
-                self.floating_ip.change_pending = False
-                self.floating_ip.server_id = None
-                return True
-        return False
diff --git a/stress/test_server_actions.py b/stress/test_server_actions.py
deleted file mode 100644
index 3a7094d..0000000
--- a/stress/test_server_actions.py
+++ /dev/null
@@ -1,275 +0,0 @@
-# Copyright 2011 Quanta Research Cambridge, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License");
-#    you may not use this file except in compliance with the License.
-#    You may obtain a copy of the License at
-#
-#        http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS,
-#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#    See the License for the specific language governing permissions and
-#    limitations under the License.
-"""Defines various sub-classes of the `StressTestCase` and
-`PendingServerAction` class. Sub-classes of StressTestCase implement various
-API calls on the Nova cluster having to do with Server Actions. Each
-sub-class will have a corresponding PendingServerAction. These pending
-actions veriy that the API call was successful or not."""
-
-import random
-
-from stress import pending_action
-from stress import test_case
-import stress.utils
-from tempest.exceptions import Duplicate
-
-
-class TestRebootVM(test_case.StressTestCase):
-    """Reboot a server."""
-
-    def run(self, manager, state, *pargs, **kwargs):
-        """
-        Send an HTTP POST request to the nova cluster to reboot a random
-        server. Update state of object in `state` variable to indicate that
-        it is rebooting.
-        `manager` : Manager object
-        `state`      : `State` object describing our view of state of cluster
-        `pargs`      : positional arguments
-        `kwargs`     : keyword arguments, which include:
-                       `timeout` : how long to wait before issuing Exception
-                       `type`    : reboot type [SOFT or HARD] (default is SOFT)
-        """
-
-        vms = state.get_instances()
-        active_vms = [v for k, v in vms.iteritems() if v and v[1] == 'ACTIVE']
-        # no active vms, so return null
-        if not active_vms:
-            self._logger.info('no ACTIVE instances to reboot')
-            return
-
-        _reboot_arg = kwargs.get('type', 'SOFT')
-
-        # select active vm to reboot and then send request to nova controller
-        target = random.choice(active_vms)
-        reboot_target = target[0]
-        # It seems that doing a reboot when in reboot is an error.
-        try:
-            response, body = manager.servers_client.reboot(reboot_target['id'],
-                                                           _reboot_arg)
-        except Duplicate:
-            return
-
-        if (response.status != 202):
-            self._logger.error("response: %s" % response)
-            raise Exception
-
-        if _reboot_arg == 'SOFT':
-            reboot_state = 'REBOOT'
-        else:
-            reboot_state = 'HARD_REBOOT'
-
-        self._logger.info('waiting for machine %s to change to %s' %
-                          (reboot_target['id'], reboot_state))
-
-        return VerifyRebootVM(manager,
-                              state,
-                              reboot_target,
-                              reboot_state=reboot_state)
-
-
-class VerifyRebootVM(pending_action.PendingServerAction):
-    """Class to verify that the reboot completed."""
-    States = stress.utils.enum('REBOOT_CHECK', 'ACTIVE_CHECK')
-
-    def __init__(self, manager, state, target_server,
-                 reboot_state=None,
-                 ip_addr=None):
-        super(VerifyRebootVM, self).__init__(manager,
-                                             state,
-                                             target_server)
-        self._reboot_state = reboot_state
-        self._retry_state = self.States.REBOOT_CHECK
-
-    def retry(self):
-        """
-        Check to see that the server of interest has actually rebooted. Update
-        state to indicate that server is running again.
-        """
-        # don't run reboot verification if target machine has been
-        # deleted or is going to be deleted
-        target_id = self._target['id']
-        if (self._target['id'] not in self._state.get_instances().keys() or
-            self._state.get_instances()[target_id][1] == 'TERMINATING'):
-            self._logger.debug('machine %s is deleted or TERMINATING' %
-                               self._target['id'])
-            return True
-
-        reboot_state = self._reboot_state
-        if self._retry_state == self.States.REBOOT_CHECK:
-            server_state = self._check_for_status(reboot_state)
-            if server_state == reboot_state:
-                self._logger.info('machine %s ACTIVE -> %s' %
-                                  (self._target['id'], reboot_state))
-                self._state.set_instance_state(self._target['id'],
-                                               (self._target, reboot_state))
-                self._retry_state = self.States.ACTIVE_CHECK
-            elif server_state == 'ACTIVE':
-                # machine must have gone ACTIVE -> REBOOT ->ACTIVE
-                self._retry_state = self.States.ACTIVE_CHECK
-
-        elif self._retry_state == self.States.ACTIVE_CHECK:
-            if not self._check_for_status('ACTIVE'):
-                return False
-        target = self._target
-        self._logger.info('machine %s %s -> ACTIVE [%.1f secs elapsed]' %
-                          (target['id'], reboot_state, self.elapsed()))
-        self._state.set_instance_state(target['id'],
-                                      (target, 'ACTIVE'))
-
-        return True
-
-# This code needs to be tested against a cluster that supports resize.
-#class TestResizeVM(test_case.StressTestCase):
-#    """Resize a server (change flavors)."""
-#
-#    def run(self, manager, state, *pargs, **kwargs):
-#        """
-#        Send an HTTP POST request to the nova cluster to resize a random
-#        server. Update `state` to indicate server is rebooting.
-#
-#        `manager` : Manager object.
-#        `state`      : `State` object describing our view of state of cluster
-#        `pargs`      : positional arguments
-#        `kwargs`     : keyword arguments, which include:
-#                       `timeout` : how long to wait before issuing Exception
-#        """
-#
-#        vms = state.get_instances()
-#        active_vms = [v for k, v in vms.iteritems() if v and v[1] == 'ACTIVE']
-#        # no active vms, so return null
-#        if not active_vms:
-#            self._logger.debug('no ACTIVE instances to resize')
-#            return
-#
-#        target = random.choice(active_vms)
-#        resize_target = target[0]
-#        print resize_target
-#
-#        _timeout = kwargs.get('timeout', 600)
-#
-#        # determine current flavor type, and resize to a different type
-#        # m1.tiny -> m1.small, m1.small -> m1.tiny
-#        curr_size = int(resize_target['flavor']['id'])
-#        if curr_size == 1:
-#            new_size = 2
-#        else:
-#            new_size = 1
-#        flavor_type = { 'flavorRef': new_size } # resize to m1.small
-#
-#        post_body = json.dumps({'resize' : flavor_type})
-#        url = '/servers/%s/action' % resize_target['id']
-#        (response, body) = manager.request('POST',
-#                                              url,
-#                                              body=post_body)
-#
-#        if (response.status != 202):
-#            self._logger.error("response: %s" % response)
-#            raise Exception
-#
-#        state_name = check_for_status(manager, resize_target, 'RESIZE')
-#
-#        if state_name == 'RESIZE':
-#            self._logger.info('machine %s: ACTIVE -> RESIZE' %
-#                              resize_target['id'])
-#            state.set_instance_state(resize_target['id'],
-#                                    (resize_target, 'RESIZE'))
-#
-#        return VerifyResizeVM(manager,
-#                              state,
-#                              resize_target,
-#                              state_name=state_name,
-#                              timeout=_timeout)
-#
-#class VerifyResizeVM(pending_action.PendingServerAction):
-#    """Verify that resizing of a VM was successful."""
-#    States = enum('VERIFY_RESIZE_CHECK', 'ACTIVE_CHECK')
-#
-#    def __init__(self, manager, state, created_server,
-#                 state_name=None,
-#                 timeout=300):
-#        super(VerifyResizeVM, self).__init__(manager,
-#                                             state,
-#                                             created_server,
-#                                             timeout=timeout)
-#        self._retry_state = self.States.VERIFY_RESIZE_CHECK
-#        self._state_name = state_name
-#
-#    def retry(self):
-#        """
-#        Check to see that the server was actually resized. And change `state`
-#        of server to running again.
-#        """
-#        # don't run resize if target machine has been deleted
-#        # or is going to be deleted
-#        if (self._target['id'] not in self._state.get_instances().keys() or
-#            self._state.get_instances()[self._target['id']][1] ==
-#           'TERMINATING'):
-#            self._logger.debug('machine %s is deleted or TERMINATING' %
-#                               self._target['id'])
-#            return True
-#
-#        if self._retry_state == self.States.VERIFY_RESIZE_CHECK:
-#            if self._check_for_status('VERIFY_RESIZE') == 'VERIFY_RESIZE':
-#                # now issue command to CONFIRM RESIZE
-#                post_body = json.dumps({'confirmResize' : null})
-#                url = '/servers/%s/action' % self._target['id']
-#                (response, body) = manager.request('POST',
-#                                                      url,
-#                                                      body=post_body)
-#                if (response.status != 204):
-#                    self._logger.error("response: %s" % response)
-#                    raise Exception
-#
-#                self._logger.info(
-#                    'CONFIRMING RESIZE of machine %s [%.1f secs elapsed]' %
-#                    (self._target['id'], self.elapsed())
-#                    )
-#                state.set_instance_state(self._target['id'],
-#                                        (self._target, 'CONFIRM_RESIZE'))
-#
-#                # change states
-#                self._retry_state = self.States.ACTIVE_CHECK
-#
-#            return False
-#
-#        elif self._retry_state == self.States.ACTIVE_CHECK:
-#            if not self._check_manager("ACTIVE"):
-#                return False
-#            else:
-#                server = self._manager.get_server(self._target['id'])
-#
-#                # Find private IP of server?
-#                try:
-#                    (_, network) = server['addresses'].popitem()
-#                    ip = network[0]['addr']
-#                except KeyError:
-#                    self._logger.error(
-#                        'could not get ip address for machine %s' %
-#                        self._target['id']
-#                        )
-#                    raise Exception
-#
-#                self._logger.info(
-#                    'machine %s: VERIFY_RESIZE -> ACTIVE [%.1f sec elapsed]' %
-#                    (self._target['id'], self.elapsed())
-#                    )
-#                self._state.set_instance_state(self._target['id'],
-#                                              (self._target, 'ACTIVE'))
-#
-#                return True
-#
-#        else:
-#            # should never get here
-#            self._logger.error('Unexpected state')
-#            raise Exception
diff --git a/stress/test_servers.py b/stress/test_servers.py
deleted file mode 100644
index 1dd72f1..0000000
--- a/stress/test_servers.py
+++ /dev/null
@@ -1,318 +0,0 @@
-# Copyright 2011 Quanta Research Cambridge, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License");
-#    you may not use this file except in compliance with the License.
-#    You may obtain a copy of the License at
-#
-#        http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS,
-#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#    See the License for the specific language governing permissions and
-#    limitations under the License.
-"""Defines various sub-classes of the `StressTestCase` and
-`PendingServerAction` class. Sub-classes of StressTestCase implement various
-API calls on the Nova cluster having to do with creating and deleting VMs.
-Each sub-class will have a corresponding PendingServerAction. These pending
-actions veriy that the API call was successful or not."""
-
-import random
-
-from stress import pending_action
-from stress import test_case
-
-
-class TestCreateVM(test_case.StressTestCase):
-    """Create a virtual machine in the Nova cluster."""
-    _vm_id = 0
-
-    def run(self, manager, state, *pargs, **kwargs):
-        """
-        Send an HTTP POST request to the nova cluster to build a
-        server. Update the state variable to track state of new server
-        and set to PENDING state.
-
-        `manager` : Manager object.
-        `state`      : `State` object describing our view of state of cluster
-        `pargs`      : positional arguments
-        `kwargs`     : keyword arguments, which include:
-                       `key_name`  : name of keypair
-                       `image_ref` : index to image types availablexs
-                       `flavor_ref`: index to flavor types available
-                                     (default = 1, which is tiny)
-        """
-
-        # restrict number of instances we can launch
-        if len(state.get_instances()) >= state.get_max_instances():
-            self._logger.debug("maximum number of instances created: %d" %
-                               state.get_max_instances())
-            return None
-
-        _key_name = kwargs.get('key_name', '')
-        _image_ref = kwargs.get('image_ref', manager.config.compute.image_ref)
-        _flavor_ref = kwargs.get('flavor_ref',
-                                 manager.config.compute.flavor_ref)
-
-        expected_server = {
-            'name': 'server' + str(TestCreateVM._vm_id),
-            'metadata': {
-                'key1': 'value1',
-                'key2': 'value2',
-            },
-            'imageRef': _image_ref,
-            'flavorRef': _flavor_ref,
-            'adminPass': 'testpwd',
-            'key_name': _key_name,
-        }
-        TestCreateVM._vm_id = TestCreateVM._vm_id + 1
-        create_server = manager.servers_client.create_server
-        response, body = create_server(expected_server['name'],
-                                       _image_ref,
-                                       _flavor_ref,
-                                       meta=expected_server['metadata'],
-                                       adminPass=expected_server['adminPass'])
-
-        if (response.status != 202):
-            self._logger.error("response: %s" % response)
-            self._logger.error("body: %s" % body)
-            raise Exception
-
-        created_server = body
-
-        self._logger.info('setting machine %s to BUILD' %
-                          created_server['id'])
-        state.set_instance_state(created_server['id'],
-                                (created_server, 'BUILD'))
-
-        return VerifyCreateVM(manager,
-                              state,
-                              created_server,
-                              expected_server)
-
-
-class VerifyCreateVM(pending_action.PendingServerAction):
-    """Verify that VM was built and is running."""
-    def __init__(self, manager,
-                 state,
-                 created_server,
-                 expected_server):
-        super(VerifyCreateVM, self).__init__(manager,
-                                             state,
-                                             created_server,
-                                             )
-        self._expected = expected_server
-
-    def retry(self):
-        """
-        Check to see that the server was created and is running.
-        Update local view of state to indicate that it is running.
-        """
-        # don't run create verification
-        # if target machine has been deleted or is going to be deleted
-        target_id = self._target['id']
-        if (self._target['id'] not in self._state.get_instances().keys() or
-            self._state.get_instances()[target_id][1] == 'TERMINATING'):
-            self._logger.info('machine %s is deleted or TERMINATING' %
-                              self._target['id'])
-            return True
-
-        admin_pass = self._target['adminPass']
-        # Could check more things here.
-        if (self._expected['adminPass'] != admin_pass):
-            self._logger.error('expected: %s' %
-                               (self._expected['adminPass']))
-            self._logger.error('returned: %s' %
-                               (admin_pass))
-            raise Exception
-
-        if self._check_for_status('ACTIVE') != 'ACTIVE':
-            return False
-
-        self._logger.info('machine %s: BUILD -> ACTIVE [%.1f secs elapsed]' %
-                          (self._target['id'], self.elapsed()))
-        self._state.set_instance_state(self._target['id'],
-                                      (self._target, 'ACTIVE'))
-        return True
-
-
-class TestKillActiveVM(test_case.StressTestCase):
-    """Class to destroy a random ACTIVE server."""
-    def run(self, manager, state, *pargs, **kwargs):
-        """
-        Send an HTTP POST request to the nova cluster to destroy
-        a random ACTIVE server. Update `state` to indicate TERMINATING.
-
-        `manager` : Manager object.
-        `state`      : `State` object describing our view of state of cluster
-        `pargs`      : positional arguments
-        `kwargs`     : keyword arguments, which include:
-                       `timeout` : how long to wait before issuing Exception
-        """
-        # check for active instances
-        vms = state.get_instances()
-        active_vms = [v for k, v in vms.iteritems() if v and v[1] == 'ACTIVE']
-        # no active vms, so return null
-        if not active_vms:
-            self._logger.info('no ACTIVE instances to delete')
-            return
-
-        _timeout = kwargs.get('timeout', manager.config.compute.build_timeout)
-
-        target = random.choice(active_vms)
-        killtarget = target[0]
-        manager.servers_client.delete_server(killtarget['id'])
-        self._logger.info('machine %s: ACTIVE -> TERMINATING' %
-                          killtarget['id'])
-        state.set_instance_state(killtarget['id'],
-                                (killtarget, 'TERMINATING'))
-        return VerifyKillActiveVM(manager, state,
-                                  killtarget, timeout=_timeout)
-
-
-class VerifyKillActiveVM(pending_action.PendingServerAction):
-    """Verify that server was destroyed."""
-
-    def retry(self):
-        """
-        Check to see that the server of interest is destroyed. Update
-        state to indicate that server is destroyed by deleting it from local
-        view of state.
-        """
-        tid = self._target['id']
-        # if target machine has been deleted from the state, then it was
-        # already verified to be deleted
-        if (not tid in self._state.get_instances().keys()):
-            return False
-
-        try:
-            self._manager.servers_client.get_server(tid)
-        except Exception:
-            # if we get a 404 response, is the machine really gone?
-            target = self._target
-            self._logger.info('machine %s: DELETED [%.1f secs elapsed]' %
-                              (target['id'], self.elapsed()))
-            self._state.delete_instance_state(target['id'])
-            return True
-
-        return False
-
-
-class TestKillAnyVM(test_case.StressTestCase):
-    """Class to destroy a random server regardless of state."""
-
-    def run(self, manager, state, *pargs, **kwargs):
-        """
-        Send an HTTP POST request to the nova cluster to destroy
-        a random server. Update state to TERMINATING.
-
-        `manager` : Manager object.
-        `state`      : `State` object describing our view of state of cluster
-        `pargs`      : positional arguments
-        `kwargs`     : keyword arguments, which include:
-                       `timeout` : how long to wait before issuing Exception
-        """
-
-        vms = state.get_instances()
-        # no vms, so return null
-        if not vms:
-            self._logger.info('no active instances to delete')
-            return
-
-        _timeout = kwargs.get('timeout', manager.config.compute.build_timeout)
-
-        target = random.choice(vms)
-        killtarget = target[0]
-
-        manager.servers_client.delete_server(killtarget['id'])
-        self._state.set_instance_state(killtarget['id'],
-                                      (killtarget, 'TERMINATING'))
-        # verify object will do the same thing as the active VM
-        return VerifyKillAnyVM(manager, state, killtarget, timeout=_timeout)
-
-VerifyKillAnyVM = VerifyKillActiveVM
-
-
-class TestUpdateVMName(test_case.StressTestCase):
-    """Class to change the name of the active server."""
-    def run(self, manager, state, *pargs, **kwargs):
-        """
-        Issue HTTP POST request to change the name of active server.
-        Update state of server to reflect name changing.
-
-        `manager` : Manager object.
-        `state`      : `State` object describing our view of state of cluster
-        `pargs`      : positional arguments
-        `kwargs`     : keyword arguments, which include:
-                       `timeout`   : how long to wait before issuing Exception
-        """
-
-        # select one machine from active ones
-        vms = state.get_instances()
-        active_vms = [v for k, v in vms.iteritems() if v and v[1] == 'ACTIVE']
-        # no active vms, so return null
-        if not active_vms:
-            self._logger.info('no active instances to update')
-            return
-
-        _timeout = kwargs.get('timeout', manager.config.compute.build_timeout)
-
-        target = random.choice(active_vms)
-        update_target = target[0]
-
-        # Update name by appending '_updated' to the name
-        new_name = update_target['name'] + '_updated'
-        (response, body) = \
-            manager.servers_client.update_server(update_target['id'],
-                                                 name=new_name)
-        if (response.status != 200):
-            self._logger.error("response: %s " % response)
-            self._logger.error("body: %s " % body)
-            raise Exception
-
-        assert(new_name == body['name'])
-
-        self._logger.info('machine %s: ACTIVE -> UPDATING_NAME' %
-                          body['id'])
-        state.set_instance_state(body['id'],
-                                (body, 'UPDATING_NAME'))
-
-        return VerifyUpdateVMName(manager,
-                                  state,
-                                  body,
-                                  timeout=_timeout)
-
-
-class VerifyUpdateVMName(pending_action.PendingServerAction):
-    """Check that VM has new name."""
-    def retry(self):
-        """
-        Check that VM has new name. Update local view of `state` to RUNNING.
-        """
-        # don't run update verification
-        # if target machine has been deleted or is going to be deleted
-        target_id = self._target['id']
-        if (not self._target['id'] in self._state.get_instances().keys() or
-            self._state.get_instances()[target_id][1] == 'TERMINATING'):
-            return False
-
-        response, body = \
-            self._manager.serverse_client.get_server(self._target['id'])
-        if (response.status != 200):
-            self._logger.error("response: %s " % response)
-            self._logger.error("body: %s " % body)
-            raise Exception
-
-        if self._target['name'] != body['name']:
-            self._logger.error(self._target['name'] +
-                               ' vs. ' +
-                               body['name'])
-            raise Exception
-
-        # log the update
-        self._logger.info('machine %s: UPDATING_NAME -> ACTIVE' %
-                          self._target['id'])
-        self._state.set_instance_state(self._target['id'],
-                                      (body,
-                                       'ACTIVE'))
-        return True
diff --git a/stress/tests/create_kill.py b/stress/tests/create_kill.py
deleted file mode 100644
index 30ddfd7..0000000
--- a/stress/tests/create_kill.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright 2011 Quanta Research Cambridge, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License");
-#    you may not use this file except in compliance with the License.
-#    You may obtain a copy of the License at
-#
-#        http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS,
-#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#    See the License for the specific language governing permissions and
-#    limitations under the License.
-"""More aggressive test that creates and destroys VMs with shorter
-sleep times"""
-
-import datetime
-import time
-
-from stress.basher import BasherAction
-from stress.driver import bash_openstack
-from stress.test_servers import TestCreateVM
-from stress.test_servers import TestKillActiveVM
-from tempest import clients
-
-choice_spec = [
-    BasherAction(TestCreateVM(), 50),
-    BasherAction(TestKillActiveVM(), 50)
-]
-
-nova = clients.Manager()
-
-bash_openstack(nova,
-               choice_spec,
-               duration=datetime.timedelta(seconds=180),
-               sleep_time=100,  # in milliseconds
-               seed=int(time.time()),
-               test_name="create and delete",
-               )
diff --git a/stress/tests/floating_ips.py b/stress/tests/floating_ips.py
deleted file mode 100755
index b1b3778..0000000
--- a/stress/tests/floating_ips.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright 2011 Quanta Research Cambridge, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License");
-#    you may not use this file except in compliance with the License.
-#    You may obtain a copy of the License at
-#
-#        http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS,
-#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#    See the License for the specific language governing permissions and
-#    limitations under the License.
-"""Stress test that associates/disasssociates floating ips."""
-
-import datetime
-
-from stress.basher import BasherAction
-from stress.driver import bash_openstack
-from stress.test_floating_ips import TestChangeFloatingIp
-from tempest import clients
-
-
-choice_spec = [
-    BasherAction(TestChangeFloatingIp(), 100)
-]
-
-nova = clients.Manager()
-
-bash_openstack(nova,
-               choice_spec,
-               duration=datetime.timedelta(seconds=300),
-               test_name="floating_ips",
-               initial_floating_ips=8,
-               initial_vms=8)
diff --git a/stress/tests/hard_reboots.py b/stress/tests/hard_reboots.py
deleted file mode 100644
index 50a2e91..0000000
--- a/stress/tests/hard_reboots.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright 2011 Quanta Research Cambridge, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License");
-#    you may not use this file except in compliance with the License.
-#    You may obtain a copy of the License at
-#
-#        http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS,
-#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#    See the License for the specific language governing permissions and
-#    limitations under the License.
-"""Test that reboots random instances in a Nova cluster."""
-
-import datetime
-import time
-
-from stress.basher import BasherAction
-from stress.driver import bash_openstack
-from stress.test_server_actions import TestRebootVM
-from stress.test_servers import TestCreateVM
-from tempest import clients
-
-choice_spec = [
-    BasherAction(TestCreateVM(), 50),
-    BasherAction(TestRebootVM(), 50,
-                 kargs={'type': 'HARD'})
-]
-
-nova = clients.Manager()
-
-bash_openstack(nova,
-               choice_spec,
-               duration=datetime.timedelta(seconds=180),
-               sleep_time=500,  # in milliseconds
-               seed=int(time.time()),
-               test_name="hard reboots",
-               )
diff --git a/stress/tests/user_script_sample.py b/stress/tests/user_script_sample.py
deleted file mode 100644
index d941ea0..0000000
--- a/stress/tests/user_script_sample.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# Copyright 2011 Quanta Research Cambridge, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License");
-#    you may not use this file except in compliance with the License.
-#    You may obtain a copy of the License at
-#
-#        http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS,
-#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#    See the License for the specific language governing permissions and
-#    limitations under the License.
-"""Sample stress test that creates a few virtual machines and then
-destroys them"""
-
-import datetime
-
-from stress.basher import BasherAction
-from stress.driver import bash_openstack
-from stress.test_servers import TestCreateVM
-from stress.test_servers import TestKillActiveVM
-from tempest import clients
-
-choice_spec = [
-    BasherAction(TestCreateVM(), 50,
-                 kargs={'timeout': '60'}),
-    BasherAction(TestKillActiveVM(), 50)
-]
-
-
-nova = clients.Manager()
-
-bash_openstack(nova,
-               choice_spec,
-               duration=datetime.timedelta(seconds=10),
-               sleep_time=1000,  # in milliseconds
-               seed=None,
-               test_name="simple create and delete",
-               max_vms=4)
diff --git a/stress/tools/nova_destroy_all.py b/stress/tools/nova_destroy_all.py
deleted file mode 100755
index 00d8883..0000000
--- a/stress/tools/nova_destroy_all.py
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/usr/bin/env python
-
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 Quanta Research Cambridge, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License");
-#    you may not use this file except in compliance with the License.
-#    You may obtain a copy of the License at
-#
-#        http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS,
-#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#    See the License for the specific language governing permissions and
-#    limitations under the License.
-
-from novaclient.v1_1 import client
-import tempest.config
-
-# get the environment variables for credentials
-identity = tempest.config.TempestConfig().identity
-
-nt = client.Client(identity.username, identity.password,
-                   identity.tenant_name, identity.uri)
-
-flavor_list = nt.flavors.list()
-server_list = nt.servers.list()
-images_list = nt.images.list()
-keypairs_list = nt.keypairs.list()
-floating_ips_list = nt.floating_ips.list()
-volumes_list = nt.volumes.list()
-
-print "total servers: %3d, total flavors: %3d, total images: %3d," % \
-    (len(server_list),
-     len(flavor_list),
-     len(images_list)),
-
-print "total keypairs: %3d, total floating ips: %3d" % \
-    (len(keypairs_list),
-     len(floating_ips_list))
-
-print "deleting all servers"
-for s in server_list:
-    s.delete()
-
-print "deleting all keypairs"
-for s in keypairs_list:
-    s.delete()
-
-print "deleting all floating_ips"
-for s in floating_ips_list:
-    s.delete()
-
-print "deleting all volumes"
-for s in volumes_list:
-    s.delete()
diff --git a/stress/tools/nova_status.py b/stress/tools/nova_status.py
deleted file mode 100755
index ee20282..0000000
--- a/stress/tools/nova_status.py
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/usr/bin/env python
-
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 Quanta Research Cambridge, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License");
-#    you may not use this file except in compliance with the License.
-#    You may obtain a copy of the License at
-#
-#        http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS,
-#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#    See the License for the specific language governing permissions and
-#    limitations under the License.
-
-from novaclient.v1_1 import client
-import tempest.config
-
-# get the environment variables for credentials
-identity = tempest.config.TempestConfig().identity
-print identity.username, identity.password,\
-    identity.tenant_name, identity.uri
-
-nt = client.Client(identity.username, identity.password,
-                   identity.tenant_name, identity.uri)
-
-flavor_list = nt.flavors.list()
-server_list = nt.servers.list()
-images_list = nt.images.list()
-keypairs_list = nt.keypairs.list()
-floating_ips_list = nt.floating_ips.list()
-
-print "total servers: %3d, total flavors: %3d, total images: %3d" % \
-    (len(server_list),
-     len(flavor_list),
-     len(images_list))
-
-print "total keypairs: %3d, total floating ips: %3d" % \
-    (len(keypairs_list),
-     len(floating_ips_list))
-
-print "flavors:\t", flavor_list
-print "servers:\t", server_list
-print "images: \t", images_list
-print "keypairs:\t", keypairs_list
-print "floating ips:\t", floating_ips_list
diff --git a/stress/utils.py b/stress/utils.py
deleted file mode 100644
index ec63b99..0000000
--- a/stress/utils.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 Quanta Research Cambridge, Inc.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License");
-#    you may not use this file except in compliance with the License.
-#    You may obtain a copy of the License at
-#
-#        http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS,
-#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#    See the License for the specific language governing permissions and
-#    limitations under the License.
-
-import shlex
-import subprocess
-
-SSH_OPTIONS = (" -q" +
-               " -o UserKnownHostsFile=/dev/null" +
-               " -o StrictHostKeyChecking=no -i ")
-
-
-def get_ssh_options(keypath):
-    return SSH_OPTIONS + keypath
-
-
-def scp(keypath, args):
-    options = get_ssh_options(keypath)
-    return subprocess.check_call(shlex.split("scp" + options + args))
-
-
-def ssh(keypath, user, node, command, check=True):
-    command = 'sudo ' + command
-    command = "ssh %s %s@%s %s" % (get_ssh_options(keypath), user,
-                                   node, command)
-    popenargs = shlex.split(command)
-    process = subprocess.Popen(popenargs, stdout=subprocess.PIPE)
-    output, unused_err = process.communicate()
-    retcode = process.poll()
-    if retcode and check:
-        raise Exception("%s: ssh failed with retcode: %s" % (node, retcode))
-    return output
-
-
-def execute_on_all(keypath, user, nodes, command):
-    for node in nodes:
-        ssh(keypath, user, node, command)
-
-
-def enum(*sequential, **named):
-    """Create auto-incremented enumerated types."""
-    enums = dict(zip(sequential, range(len(sequential))), **named)
-    return type('Enum', (), enums)
diff --git a/tempest/cli/README.rst b/tempest/cli/README.rst
new file mode 100644
index 0000000..4742d4a
--- /dev/null
+++ b/tempest/cli/README.rst
@@ -0,0 +1,48 @@
+Tempest Guide to CLI tests
+==========================
+
+
+What are these tests?
+---------------------
+The cli tests exercise the various OpenStack command line interface tools
+to ensure that they minimally function. The current scope is read-only
+operations against a cloud, which are hard to cover with unit tests.
+
+
+Why are these tests in tempest?
+-------------------------------
+These tests exist here because it is extremely difficult to build a
+functional enough environment in the python-*client unit tests to
+provide this kind of testing. Since the gate already brings up a full
+cloud with devstack + tempest, it was decided to keep these tests as a
+side tree in tempest rather than start a separate QA effort that would
+split review time.
+
+
+Scope of these tests
+--------------------
+These tests should stay limited to exercising the cli code itself;
+functional testing of the cloud belongs elsewhere.
+
+
+Example of a good test
+----------------------
+Tests should be isolated to a single command in one of the python
+clients.
+
+Tests should not modify the cloud.
+
+If a test is validating the cli for bad data, it should do it with
+assertRaises.
+
+A reasonable example of an existing test is as follows::
+
+    def test_admin_list(self):
+        self.nova('list')
+        self.nova('list', params='--all-tenants 1')
+        self.nova('list', params='--all-tenants 0')
+        self.assertRaises(subprocess.CalledProcessError,
+                          self.nova,
+                          'list',
+                          params='--all-tenants bad')
diff --git a/cli/__init__.py b/tempest/cli/__init__.py
similarity index 97%
rename from cli/__init__.py
rename to tempest/cli/__init__.py
index a3038d2..413990d 100644
--- a/cli/__init__.py
+++ b/tempest/cli/__init__.py
@@ -21,7 +21,7 @@
 
 from oslo.config import cfg
 
-import cli.output_parser
+import tempest.cli.output_parser
 import tempest.test
 
 
@@ -52,7 +52,7 @@
         super(ClientTestBase, cls).setUpClass()
 
     def __init__(self, *args, **kwargs):
-        self.parser = cli.output_parser
+        self.parser = tempest.cli.output_parser
         super(ClientTestBase, self).__init__(*args, **kwargs)
 
     def nova(self, action, flags='', params='', admin=True, fail_ok=False):
diff --git a/cli/output_parser.py b/tempest/cli/output_parser.py
similarity index 100%
rename from cli/output_parser.py
rename to tempest/cli/output_parser.py
diff --git a/cli/simple_read_only/README.txt b/tempest/cli/simple_read_only/README.txt
similarity index 100%
rename from cli/simple_read_only/README.txt
rename to tempest/cli/simple_read_only/README.txt
diff --git a/cli/simple_read_only/__init__.py b/tempest/cli/simple_read_only/__init__.py
similarity index 100%
rename from cli/simple_read_only/__init__.py
rename to tempest/cli/simple_read_only/__init__.py
diff --git a/cli/simple_read_only/test_compute.py b/tempest/cli/simple_read_only/test_compute.py
similarity index 98%
rename from cli/simple_read_only/test_compute.py
rename to tempest/cli/simple_read_only/test_compute.py
index d301d38..fa64561 100644
--- a/cli/simple_read_only/test_compute.py
+++ b/tempest/cli/simple_read_only/test_compute.py
@@ -21,7 +21,7 @@
 from oslo.config import cfg
 import testtools
 
-import cli
+import tempest.cli
 
 
 CONF = cfg.CONF
@@ -30,7 +30,7 @@
 LOG = logging.getLogger(__name__)
 
 
-class SimpleReadOnlyNovaClientTest(cli.ClientTestBase):
+class SimpleReadOnlyNovaClientTest(tempest.cli.ClientTestBase):
 
     """
     This is a first pass at a simple read only python-novaclient test. This
diff --git a/cli/simple_read_only/test_compute_manage.py b/tempest/cli/simple_read_only/test_compute_manage.py
similarity index 96%
rename from cli/simple_read_only/test_compute_manage.py
rename to tempest/cli/simple_read_only/test_compute_manage.py
index bbcc5b1..a788c8b 100644
--- a/cli/simple_read_only/test_compute_manage.py
+++ b/tempest/cli/simple_read_only/test_compute_manage.py
@@ -18,13 +18,13 @@
 import logging
 import subprocess
 
-import cli
+import tempest.cli
 
 
 LOG = logging.getLogger(__name__)
 
 
-class SimpleReadOnlyNovaManageTest(cli.ClientTestBase):
+class SimpleReadOnlyNovaManageTest(tempest.cli.ClientTestBase):
 
     """
     This is a first pass at a simple read only nova-manage test. This
diff --git a/cli/simple_read_only/test_glance.py b/tempest/cli/simple_read_only/test_glance.py
similarity index 96%
rename from cli/simple_read_only/test_glance.py
rename to tempest/cli/simple_read_only/test_glance.py
index f9822cc..b3b3eb7 100644
--- a/cli/simple_read_only/test_glance.py
+++ b/tempest/cli/simple_read_only/test_glance.py
@@ -19,13 +19,13 @@
 import re
 import subprocess
 
-import cli
+import tempest.cli
 
 
 LOG = logging.getLogger(__name__)
 
 
-class SimpleReadOnlyGlanceClientTest(cli.ClientTestBase):
+class SimpleReadOnlyGlanceClientTest(tempest.cli.ClientTestBase):
     """Basic, read-only tests for Glance CLI client.
 
     Checks return values and output of read-only commands.
diff --git a/cli/simple_read_only/test_keystone.py b/tempest/cli/simple_read_only/test_keystone.py
similarity index 97%
rename from cli/simple_read_only/test_keystone.py
rename to tempest/cli/simple_read_only/test_keystone.py
index 4b14c3c..067f58c 100644
--- a/cli/simple_read_only/test_keystone.py
+++ b/tempest/cli/simple_read_only/test_keystone.py
@@ -19,13 +19,13 @@
 import re
 import subprocess
 
-import cli
+import tempest.cli
 
 
 LOG = logging.getLogger(__name__)
 
 
-class SimpleReadOnlyKeystoneClientTest(cli.ClientTestBase):
+class SimpleReadOnlyKeystoneClientTest(tempest.cli.ClientTestBase):
     """Basic, read-only tests for Keystone CLI client.
 
     Checks return values and output of read-only commands.
diff --git a/tempest/config.py b/tempest/config.py
index a90767e..d43c5d7 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -156,6 +156,9 @@
                default=60,
                help="Timeout in seconds to wait for output from ssh "
                     "channel."),
+    cfg.StrOpt('fixed_network_name',
+               default='private',
+               help="Visible fixed network name"),
     cfg.StrOpt('network_for_ssh',
                default='public',
                help="Network used for SSH connections."),
@@ -411,7 +414,23 @@
                help='Maximum number of instances to create during test.'),
     cfg.StrOpt('controller',
                default=None,
-               help='Controller host.')
+               help='Controller host.'),
+    # new stress options
+    cfg.StrOpt('target_controller',
+               default=None,
+               help='Controller host used for nova-manage and log checks.'),
+    cfg.StrOpt('target_ssh_user',
+               default=None,
+               help='SSH user for the controller and log file nodes.'),
+    cfg.StrOpt('target_private_key_path',
+               default=None,
+               help='Path to the private key for the ssh user.'),
+    cfg.StrOpt('target_logfiles',
+               default=None,
+               help='Regexp matching the log files to check for errors.'),
+    cfg.IntOpt('log_check_interval',
+               default=60,
+               help='Time in seconds between log file error checks.')
 ]
 
 
diff --git a/tempest/stress/README.rst b/tempest/stress/README.rst
new file mode 100644
index 0000000..2c431ed
--- /dev/null
+++ b/tempest/stress/README.rst
@@ -0,0 +1,47 @@
+Quanta Research Cambridge OpenStack Stress Test System
+======================================================
+
+Nova is a distributed, asynchronous system that is prone to race condition
+bugs. These bugs will not be easily found during
+functional testing but will be encountered by users in large deployments in a
+way that is hard to debug. The stress test tries to cause these bugs to happen
+in a more controlled environment.
+
+
+Environment
+------------
+This particular framework assumes your working Nova cluster understands Nova
+API 2.0. The stress tests can read the logs from the cluster. To enable this
+you have to provide, in the [stress] section of tempest.conf, the hostname on
+which to call 'nova-manage' and the private key and user name used to ssh to
+the cluster. You also need to provide the location of the log files::
+
+	target_logfiles = "regexp matching all log files to be checked for errors"
+	target_private_key_path = "private ssh key for controller and log file nodes"
+	target_ssh_user = "username for controller and log file nodes"
+	target_controller = "hostname or ip of controller node (for nova-manage)"
+	log_check_interval = "time between checking logs for errors (default 60s)"
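+
+A minimal [stress] section might then look like this (the hostname, user
+name, and paths below are only illustrative)::
+
+	[stress]
+	target_controller = controller.example.com
+	target_ssh_user = stack
+	target_private_key_path = /home/stack/.ssh/id_rsa
+	target_logfiles = /opt/stack/logs/*.log
+	log_check_interval = 60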
+
+
+
+Running the sample test
+-----------------------
+
+To test your installation, do the following (from the tempest/stress directory)::
+
+	./run_stress.py etc/sample-test.json -d 30
+
+This sample test repeatedly creates and deletes servers in parallel for the
+requested duration.
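+
+The positional argument is a JSON file describing the workload. The bundled
+sample (etc/sample-test.json) looks roughly like this::
+
+	[{"action": "tempest.stress.actions.create_destroy_server.create_destroy",
+	  "threads": 8,
+	  "use_admin": false,
+	  "use_isolated_tenants": false,
+	  "kwargs": {}
+	  }]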
+
+
+Additional Tools
+----------------
+
+Sometimes the tests don't finish, or there are failures. In these
+cases, you may want to clean out the nova cluster. We have provided
+a script to do this in the ``tools`` subdirectory.
+You can use the following script to destroy any servers, keypairs,
+floating ips, and leftover stress users and tenants::
+
+	tempest/stress/tools/cleanup.py
diff --git a/stress/__init__.py b/tempest/stress/__init__.py
similarity index 75%
copy from stress/__init__.py
copy to tempest/stress/__init__.py
index 0875e0b..1caf74a 100644
--- a/stress/__init__.py
+++ b/tempest/stress/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2011 Quanta Research Cambridge, Inc.
+# Copyright 2013 Quanta Research Cambridge, Inc.
 #
 #    Licensed under the Apache License, Version 2.0 (the "License");
 #    you may not use this file except in compliance with the License.
@@ -11,7 +11,3 @@
 #    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 #    See the License for the specific language governing permissions and
 #    limitations under the License.
-"""Basic framework for constructing various simulated workloads for a
-nova cluster."""
-
-__author__ = "David Kranz and Eugene Shih"
diff --git a/stress/__init__.py b/tempest/stress/actions/__init__.py
similarity index 75%
copy from stress/__init__.py
copy to tempest/stress/actions/__init__.py
index 0875e0b..1caf74a 100644
--- a/stress/__init__.py
+++ b/tempest/stress/actions/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2011 Quanta Research Cambridge, Inc.
+# Copyright 2013 Quanta Research Cambridge, Inc.
 #
 #    Licensed under the Apache License, Version 2.0 (the "License");
 #    you may not use this file except in compliance with the License.
@@ -11,7 +11,3 @@
 #    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 #    See the License for the specific language governing permissions and
 #    limitations under the License.
-"""Basic framework for constructing various simulated workloads for a
-nova cluster."""
-
-__author__ = "David Kranz and Eugene Shih"
diff --git a/tempest/stress/actions/create_destroy_server.py b/tempest/stress/actions/create_destroy_server.py
new file mode 100644
index 0000000..44b149f
--- /dev/null
+++ b/tempest/stress/actions/create_destroy_server.py
@@ -0,0 +1,34 @@
+# Copyright 2013 Quanta Research Cambridge, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License");
+#    you may not use this file except in compliance with the License.
+#    You may obtain a copy of the License at
+#
+#        http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+
+from tempest.common.utils.data_utils import rand_name
+
+
+def create_destroy(manager, logger):
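+    """Create a server, wait for it to become ACTIVE, delete it, and repeat."""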
+    image = manager.config.compute.image_ref
+    flavor = manager.config.compute.flavor_ref
+    while True:
+        name = rand_name("instance")
+        logger.info("creating %s" % name)
+        resp, server = manager.servers_client.create_server(
+            name, image, flavor)
+        server_id = server['id']
+        assert(resp.status == 202)
+        manager.servers_client.wait_for_server_status(server_id, 'ACTIVE')
+        logger.info("created %s" % server_id)
+        logger.info("deleting %s" % name)
+        resp, _ = manager.servers_client.delete_server(server_id)
+        assert(resp.status == 204)
+        manager.servers_client.wait_for_server_termination(server_id)
+        logger.info("deleted %s" % server_id)
diff --git a/tempest/stress/cleanup.py b/tempest/stress/cleanup.py
new file mode 100644
index 0000000..b2cb70a
--- /dev/null
+++ b/tempest/stress/cleanup.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python
+
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 Quanta Research Cambridge, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License");
+#    you may not use this file except in compliance with the License.
+#    You may obtain a copy of the License at
+#
+#        http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+
+from tempest import clients
+
+
+def cleanup():
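+    """Delete all servers, keypairs and floating ips, and remove any
+    stress_user*/stress_tenant* accounts left behind by previous runs.
+    """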
+    admin_manager = clients.AdminManager()
+
+    _, body = admin_manager.servers_client.list_servers({"all_tenants": True})
+    for s in body['servers']:
+        try:
+            admin_manager.servers_client.delete_server(s['id'])
+        except Exception:
+            pass
+
+    for s in body['servers']:
+        try:
+            admin_manager.servers_client.wait_for_server_termination(s['id'])
+        except Exception:
+            pass
+
+    _, keypairs = admin_manager.keypairs_client.list_keypairs()
+    for k in keypairs:
+        try:
+            admin_manager.keypairs_client.delete_keypair(k['name'])
+        except Exception:
+            pass
+
+    _, floating_ips = admin_manager.floating_ips_client.list_floating_ips()
+    for f in floating_ips:
+        try:
+            admin_manager.floating_ips_client.delete_floating_ip(f['id'])
+        except Exception:
+            pass
+
+    _, users = admin_manager.identity_client.get_users()
+    for user in users:
+        if user['name'].startswith("stress_user"):
+            admin_manager.identity_client.delete_user(user['id'])
+
+    _, tenants = admin_manager.identity_client.list_tenants()
+    for tenant in tenants:
+        if tenant['name'].startswith("stress_tenant"):
+            admin_manager.identity_client.delete_tenant(tenant['id'])
diff --git a/tempest/stress/driver.py b/tempest/stress/driver.py
new file mode 100644
index 0000000..51f159d
--- /dev/null
+++ b/tempest/stress/driver.py
@@ -0,0 +1,156 @@
+# Copyright 2013 Quanta Research Cambridge, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License");
+#    you may not use this file except in compliance with the License.
+#    You may obtain a copy of the License at
+#
+#        http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+
+import importlib
+import logging
+import multiprocessing
+import time
+
+from tempest import clients
+from tempest.common import ssh
+from tempest.common.utils.data_utils import rand_name
+from tempest import exceptions
+from tempest.stress import cleanup
+
+admin_manager = clients.AdminManager()
+
+# setup logging to file
+logging.basicConfig(
+    format='%(asctime)s %(process)d %(name)-20s %(levelname)-8s %(message)s',
+    datefmt='%m-%d %H:%M:%S',
+    filename="stress.debug.log",
+    filemode="w",
+    level=logging.DEBUG,
+)
+
+# define a Handler which writes INFO messages or higher to the sys.stdout
+_console = logging.StreamHandler()
+_console.setLevel(logging.INFO)
+# set a format which is simpler for console use
+format_str = '%(asctime)s %(process)d %(name)-20s: %(levelname)-8s %(message)s'
+_formatter = logging.Formatter(format_str)
+# tell the handler to use this format
+_console.setFormatter(_formatter)
+# add the handler to the root logger
+logger = logging.getLogger('tempest.stress')
+logger.addHandler(_console)
+
+
+def do_ssh(command, host):
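+    """Run command on host over ssh with the configured stress credentials.
+
+    Returns the command output, or None if ssh is not configured or the
+    command fails.
+    """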
+    username = admin_manager.config.stress.target_ssh_user
+    key_filename = admin_manager.config.stress.target_private_key_path
+    if not (username and key_filename):
+        return None
+    ssh_client = ssh.Client(host, username, key_filename=key_filename)
+    try:
+        return ssh_client.exec_command(command)
+    except exceptions.SSHExecCommandFailed:
+        return None
+
+
+def _get_compute_nodes(controller):
+    """
+    Returns a list of active compute nodes. List is generated by running
+    nova-manage on the controller.
+    """
+    nodes = []
+    cmd = "nova-manage service list | grep ^nova-compute"
+    output = do_ssh(cmd, controller)
+    if not output:
+        return nodes
+    # For example: nova-compute xg11eth0 nova enabled :-) 2011-10-31 18:57:46
+    # This is fragile but there is, at present, no other way to get this info.
+    for line in output.split('\n'):
+        words = line.split()
+        if len(words) > 4 and words[4] == ":-)":
+            nodes.append(words[1])
+    return nodes
+
+
+def _error_in_logs(logfiles, nodes):
+    """
+    Detect errors in the nova log files on the controller and compute nodes.
+    """
+    grep = 'egrep "ERROR|TRACE" %s' % logfiles
+    for node in nodes:
+        errors = do_ssh(grep, node)
+        if not errors:
+            continue
+        logger.error('%s: %s' % (node, errors))
+        return errors
+    return None
+
+
+def get_action_function(path):
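+    """Import the module portion of 'path' and return the named function."""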
+    (module_part, _, function) = path.rpartition('.')
+    return getattr(importlib.import_module(module_part), function)
+
+
+def stress_openstack(tests, duration):
+    """
+    Workload driver. Executes an action function against a nova-cluster.
+
+    """
+    logfiles = admin_manager.config.stress.target_logfiles
+    log_check_interval = int(admin_manager.config.stress.log_check_interval)
+    if logfiles:
+        controller = admin_manager.config.stress.target_controller
+        computes = _get_compute_nodes(controller)
+        for node in computes:
+            do_ssh("rm -f %s" % logfiles, node)
+    processes = []
+    for test in tests:
+        if test.get('use_admin', False):
+            manager = admin_manager
+        else:
+            manager = clients.Manager()
+        for _ in xrange(test.get('threads', 1)):
+            if test.get('use_isolated_tenants', False):
+                username = rand_name("stress_user")
+                tenant_name = rand_name("stress_tenant")
+                password = "pass"
+                identity_client = admin_manager.identity_client
+                _, tenant = identity_client.create_tenant(name=tenant_name)
+                identity_client.create_user(username,
+                                            password,
+                                            tenant['id'],
+                                            "email")
+                manager = clients.Manager(username=username,
+                                          password=password,
+                                          tenant_name=tenant_name)
+            target = get_action_function(test['action'])
+            p = multiprocessing.Process(target=target,
+                                        args=(manager, logger),
+                                        kwargs=test.get('kwargs', {}))
+            processes.append(p)
+            p.start()
+    end_time = time.time() + duration
+    had_errors = False
+    while True:
+        remaining = end_time - time.time()
+        if remaining <= 0:
+            break
+        time.sleep(min(remaining, log_check_interval))
+        if not logfiles:
+            continue
+        errors = _error_in_logs(logfiles, computes)
+        if errors:
+            had_errors = True
+            break
+    for p in processes:
+        p.terminate()
+    if not had_errors:
+        logger.info("cleaning up")
+        cleanup.cleanup()
diff --git a/tempest/stress/etc/sample-test.json b/tempest/stress/etc/sample-test.json
new file mode 100644
index 0000000..5a0189c
--- /dev/null
+++ b/tempest/stress/etc/sample-test.json
@@ -0,0 +1,7 @@
+[{"action": "tempest.stress.actions.create_destroy_server.create_destroy",
+  "threads": 8,
+  "use_admin": false,
+  "use_isolated_tenants": false,
+  "kwargs": {}
+  }
+]
diff --git a/tempest/stress/run_stress.py b/tempest/stress/run_stress.py
new file mode 100755
index 0000000..ef0ec8e
--- /dev/null
+++ b/tempest/stress/run_stress.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 Quanta Research Cambridge, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License");
+#    you may not use this file except in compliance with the License.
+#    You may obtain a copy of the License at
+#
+#        http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+
+import argparse
+import json
+
+from tempest.stress import driver
+
+
+def main(ns):
+    with open(ns.tests, 'r') as f:
+        tests = json.load(f)
+    driver.stress_openstack(tests, ns.duration)
+
+
+parser = argparse.ArgumentParser(description='Run stress tests.')
+parser.add_argument('-d', '--duration', default=300, type=int,
+                    help="Duration of the test in seconds.")
+parser.add_argument('tests', help="Name of the file with the test description.")
+
+if __name__ == "__main__":
+    main(parser.parse_args())
diff --git a/stress/__init__.py b/tempest/stress/tools/cleanup.py
old mode 100644
new mode 100755
similarity index 75%
rename from stress/__init__.py
rename to tempest/stress/tools/cleanup.py
index 0875e0b..7139d6c
--- a/stress/__init__.py
+++ b/tempest/stress/tools/cleanup.py
@@ -1,4 +1,6 @@
-# Copyright 2011 Quanta Research Cambridge, Inc.
+#!/usr/bin/env python
+
+# Copyright 2013 Quanta Research Cambridge, Inc.
 #
 #    Licensed under the Apache License, Version 2.0 (the "License");
 #    you may not use this file except in compliance with the License.
@@ -11,7 +13,8 @@
 #    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 #    See the License for the specific language governing permissions and
 #    limitations under the License.
-"""Basic framework for constructing various simulated workloads for a
-nova cluster."""
 
-__author__ = "David Kranz and Eugene Shih"
+from tempest.stress import cleanup
+
+
+cleanup.cleanup()
diff --git a/tempest/tests/compute/servers/test_list_server_filters.py b/tempest/tests/compute/servers/test_list_server_filters.py
index 852288e..ca5e112 100644
--- a/tempest/tests/compute/servers/test_list_server_filters.py
+++ b/tempest/tests/compute/servers/test_list_server_filters.py
@@ -75,6 +75,8 @@
         cls.client.wait_for_server_status(cls.s3['id'], 'ACTIVE')
         resp, cls.s3 = cls.client.get_server(cls.s3['id'])
 
+        cls.fixed_network_name = cls.config.compute.fixed_network_name
+
     @classmethod
     def tearDownClass(cls):
         cls.client.delete_server(cls.s1['id'])
@@ -208,7 +210,7 @@
     def test_list_servers_filtered_by_ip(self):
         # Filter servers by ip
         # Here should be listed 1 server
-        ip = self.s1['addresses']['private'][0]['addr']
+        ip = self.s1['addresses'][self.fixed_network_name][0]['addr']
         params = {'ip': ip}
         resp, body = self.client.list_servers(params)
         servers = body['servers']
@@ -222,7 +224,7 @@
         # Filter servers by regex ip
         # List all servers filtered by part of ip address.
         # Here should be listed all servers
-        ip = self.s1['addresses']['private'][0]['addr'][0:-3]
+        ip = self.s1['addresses'][self.fixed_network_name][0]['addr'][0:-3]
         params = {'ip': ip}
         resp, body = self.client.list_servers(params)
         servers = body['servers']
diff --git a/tempest/tests/identity/admin/v3/test_users.py b/tempest/tests/identity/admin/v3/test_users.py
index 7118241..39b8ca1 100644
--- a/tempest/tests/identity/admin/v3/test_users.py
+++ b/tempest/tests/identity/admin/v3/test_users.py
@@ -74,6 +74,8 @@
         fetched_project_ids = list()
         _, u_project = self.v3_client.create_project(
             rand_name('project-'), description=rand_name('project-desc-'))
+        # Delete the Project at the end of this method
+        self.addCleanup(self.v3_client.delete_project, u_project['id'])
         #Create a user.
         u_name = rand_name('user-')
         u_desc = u_name + 'description'
diff --git a/tox.ini b/tox.ini
index 565a9ad..7d3d245 100644
--- a/tox.ini
+++ b/tox.ini
@@ -20,8 +20,7 @@
          NOSE_OPENSTACK_SHOW_ELAPSED=1
          NOSE_OPENSTACK_STDOUT=1
 commands =
-  nosetests --logging-format '%(asctime)-15s %(message)s' --with-xunit --xunit-file=nosetests-full.xml -sv tempest
-  nosetests --logging-format '%(asctime)-15s %(message)s' --with-xunit --xunit-file=nosetests-cli.xml -sv cli
+  nosetests --logging-format '%(asctime)-15s %(message)s' --with-xunit --xunit-file=nosetests-full.xml -sv tempest/tests tempest/cli
 
 [testenv:smoke]
 sitepackages = True
@@ -47,8 +46,7 @@
          NOSE_OPENSTACK_STDOUT=1
 commands =
    python -m tools/tempest_coverage -c start --combine
-   nosetests --logging-format '%(asctime)-15s %(message)s' --with-xunit --xunit-file=nosetests-full.xml -sv tempest
-   nosetests --logging-format '%(asctime)-15s %(message)s' --with-xunit --xunit-file=nosetests-cli.xml -sv cli
+   nosetests --logging-format '%(asctime)-15s %(message)s' --with-xunit --xunit-file=nosetests-full.xml -sv tempest/tests tempest/cli
    python -m tools/tempest_coverage -c report --html
 
 [testenv:pep8]