Merge "Configurable fixed network name"
diff --git a/stress/README.rst b/stress/README.rst
deleted file mode 100644
index d935289..0000000
--- a/stress/README.rst
+++ /dev/null
@@ -1,68 +0,0 @@
-Quanta Research Cambridge OpenStack Stress Test System
-======================================================
-
-Nova is a distributed, asynchronous system that is prone to race condition
-bugs. These bugs will not be easily found during
-functional testing but will be encountered by users in large deployments in a
-way that is hard to debug. The stress test tries to cause these bugs to happen
-in a more controlled environment.
-
-The basic idea of the test is that there are a number of actions, roughly
-corresponding to the Compute API, that are fired pseudo-randomly at a nova
-cluster as fast as possible. These actions consist of what to do, how to
-verify success, and a state filter to make sure that the operation makes sense.
-For example, if the action is to reboot a server and none are active, nothing
-should be done. A test case is a set of actions to be performed and the
-probability that each action should be selected. There are also parameters
-controlling rate of fire and stuff like that.
-
-This test framework is designed to stress test a Nova cluster. Hence,
-you must have a working Nova cluster with rate limiting turned off.
-
-Environment
-------------
-This particular framework assumes your working Nova cluster understands Nova
-API 2.0. The stress tests can read the logs from the cluster. To enable this
-you have to provide the hostname to call 'nova-manage' and
-the private key and user name for ssh to the cluster in the
-[stress] section of tempest.conf. You also need to provide the
-value of --logdir in nova.conf:
-
- host_private_key_path=<path to private ssh key>
- host_admin_user=<name of user for ssh command>
- nova_logdir=<value of --logdir in nova.conf>
- controller=<hostname for calling nova-manage>
- max_instances=<limit on instances that will be created>
-
-Also, make sure to set
-
-log_level=CRITICAL
-
-so that the API client does not log failed calls which are expected while
-running stress tests.
-
-The stress test needs the top-level tempest directory to be on PYTHONPATH
-if you are not using nosetests to run.
-
-
-Running the sample test
------------------------
-
-To test your installation, do the following (from the tempest directory):
-
- PYTHONPATH=. python stress/tests/user_script_sample.py
-
-This sample test tries to create a few VMs and kill a few VMs.
-
-
-Additional Tools
-----------------
-
-Sometimes the tests don't finish, or there are failures. In these
-cases, you may want to clean out the nova cluster. We have provided
-some scripts to do this in the ``tools`` subdirectory. To use these
-tools, you will need to install python-novaclient.
-You can then use the following script to destroy any keypairs,
-floating ips, and servers::
-
-stress/tools/nova_destroy_all.py
diff --git a/stress/__init__.py b/stress/__init__.py
deleted file mode 100644
index 0875e0b..0000000
--- a/stress/__init__.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# Copyright 2011 Quanta Research Cambridge, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Basic framework for constructing various simulated workloads for a
-nova cluster."""
-
-__author__ = "David Kranz and Eugene Shih"
diff --git a/stress/basher.py b/stress/basher.py
deleted file mode 100644
index e34738f..0000000
--- a/stress/basher.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright 2011 Quanta Research Cambridge, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Class to describe actions to be included in a stress test."""
-
-
-class BasherAction(object):
- """
- Used to describe each action that you would like to include in a test run.
- """
-
- def __init__(self, test_case, probability, pargs=[], kargs={}):
- """
- `test_case` : the name of the class that implements the action
- `pargs` : positional arguments to the constructor of `test_case`
- `kargs` : keyword arguments to the constructor of `test_case`
- `probability`: frequency that each action
- """
- self.test_case = test_case
- self.pargs = pargs
- self.kargs = kargs
- self.probability = probability
-
- def invoke(self, manager, state):
- """
- Calls the `run` method of the `test_case`.
- """
- return self.test_case.run(manager, state, *self.pargs, **self.kargs)
-
- def __str__(self):
- return self.test_case.__class__.__name__
diff --git a/stress/config.py b/stress/config.py
deleted file mode 100755
index 25cb910..0000000
--- a/stress/config.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# Copyright 2011 Quanta Research Cambridge, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-class StressConfig(object):
- """Provides configuration information for whitebox stress tests."""
-
- def __init__(self, conf):
- self.conf = conf
-
- @property
- def host_private_key_path(self):
- """Path to ssh key for logging into compute nodes."""
- return self.conf.compute.path_to_private_key
-
- @property
- def host_admin_user(self):
- """Username for logging into compute nodes."""
- return self.conf.compute.ssh_user
-
- @property
- def nova_logdir(self):
- """Directory containing log files on the compute nodes."""
- return self.conf.stress.nova_logdir
-
- @property
- def controller(self):
- """Controller host."""
- return self.conf.stress.controller
-
- @property
- def max_instances(self):
- """Maximum number of instances to create during test."""
- return self.conf.stress.max_instances
diff --git a/stress/driver.py b/stress/driver.py
deleted file mode 100644
index 533c000..0000000
--- a/stress/driver.py
+++ /dev/null
@@ -1,274 +0,0 @@
-# Copyright 2011 Quanta Research Cambridge, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""The entry point for the execution of a workloadTo execute a workload.
-Users pass in a description of the workload and a nova manager object
-to the bash_openstack function call"""
-
-import datetime
-import random
-import time
-import urlparse
-
-from config import StressConfig
-from state import ClusterState
-from state import FloatingIpState
-from state import KeyPairState
-from state import VolumeState
-import stress.utils
-from test_case import logging
-
-from tempest.common.utils.data_utils import rand_name
-
-# setup logging to file
-logging.basicConfig(
- format='%(asctime)s %(name)-20s %(levelname)-8s %(message)s',
- datefmt='%m-%d %H:%M:%S',
- filename="stress.debug.log",
- filemode="w",
- level=logging.DEBUG,
-)
-
-# define a Handler which writes INFO messages or higher to the sys.stdout
-_console = logging.StreamHandler()
-_console.setLevel(logging.INFO)
-# set a format which is simpler for console use
-_formatter = logging.Formatter('%(name)-20s: %(levelname)-8s %(message)s')
-# tell the handler to use this format
-_console.setFormatter(_formatter)
-# add the handler to the root logger
-logging.getLogger('').addHandler(_console)
-
-
-def _create_cases(choice_spec):
- """
- Generate a workload of tests from workload description
- """
- cases = []
- count = 0
- for choice in choice_spec:
- p = choice.probability
- for i in range(p):
- cases.append(choice)
- i = i + p
- count = count + p
- assert(count == 100)
- return cases
-
-
-def _get_compute_nodes(keypath, user, controller):
- """
- Returns a list of active compute nodes. List is generated by running
- nova-manage on the controller.
- """
- nodes = []
- if keypath is None or user is None:
- return nodes
- cmd = "nova-manage service list | grep ^nova-compute"
- lines = stress.utils.ssh(keypath, user, controller, cmd).split('\n')
- # For example: nova-compute xg11eth0 nova enabled :-) 2011-10-31 18:57:46
- # This is fragile but there is, at present, no other way to get this info.
- for line in lines:
- words = line.split()
- if len(words) > 0 and words[4] == ":-)":
- nodes.append(words[1])
- return nodes
-
-
-def _error_in_logs(keypath, logdir, user, nodes):
- """
- Detect errors in the nova log files on the controller and compute nodes.
- """
- grep = 'egrep "ERROR\|TRACE" %s/*.log' % logdir
- for node in nodes:
- errors = stress.utils.ssh(keypath, user, node, grep, check=False)
- if len(errors) > 0:
- logging.error('%s: %s' % (node, errors))
- return True
- return False
-
-
-def create_initial_vms(manager, state, count):
- image = manager.config.compute.image_ref
- flavor = manager.config.compute.flavor_ref
- servers = []
- logging.info('Creating %d vms' % count)
- for _ in xrange(count):
- name = rand_name('initial_vm-')
- _, server = manager.servers_client.create_server(name, image, flavor)
- servers.append(server)
- for server in servers:
- manager.servers_client.wait_for_server_status(server['id'], 'ACTIVE')
- logging.info('Server Name: %s Id: %s' % (name, server['id']))
- state.set_instance_state(server['id'], (server, 'ACTIVE'))
-
-
-def create_initial_floating_ips(manager, state, count):
- logging.info('Creating %d floating ips' % count)
- for _ in xrange(count):
- _, ip = manager.floating_ips_client.create_floating_ip()
- logging.info('Ip: %s' % ip['ip'])
- state.add_floating_ip(FloatingIpState(ip))
-
-
-def create_initial_keypairs(manager, state, count):
- logging.info('Creating %d keypairs' % count)
- for _ in xrange(count):
- name = rand_name('keypair-')
- _, keypair = manager.keypairs_client.create_keypair(name)
- logging.info('Keypair: %s' % name)
- state.add_keypair(KeyPairState(keypair))
-
-
-def create_initial_volumes(manager, state, count):
- volumes = []
- logging.info('Creating %d volumes' % count)
- for _ in xrange(count):
- name = rand_name('volume-')
- _, volume = manager.volumes_client.create_volume(size=1,
- display_name=name)
- volumes.append(volume)
- for volume in volumes:
- manager.volumes_client.wait_for_volume_status(volume['id'],
- 'available')
- logging.info('Volume Name: %s Id: %s' % (name, volume['id']))
- state.add_volume(VolumeState(volume))
-
-
-def bash_openstack(manager,
- choice_spec,
- **kwargs):
- """
- Workload driver. Executes a workload as specified by the `choice_spec`
- parameter against a nova-cluster.
-
- `manager` : Manager object
- `choice_spec` : list of BasherChoice actions to run on the cluster
- `kargs` : keyword arguments to the constructor of `test_case`
- `duration` = how long this test should last (3 sec)
- `sleep_time` = time to sleep between actions (in msec)
- `test_name` = human readable workload description
- (default: unnamed test)
- `max_vms` = maximum number of instances to launch
- (default: 32)
- `seed` = random seed (default: None)
- """
- stress_config = StressConfig(manager.config)
- # get keyword arguments
- duration = kwargs.get('duration', datetime.timedelta(seconds=10))
- seed = kwargs.get('seed', None)
- sleep_time = float(kwargs.get('sleep_time', 3000)) / 1000
- max_vms = int(kwargs.get('max_vms', stress_config.max_instances))
- test_name = kwargs.get('test_name', 'unamed test')
-
- keypath = stress_config.host_private_key_path
- user = stress_config.host_admin_user
- logdir = stress_config.nova_logdir
- host = urlparse.urlparse(manager.config.identity.uri).hostname
- computes = _get_compute_nodes(keypath, user, host)
- stress.utils.execute_on_all(keypath, user, computes,
- "rm -f %s/*.log" % logdir)
- random.seed(seed)
- cases = _create_cases(choice_spec)
- state = ClusterState(max_vms=max_vms)
- create_initial_keypairs(manager, state,
- int(kwargs.get('initial_keypairs', 0)))
- create_initial_vms(manager, state,
- int(kwargs.get('initial_vms', 0)))
- create_initial_floating_ips(manager, state,
- int(kwargs.get('initial_floating_ips', 0)))
- create_initial_volumes(manager, state,
- int(kwargs.get('initial_volumes', 0)))
- test_end_time = time.time() + duration.seconds
-
- retry_list = []
- last_retry = time.time()
- cooldown = False
- logcheck_count = 0
- test_succeeded = True
- logging.debug('=== Test \"%s\" on %s ===' %
- (test_name, time.asctime(time.localtime())))
- for kw in kwargs:
- logging.debug('\t%s = %s', kw, kwargs[kw])
-
- while True:
- if not cooldown:
- if time.time() < test_end_time:
- case = random.choice(cases)
- logging.debug('Chose %s' % case)
- retry = case.invoke(manager, state)
- if retry is not None:
- retry_list.append(retry)
- else:
- logging.info('Cooling down...')
- cooldown = True
- if cooldown and len(retry_list) == 0:
- if _error_in_logs(keypath, logdir, user, computes):
- test_succeeded = False
- break
- # Retry verifications every 5 seconds.
- if time.time() - last_retry > 5:
- logging.debug('retry verifications for %d tasks', len(retry_list))
- new_retry_list = []
- for v in retry_list:
- v.check_timeout()
- if not v.retry():
- new_retry_list.append(v)
- retry_list = new_retry_list
- last_retry = time.time()
- time.sleep(sleep_time)
- # Check error logs after 100 actions
- if logcheck_count > 100:
- if _error_in_logs(keypath, logdir, user, computes):
- test_succeeded = False
- break
- else:
- logcheck_count = 0
- else:
- logcheck_count = logcheck_count + 1
- # Cleanup
- logging.info('Cleaning up: terminating virtual machines...')
- vms = state.get_instances()
- active_vms = [v for _k, v in vms.iteritems()
- if v and v[1] != 'TERMINATING']
- for target in active_vms:
- manager.servers_client.delete_server(target[0]['id'])
- # check to see that the server was actually killed
- for target in active_vms:
- kill_id = target[0]['id']
- i = 0
- while True:
- try:
- manager.servers_client.get_server(kill_id)
- except Exception:
- break
- i += 1
- if i > 60:
- _error_in_logs(keypath, logdir, user, computes)
- raise Exception("Cleanup timed out")
- time.sleep(1)
- logging.info('killed %s' % kill_id)
- state.delete_instance_state(kill_id)
- for floating_ip_state in state.get_floating_ips():
- manager.floating_ips_client.delete_floating_ip(
- floating_ip_state.resource_id)
- for keypair_state in state.get_keypairs():
- manager.keypairs_client.delete_keypair(keypair_state.name)
- for volume_state in state.get_volumes():
- manager.volumes_client.delete_volume(volume_state.resource_id)
-
- if test_succeeded:
- logging.info('*** Test succeeded ***')
- else:
- logging.info('*** Test had errors ***')
- return test_succeeded
diff --git a/stress/pending_action.py b/stress/pending_action.py
deleted file mode 100644
index abfa74d..0000000
--- a/stress/pending_action.py
+++ /dev/null
@@ -1,91 +0,0 @@
-# Copyright 2011 Quanta Research Cambridge, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Describe follow-up actions using `PendingAction` class to verify
-that nova API calls such as create/delete are completed"""
-
-import logging
-import time
-
-from tempest.exceptions import TimeoutException
-
-
-class PendingAction(object):
- """
- Initialize and describe actions to verify that a Nova API call
- is successful.
- """
-
- def __init__(self, nova_manager, timeout=None):
- """
- `nova_manager` : Manager object.
- `timeout` : time before we declare a TimeoutException
- """
- if timeout is None:
- timeout = nova_manager.config.compute.build_timeout
- self._manager = nova_manager
- self._logger = logging.getLogger(self.__class__.__name__)
- self._start_time = time.time()
- self._timeout = timeout
-
- def retry(self):
- """
- Invoked by user of this class to verify completion of
- previous TestCase actions
- """
- return False
-
- def check_timeout(self):
- """Check for timeouts of TestCase actions."""
- time_diff = time.time() - self._start_time
- if time_diff > self._timeout:
- self._logger.error('%s exceeded timeout of %d' %
- (self.__class__.__name__, self._timeout))
- raise TimeoutException
-
- def elapsed(self):
- return time.time() - self._start_time
-
-
-class PendingServerAction(PendingAction):
- """
- Initialize and describe actions to verify that a Nova API call that
- changes server state is successful.
- """
-
- def __init__(self, nova_manager, state, target_server, timeout=None):
- """
- `state` : externally maintained data structure about
- state of VMs or other persistent objects in
- the nova cluster
- `target_server` : server that actions were performed on
- """
- super(PendingServerAction, self).__init__(nova_manager,
- timeout=timeout)
- self._state = state
- self._target = target_server
-
- def _check_for_status(self, state_string):
- """Check to see if the machine has transitioned states."""
- t = time.time() # for debugging
- target = self._target
- _resp, body = self._manager.servers_client.get_server(target['id'])
- if body['status'] != state_string:
- # grab the actual state as we think it is
- temp_obj = self._state.get_instances()[target['id']]
- self._logger.debug("machine %s in state %s" %
- (target['id'], temp_obj[1]))
- self._logger.debug('%s, time: %d' % (temp_obj[1], time.time() - t))
- return temp_obj[1]
- self._logger.debug('%s, time: %d' % (state_string, time.time() - t))
- return state_string
diff --git a/stress/state.py b/stress/state.py
deleted file mode 100644
index 9c31b76..0000000
--- a/stress/state.py
+++ /dev/null
@@ -1,117 +0,0 @@
-# Copyright 2011 Quanta Research Cambridge, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-class ClusterState(object):
- """A class to store the state of various persistent objects in the Nova
- cluster, e.g. instances, volumes. Use methods to query to state which than
- can be compared to the current state of the objects in Nova.
- """
-
- def __init__(self, **kwargs):
- self._max_vms = kwargs.get('max_vms', 32)
- self._instances = {}
- self._floating_ips = []
- self._keypairs = []
- self._volumes = []
-
- # instance state methods
- def get_instances(self):
- """return the instances dictionary that we believe are in cluster."""
- return self._instances
-
- def get_max_instances(self):
- """return the maximum number of instances we can create."""
- return self._max_vms
-
- def set_instance_state(self, key, val):
- """Store `val` in the dictionary indexed at `key`."""
- self._instances[key] = val
-
- def delete_instance_state(self, key):
- """Delete state indexed at `key`."""
- del self._instances[key]
-
- #floating_ip state methods
- def get_floating_ips(self):
- """return the floating ips list for the cluster."""
- return self._floating_ips
-
- def add_floating_ip(self, floating_ip_state):
- """Add floating ip."""
- self._floating_ips.append(floating_ip_state)
-
- def remove_floating_ip(self, floating_ip_state):
- """Remove floating ip."""
- self._floating_ips.remove(floating_ip_state)
-
- # keypair methods
- def get_keypairs(self):
- """return the keypairs list for the cluster."""
- return self._keypairs
-
- def add_keypair(self, keypair_state):
- """Add keypair."""
- self._keypairs.append(keypair_state)
-
- def remove_keypair(self, keypair_state):
- """Remove keypair."""
- self._keypairs.remove(keypair_state)
-
- # volume methods
- def get_volumes(self):
- """return the volumes list for the cluster."""
- return self._volumes
-
- def add_volume(self, volume_state):
- """Add volume."""
- self._volumes.append(volume_state)
-
- def remove_volume(self, volume_state):
- """Remove volume."""
- self._volumes.remove(volume_state)
-
-
-class ServerAssociatedState(object):
- """Class that tracks resources that are associated with a particular server
- such as a volume or floating ip.
- """
-
- def __init__(self, resource_id):
- # The id of the server.
- self.server_id = None
- # The id of the resource that is attached to the server.
- self.resource_id = resource_id
- # True if in the process of attaching/detaching the resource.
- self.change_pending = False
-
-
-class FloatingIpState(ServerAssociatedState):
-
- def __init__(self, ip_desc):
- super(FloatingIpState, self).__init__(ip_desc['id'])
- self.address = ip_desc['ip']
-
-
-class VolumeState(ServerAssociatedState):
-
- def __init__(self, volume_desc):
- super(VolumeState, self).__init__(volume_desc['id'])
-
-
-class KeyPairState(object):
-
- def __init__(self, keypair_spec):
- self.name = keypair_spec['name']
- self.private_key = keypair_spec['private_key']
diff --git a/stress/test_case.py b/stress/test_case.py
deleted file mode 100644
index d04ace0..0000000
--- a/stress/test_case.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright 2011 Quanta Research Cambridge, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Abstract class for implementing an action. You only need to override
-the `run` method which specifies all the actual nova API class you wish
-to make."""
-
-
-import logging
-
-
-class StressTestCase(object):
-
- def __init__(self):
- self._logger = logging.getLogger(self.__class__.__name__)
-
- def run(self, nova_manager, state_obj, *pargs, **kargs):
- """Nova API methods to call that would modify state of the cluster."""
- return
diff --git a/stress/test_floating_ips.py b/stress/test_floating_ips.py
deleted file mode 100755
index c5bad95..0000000
--- a/stress/test_floating_ips.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# Copyright 2011 Quanta Research Cambridge, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import random
-import telnetlib
-
-from stress import pending_action
-from stress import test_case
-
-
-class TestChangeFloatingIp(test_case.StressTestCase):
- """Add or remove a floating ip from a vm."""
-
- def __init__(self):
- super(TestChangeFloatingIp, self).__init__()
- self.server_ids = None
-
- def run(self, manager, state, *pargs, **kwargs):
- if self.server_ids is None:
- vms = state.get_instances()
- self.server_ids = [k for k, v in vms.iteritems()]
- floating_ip = random.choice(state.get_floating_ips())
- if floating_ip.change_pending:
- return None
- floating_ip.change_pending = True
- timeout = int(kwargs.get('timeout', 60))
- cli = manager.floating_ips_client
- if floating_ip.server_id is None:
- server = random.choice(self.server_ids)
- address = floating_ip.address
- self._logger.info('Adding %s to server %s' % (address, server))
- resp, body = cli.associate_floating_ip_to_server(address,
- server)
- if resp.status != 202:
- raise Exception("response: %s body: %s" % (resp, body))
- floating_ip.server_id = server
- return VerifyChangeFloatingIp(manager, floating_ip,
- timeout, add=True)
- else:
- server = floating_ip.server_id
- address = floating_ip.address
- self._logger.info('Removing %s from server %s' % (address, server))
- resp, body = cli.disassociate_floating_ip_from_server(address,
- server)
- if resp.status != 202:
- raise Exception("response: %s body: %s" % (resp, body))
- return VerifyChangeFloatingIp(manager, floating_ip,
- timeout, add=False)
-
-
-class VerifyChangeFloatingIp(pending_action.PendingAction):
- """Verify that floating ip was changed."""
- def __init__(self, manager, floating_ip, timeout, add=None):
- super(VerifyChangeFloatingIp, self).__init__(manager, timeout=timeout)
- self.floating_ip = floating_ip
- self.add = add
-
- def retry(self):
- """
- Check to see that we can contact the server at its new address.
- """
- try:
- conn = telnetlib.Telnet(self.floating_ip.address, 22, timeout=0.5)
- conn.close()
- if self.add:
- self._logger.info('%s added [%.1f secs elapsed]' %
- (self.floating_ip.address, self.elapsed()))
- self.floating_ip.change_pending = False
- return True
- except Exception:
- if not self.add:
- self._logger.info('%s removed [%.1f secs elapsed]' %
- (self.floating_ip.address, self.elapsed()))
- self.floating_ip.change_pending = False
- self.floating_ip.server_id = None
- return True
- return False
diff --git a/stress/test_server_actions.py b/stress/test_server_actions.py
deleted file mode 100644
index 3a7094d..0000000
--- a/stress/test_server_actions.py
+++ /dev/null
@@ -1,275 +0,0 @@
-# Copyright 2011 Quanta Research Cambridge, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Defines various sub-classes of the `StressTestCase` and
-`PendingServerAction` class. Sub-classes of StressTestCase implement various
-API calls on the Nova cluster having to do with Server Actions. Each
-sub-class will have a corresponding PendingServerAction. These pending
-actions veriy that the API call was successful or not."""
-
-import random
-
-from stress import pending_action
-from stress import test_case
-import stress.utils
-from tempest.exceptions import Duplicate
-
-
-class TestRebootVM(test_case.StressTestCase):
- """Reboot a server."""
-
- def run(self, manager, state, *pargs, **kwargs):
- """
- Send an HTTP POST request to the nova cluster to reboot a random
- server. Update state of object in `state` variable to indicate that
- it is rebooting.
- `manager` : Manager object
- `state` : `State` object describing our view of state of cluster
- `pargs` : positional arguments
- `kwargs` : keyword arguments, which include:
- `timeout` : how long to wait before issuing Exception
- `type` : reboot type [SOFT or HARD] (default is SOFT)
- """
-
- vms = state.get_instances()
- active_vms = [v for k, v in vms.iteritems() if v and v[1] == 'ACTIVE']
- # no active vms, so return null
- if not active_vms:
- self._logger.info('no ACTIVE instances to reboot')
- return
-
- _reboot_arg = kwargs.get('type', 'SOFT')
-
- # select active vm to reboot and then send request to nova controller
- target = random.choice(active_vms)
- reboot_target = target[0]
- # It seems that doing a reboot when in reboot is an error.
- try:
- response, body = manager.servers_client.reboot(reboot_target['id'],
- _reboot_arg)
- except Duplicate:
- return
-
- if (response.status != 202):
- self._logger.error("response: %s" % response)
- raise Exception
-
- if _reboot_arg == 'SOFT':
- reboot_state = 'REBOOT'
- else:
- reboot_state = 'HARD_REBOOT'
-
- self._logger.info('waiting for machine %s to change to %s' %
- (reboot_target['id'], reboot_state))
-
- return VerifyRebootVM(manager,
- state,
- reboot_target,
- reboot_state=reboot_state)
-
-
-class VerifyRebootVM(pending_action.PendingServerAction):
- """Class to verify that the reboot completed."""
- States = stress.utils.enum('REBOOT_CHECK', 'ACTIVE_CHECK')
-
- def __init__(self, manager, state, target_server,
- reboot_state=None,
- ip_addr=None):
- super(VerifyRebootVM, self).__init__(manager,
- state,
- target_server)
- self._reboot_state = reboot_state
- self._retry_state = self.States.REBOOT_CHECK
-
- def retry(self):
- """
- Check to see that the server of interest has actually rebooted. Update
- state to indicate that server is running again.
- """
- # don't run reboot verification if target machine has been
- # deleted or is going to be deleted
- target_id = self._target['id']
- if (self._target['id'] not in self._state.get_instances().keys() or
- self._state.get_instances()[target_id][1] == 'TERMINATING'):
- self._logger.debug('machine %s is deleted or TERMINATING' %
- self._target['id'])
- return True
-
- reboot_state = self._reboot_state
- if self._retry_state == self.States.REBOOT_CHECK:
- server_state = self._check_for_status(reboot_state)
- if server_state == reboot_state:
- self._logger.info('machine %s ACTIVE -> %s' %
- (self._target['id'], reboot_state))
- self._state.set_instance_state(self._target['id'],
- (self._target, reboot_state))
- self._retry_state = self.States.ACTIVE_CHECK
- elif server_state == 'ACTIVE':
- # machine must have gone ACTIVE -> REBOOT ->ACTIVE
- self._retry_state = self.States.ACTIVE_CHECK
-
- elif self._retry_state == self.States.ACTIVE_CHECK:
- if not self._check_for_status('ACTIVE'):
- return False
- target = self._target
- self._logger.info('machine %s %s -> ACTIVE [%.1f secs elapsed]' %
- (target['id'], reboot_state, self.elapsed()))
- self._state.set_instance_state(target['id'],
- (target, 'ACTIVE'))
-
- return True
-
-# This code needs to be tested against a cluster that supports resize.
-#class TestResizeVM(test_case.StressTestCase):
-# """Resize a server (change flavors)."""
-#
-# def run(self, manager, state, *pargs, **kwargs):
-# """
-# Send an HTTP POST request to the nova cluster to resize a random
-# server. Update `state` to indicate server is rebooting.
-#
-# `manager` : Manager object.
-# `state` : `State` object describing our view of state of cluster
-# `pargs` : positional arguments
-# `kwargs` : keyword arguments, which include:
-# `timeout` : how long to wait before issuing Exception
-# """
-#
-# vms = state.get_instances()
-# active_vms = [v for k, v in vms.iteritems() if v and v[1] == 'ACTIVE']
-# # no active vms, so return null
-# if not active_vms:
-# self._logger.debug('no ACTIVE instances to resize')
-# return
-#
-# target = random.choice(active_vms)
-# resize_target = target[0]
-# print resize_target
-#
-# _timeout = kwargs.get('timeout', 600)
-#
-# # determine current flavor type, and resize to a different type
-# # m1.tiny -> m1.small, m1.small -> m1.tiny
-# curr_size = int(resize_target['flavor']['id'])
-# if curr_size == 1:
-# new_size = 2
-# else:
-# new_size = 1
-# flavor_type = { 'flavorRef': new_size } # resize to m1.small
-#
-# post_body = json.dumps({'resize' : flavor_type})
-# url = '/servers/%s/action' % resize_target['id']
-# (response, body) = manager.request('POST',
-# url,
-# body=post_body)
-#
-# if (response.status != 202):
-# self._logger.error("response: %s" % response)
-# raise Exception
-#
-# state_name = check_for_status(manager, resize_target, 'RESIZE')
-#
-# if state_name == 'RESIZE':
-# self._logger.info('machine %s: ACTIVE -> RESIZE' %
-# resize_target['id'])
-# state.set_instance_state(resize_target['id'],
-# (resize_target, 'RESIZE'))
-#
-# return VerifyResizeVM(manager,
-# state,
-# resize_target,
-# state_name=state_name,
-# timeout=_timeout)
-#
-#class VerifyResizeVM(pending_action.PendingServerAction):
-# """Verify that resizing of a VM was successful."""
-# States = enum('VERIFY_RESIZE_CHECK', 'ACTIVE_CHECK')
-#
-# def __init__(self, manager, state, created_server,
-# state_name=None,
-# timeout=300):
-# super(VerifyResizeVM, self).__init__(manager,
-# state,
-# created_server,
-# timeout=timeout)
-# self._retry_state = self.States.VERIFY_RESIZE_CHECK
-# self._state_name = state_name
-#
-# def retry(self):
-# """
-# Check to see that the server was actually resized. And change `state`
-# of server to running again.
-# """
-# # don't run resize if target machine has been deleted
-# # or is going to be deleted
-# if (self._target['id'] not in self._state.get_instances().keys() or
-# self._state.get_instances()[self._target['id']][1] ==
-# 'TERMINATING'):
-# self._logger.debug('machine %s is deleted or TERMINATING' %
-# self._target['id'])
-# return True
-#
-# if self._retry_state == self.States.VERIFY_RESIZE_CHECK:
-# if self._check_for_status('VERIFY_RESIZE') == 'VERIFY_RESIZE':
-# # now issue command to CONFIRM RESIZE
-# post_body = json.dumps({'confirmResize' : null})
-# url = '/servers/%s/action' % self._target['id']
-# (response, body) = manager.request('POST',
-# url,
-# body=post_body)
-# if (response.status != 204):
-# self._logger.error("response: %s" % response)
-# raise Exception
-#
-# self._logger.info(
-# 'CONFIRMING RESIZE of machine %s [%.1f secs elapsed]' %
-# (self._target['id'], self.elapsed())
-# )
-# state.set_instance_state(self._target['id'],
-# (self._target, 'CONFIRM_RESIZE'))
-#
-# # change states
-# self._retry_state = self.States.ACTIVE_CHECK
-#
-# return False
-#
-# elif self._retry_state == self.States.ACTIVE_CHECK:
-# if not self._check_manager("ACTIVE"):
-# return False
-# else:
-# server = self._manager.get_server(self._target['id'])
-#
-# # Find private IP of server?
-# try:
-# (_, network) = server['addresses'].popitem()
-# ip = network[0]['addr']
-# except KeyError:
-# self._logger.error(
-# 'could not get ip address for machine %s' %
-# self._target['id']
-# )
-# raise Exception
-#
-# self._logger.info(
-# 'machine %s: VERIFY_RESIZE -> ACTIVE [%.1f sec elapsed]' %
-# (self._target['id'], self.elapsed())
-# )
-# self._state.set_instance_state(self._target['id'],
-# (self._target, 'ACTIVE'))
-#
-# return True
-#
-# else:
-# # should never get here
-# self._logger.error('Unexpected state')
-# raise Exception
diff --git a/stress/test_servers.py b/stress/test_servers.py
deleted file mode 100644
index 1dd72f1..0000000
--- a/stress/test_servers.py
+++ /dev/null
@@ -1,318 +0,0 @@
-# Copyright 2011 Quanta Research Cambridge, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Defines various sub-classes of the `StressTestCase` and
-`PendingServerAction` class. Sub-classes of StressTestCase implement various
-API calls on the Nova cluster having to do with creating and deleting VMs.
-Each sub-class will have a corresponding PendingServerAction. These pending
-actions veriy that the API call was successful or not."""
-
-import random
-
-from stress import pending_action
-from stress import test_case
-
-
-class TestCreateVM(test_case.StressTestCase):
- """Create a virtual machine in the Nova cluster."""
- _vm_id = 0
-
- def run(self, manager, state, *pargs, **kwargs):
- """
- Send an HTTP POST request to the nova cluster to build a
- server. Update the state variable to track state of new server
- and set to PENDING state.
-
- `manager` : Manager object.
- `state` : `State` object describing our view of state of cluster
- `pargs` : positional arguments
- `kwargs` : keyword arguments, which include:
- `key_name` : name of keypair
- `image_ref` : index to image types availablexs
- `flavor_ref`: index to flavor types available
- (default = 1, which is tiny)
- """
-
- # restrict number of instances we can launch
- if len(state.get_instances()) >= state.get_max_instances():
- self._logger.debug("maximum number of instances created: %d" %
- state.get_max_instances())
- return None
-
- _key_name = kwargs.get('key_name', '')
- _image_ref = kwargs.get('image_ref', manager.config.compute.image_ref)
- _flavor_ref = kwargs.get('flavor_ref',
- manager.config.compute.flavor_ref)
-
- expected_server = {
- 'name': 'server' + str(TestCreateVM._vm_id),
- 'metadata': {
- 'key1': 'value1',
- 'key2': 'value2',
- },
- 'imageRef': _image_ref,
- 'flavorRef': _flavor_ref,
- 'adminPass': 'testpwd',
- 'key_name': _key_name,
- }
- TestCreateVM._vm_id = TestCreateVM._vm_id + 1
- create_server = manager.servers_client.create_server
- response, body = create_server(expected_server['name'],
- _image_ref,
- _flavor_ref,
- meta=expected_server['metadata'],
- adminPass=expected_server['adminPass'])
-
- if (response.status != 202):
- self._logger.error("response: %s" % response)
- self._logger.error("body: %s" % body)
- raise Exception
-
- created_server = body
-
- self._logger.info('setting machine %s to BUILD' %
- created_server['id'])
- state.set_instance_state(created_server['id'],
- (created_server, 'BUILD'))
-
- return VerifyCreateVM(manager,
- state,
- created_server,
- expected_server)
-
-
-class VerifyCreateVM(pending_action.PendingServerAction):
- """Verify that VM was built and is running."""
- def __init__(self, manager,
- state,
- created_server,
- expected_server):
- super(VerifyCreateVM, self).__init__(manager,
- state,
- created_server,
- )
- self._expected = expected_server
-
- def retry(self):
- """
- Check to see that the server was created and is running.
- Update local view of state to indicate that it is running.
- """
- # don't run create verification
- # if target machine has been deleted or is going to be deleted
- target_id = self._target['id']
- if (self._target['id'] not in self._state.get_instances().keys() or
- self._state.get_instances()[target_id][1] == 'TERMINATING'):
- self._logger.info('machine %s is deleted or TERMINATING' %
- self._target['id'])
- return True
-
- admin_pass = self._target['adminPass']
- # Could check more things here.
- if (self._expected['adminPass'] != admin_pass):
- self._logger.error('expected: %s' %
- (self._expected['adminPass']))
- self._logger.error('returned: %s' %
- (admin_pass))
- raise Exception
-
- if self._check_for_status('ACTIVE') != 'ACTIVE':
- return False
-
- self._logger.info('machine %s: BUILD -> ACTIVE [%.1f secs elapsed]' %
- (self._target['id'], self.elapsed()))
- self._state.set_instance_state(self._target['id'],
- (self._target, 'ACTIVE'))
- return True
-
-
-class TestKillActiveVM(test_case.StressTestCase):
- """Class to destroy a random ACTIVE server."""
- def run(self, manager, state, *pargs, **kwargs):
- """
- Send an HTTP POST request to the nova cluster to destroy
- a random ACTIVE server. Update `state` to indicate TERMINATING.
-
- `manager` : Manager object.
- `state` : `State` object describing our view of state of cluster
- `pargs` : positional arguments
- `kwargs` : keyword arguments, which include:
- `timeout` : how long to wait before issuing Exception
- """
- # check for active instances
- vms = state.get_instances()
- active_vms = [v for k, v in vms.iteritems() if v and v[1] == 'ACTIVE']
- # no active vms, so return null
- if not active_vms:
- self._logger.info('no ACTIVE instances to delete')
- return
-
- _timeout = kwargs.get('timeout', manager.config.compute.build_timeout)
-
- target = random.choice(active_vms)
- killtarget = target[0]
- manager.servers_client.delete_server(killtarget['id'])
- self._logger.info('machine %s: ACTIVE -> TERMINATING' %
- killtarget['id'])
- state.set_instance_state(killtarget['id'],
- (killtarget, 'TERMINATING'))
- return VerifyKillActiveVM(manager, state,
- killtarget, timeout=_timeout)
-
-
-class VerifyKillActiveVM(pending_action.PendingServerAction):
- """Verify that server was destroyed."""
-
- def retry(self):
- """
- Check to see that the server of interest is destroyed. Update
- state to indicate that server is destroyed by deleting it from local
- view of state.
- """
- tid = self._target['id']
- # if target machine has been deleted from the state, then it was
- # already verified to be deleted
- if (not tid in self._state.get_instances().keys()):
- return False
-
- try:
- self._manager.servers_client.get_server(tid)
- except Exception:
- # if we get a 404 response, is the machine really gone?
- target = self._target
- self._logger.info('machine %s: DELETED [%.1f secs elapsed]' %
- (target['id'], self.elapsed()))
- self._state.delete_instance_state(target['id'])
- return True
-
- return False
-
-
-class TestKillAnyVM(test_case.StressTestCase):
- """Class to destroy a random server regardless of state."""
-
- def run(self, manager, state, *pargs, **kwargs):
- """
- Send an HTTP POST request to the nova cluster to destroy
- a random server. Update state to TERMINATING.
-
- `manager` : Manager object.
- `state` : `State` object describing our view of state of cluster
- `pargs` : positional arguments
- `kwargs` : keyword arguments, which include:
- `timeout` : how long to wait before issuing Exception
- """
-
- vms = state.get_instances()
- # no vms, so return null
- if not vms:
- self._logger.info('no active instances to delete')
- return
-
- _timeout = kwargs.get('timeout', manager.config.compute.build_timeout)
-
- target = random.choice(vms)
- killtarget = target[0]
-
- manager.servers_client.delete_server(killtarget['id'])
- self._state.set_instance_state(killtarget['id'],
- (killtarget, 'TERMINATING'))
- # verify object will do the same thing as the active VM
- return VerifyKillAnyVM(manager, state, killtarget, timeout=_timeout)
-
-VerifyKillAnyVM = VerifyKillActiveVM
-
-
-class TestUpdateVMName(test_case.StressTestCase):
- """Class to change the name of the active server."""
- def run(self, manager, state, *pargs, **kwargs):
- """
- Issue HTTP POST request to change the name of active server.
- Update state of server to reflect name changing.
-
- `manager` : Manager object.
- `state` : `State` object describing our view of state of cluster
- `pargs` : positional arguments
- `kwargs` : keyword arguments, which include:
- `timeout` : how long to wait before issuing Exception
- """
-
- # select one machine from active ones
- vms = state.get_instances()
- active_vms = [v for k, v in vms.iteritems() if v and v[1] == 'ACTIVE']
- # no active vms, so return null
- if not active_vms:
- self._logger.info('no active instances to update')
- return
-
- _timeout = kwargs.get('timeout', manager.config.compute.build_timeout)
-
- target = random.choice(active_vms)
- update_target = target[0]
-
- # Update name by appending '_updated' to the name
- new_name = update_target['name'] + '_updated'
- (response, body) = \
- manager.servers_client.update_server(update_target['id'],
- name=new_name)
- if (response.status != 200):
- self._logger.error("response: %s " % response)
- self._logger.error("body: %s " % body)
- raise Exception
-
- assert(new_name == body['name'])
-
- self._logger.info('machine %s: ACTIVE -> UPDATING_NAME' %
- body['id'])
- state.set_instance_state(body['id'],
- (body, 'UPDATING_NAME'))
-
- return VerifyUpdateVMName(manager,
- state,
- body,
- timeout=_timeout)
-
-
-class VerifyUpdateVMName(pending_action.PendingServerAction):
- """Check that VM has new name."""
- def retry(self):
- """
- Check that VM has new name. Update local view of `state` to RUNNING.
- """
- # don't run update verification
- # if target machine has been deleted or is going to be deleted
- target_id = self._target['id']
- if (not self._target['id'] in self._state.get_instances().keys() or
- self._state.get_instances()[target_id][1] == 'TERMINATING'):
- return False
-
- response, body = \
- self._manager.serverse_client.get_server(self._target['id'])
- if (response.status != 200):
- self._logger.error("response: %s " % response)
- self._logger.error("body: %s " % body)
- raise Exception
-
- if self._target['name'] != body['name']:
- self._logger.error(self._target['name'] +
- ' vs. ' +
- body['name'])
- raise Exception
-
- # log the update
- self._logger.info('machine %s: UPDATING_NAME -> ACTIVE' %
- self._target['id'])
- self._state.set_instance_state(self._target['id'],
- (body,
- 'ACTIVE'))
- return True
diff --git a/stress/tests/create_kill.py b/stress/tests/create_kill.py
deleted file mode 100644
index 30ddfd7..0000000
--- a/stress/tests/create_kill.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright 2011 Quanta Research Cambridge, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""More aggressive test that creates and destroys VMs with shorter
-sleep times"""
-
-import datetime
-import time
-
-from stress.basher import BasherAction
-from stress.driver import bash_openstack
-from stress.test_servers import TestCreateVM
-from stress.test_servers import TestKillActiveVM
-from tempest import clients
-
-choice_spec = [
- BasherAction(TestCreateVM(), 50),
- BasherAction(TestKillActiveVM(), 50)
-]
-
-nova = clients.Manager()
-
-bash_openstack(nova,
- choice_spec,
- duration=datetime.timedelta(seconds=180),
- sleep_time=100, # in milliseconds
- seed=int(time.time()),
- test_name="create and delete",
- )
diff --git a/stress/tests/floating_ips.py b/stress/tests/floating_ips.py
deleted file mode 100755
index b1b3778..0000000
--- a/stress/tests/floating_ips.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright 2011 Quanta Research Cambridge, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Stress test that associates/disasssociates floating ips."""
-
-import datetime
-
-from stress.basher import BasherAction
-from stress.driver import bash_openstack
-from stress.test_floating_ips import TestChangeFloatingIp
-from tempest import clients
-
-
-choice_spec = [
- BasherAction(TestChangeFloatingIp(), 100)
-]
-
-nova = clients.Manager()
-
-bash_openstack(nova,
- choice_spec,
- duration=datetime.timedelta(seconds=300),
- test_name="floating_ips",
- initial_floating_ips=8,
- initial_vms=8)
diff --git a/stress/tests/hard_reboots.py b/stress/tests/hard_reboots.py
deleted file mode 100644
index 50a2e91..0000000
--- a/stress/tests/hard_reboots.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright 2011 Quanta Research Cambridge, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Test that reboots random instances in a Nova cluster."""
-
-import datetime
-import time
-
-from stress.basher import BasherAction
-from stress.driver import bash_openstack
-from stress.test_server_actions import TestRebootVM
-from stress.test_servers import TestCreateVM
-from tempest import clients
-
-choice_spec = [
- BasherAction(TestCreateVM(), 50),
- BasherAction(TestRebootVM(), 50,
- kargs={'type': 'HARD'})
-]
-
-nova = clients.Manager()
-
-bash_openstack(nova,
- choice_spec,
- duration=datetime.timedelta(seconds=180),
- sleep_time=500, # in milliseconds
- seed=int(time.time()),
- test_name="hard reboots",
- )
diff --git a/stress/tests/user_script_sample.py b/stress/tests/user_script_sample.py
deleted file mode 100644
index d941ea0..0000000
--- a/stress/tests/user_script_sample.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# Copyright 2011 Quanta Research Cambridge, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Sample stress test that creates a few virtual machines and then
-destroys them"""
-
-import datetime
-
-from stress.basher import BasherAction
-from stress.driver import bash_openstack
-from stress.test_servers import TestCreateVM
-from stress.test_servers import TestKillActiveVM
-from tempest import clients
-
-choice_spec = [
- BasherAction(TestCreateVM(), 50,
- kargs={'timeout': '60'}),
- BasherAction(TestKillActiveVM(), 50)
-]
-
-
-nova = clients.Manager()
-
-bash_openstack(nova,
- choice_spec,
- duration=datetime.timedelta(seconds=10),
- sleep_time=1000, # in milliseconds
- seed=None,
- test_name="simple create and delete",
- max_vms=4)
diff --git a/stress/tools/nova_destroy_all.py b/stress/tools/nova_destroy_all.py
deleted file mode 100755
index 00d8883..0000000
--- a/stress/tools/nova_destroy_all.py
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/usr/bin/env python
-
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 Quanta Research Cambridge, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from novaclient.v1_1 import client
-import tempest.config
-
-# get the environment variables for credentials
-identity = tempest.config.TempestConfig().identity
-
-nt = client.Client(identity.username, identity.password,
- identity.tenant_name, identity.uri)
-
-flavor_list = nt.flavors.list()
-server_list = nt.servers.list()
-images_list = nt.images.list()
-keypairs_list = nt.keypairs.list()
-floating_ips_list = nt.floating_ips.list()
-volumes_list = nt.volumes.list()
-
-print "total servers: %3d, total flavors: %3d, total images: %3d," % \
- (len(server_list),
- len(flavor_list),
- len(images_list)),
-
-print "total keypairs: %3d, total floating ips: %3d" % \
- (len(keypairs_list),
- len(floating_ips_list))
-
-print "deleting all servers"
-for s in server_list:
- s.delete()
-
-print "deleting all keypairs"
-for s in keypairs_list:
- s.delete()
-
-print "deleting all floating_ips"
-for s in floating_ips_list:
- s.delete()
-
-print "deleting all volumes"
-for s in volumes_list:
- s.delete()
diff --git a/stress/tools/nova_status.py b/stress/tools/nova_status.py
deleted file mode 100755
index ee20282..0000000
--- a/stress/tools/nova_status.py
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/usr/bin/env python
-
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 Quanta Research Cambridge, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from novaclient.v1_1 import client
-import tempest.config
-
-# get the environment variables for credentials
-identity = tempest.config.TempestConfig().identity
-print identity.username, identity.password,\
- identity.tenant_name, identity.uri
-
-nt = client.Client(identity.username, identity.password,
- identity.tenant_name, identity.uri)
-
-flavor_list = nt.flavors.list()
-server_list = nt.servers.list()
-images_list = nt.images.list()
-keypairs_list = nt.keypairs.list()
-floating_ips_list = nt.floating_ips.list()
-
-print "total servers: %3d, total flavors: %3d, total images: %3d" % \
- (len(server_list),
- len(flavor_list),
- len(images_list))
-
-print "total keypairs: %3d, total floating ips: %3d" % \
- (len(keypairs_list),
- len(floating_ips_list))
-
-print "flavors:\t", flavor_list
-print "servers:\t", server_list
-print "images: \t", images_list
-print "keypairs:\t", keypairs_list
-print "floating ips:\t", floating_ips_list
diff --git a/stress/utils.py b/stress/utils.py
deleted file mode 100644
index ec63b99..0000000
--- a/stress/utils.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 Quanta Research Cambridge, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import shlex
-import subprocess
-
-SSH_OPTIONS = (" -q" +
- " -o UserKnownHostsFile=/dev/null" +
- " -o StrictHostKeyChecking=no -i ")
-
-
-def get_ssh_options(keypath):
- return SSH_OPTIONS + keypath
-
-
-def scp(keypath, args):
- options = get_ssh_options(keypath)
- return subprocess.check_call(shlex.split("scp" + options + args))
-
-
-def ssh(keypath, user, node, command, check=True):
- command = 'sudo ' + command
- command = "ssh %s %s@%s %s" % (get_ssh_options(keypath), user,
- node, command)
- popenargs = shlex.split(command)
- process = subprocess.Popen(popenargs, stdout=subprocess.PIPE)
- output, unused_err = process.communicate()
- retcode = process.poll()
- if retcode and check:
- raise Exception("%s: ssh failed with retcode: %s" % (node, retcode))
- return output
-
-
-def execute_on_all(keypath, user, nodes, command):
- for node in nodes:
- ssh(keypath, user, node, command)
-
-
-def enum(*sequential, **named):
- """Create auto-incremented enumerated types."""
- enums = dict(zip(sequential, range(len(sequential))), **named)
- return type('Enum', (), enums)
diff --git a/tempest/README.rst b/tempest/README.rst
new file mode 100644
index 0000000..c41ef96
--- /dev/null
+++ b/tempest/README.rst
@@ -0,0 +1,98 @@
+Tempest Field Guide
+-------------------
+
+Tempest is designed to be useful for a large number of different
+environments. This includes being useful for gating commits to
+OpenStack core projects, being used to validate OpenStack cloud
+implementations for both correctness, as well as a burn in tool for
+OpenStack clouds.
+
+As such Tempest tests come in many flavors, each with their own rules
+and guidelines. Below is the proposed Havana restructuring for Tempest
+to make this clear.
+
+tempest/
+ 3rdparty/ - 3rd party api tests
+ api/ - API tests
+ cli/ - CLI tests
+ scenario/ - complex scenario tests
+ stress/ - stress tests
+ whitebox/ - white box testing
+
+Each of these directories contains different types of tests. What
+belongs in each directory, the rules and examples for good tests, are
+documented in a README.rst file in the directory.
+
+
+3rdparty
+------------
+
+Many OpenStack components include 3rdparty API support. It is
+completely legitimate for Tempest to include tests of 3rdparty APIs,
+but those should be kept separate from the normal OpenStack
+validation.
+
+TODO: tempest/tests/boto should become tempest/3rdparty/boto
+
+
+api
+------------
+
+API tests are validation tests for the OpenStack API. They should not
+use the existing python clients for OpenStack, but should instead use
+the tempest implementations of clients. This allows us to test both
+XML and JSON. Having raw clients also lets us pass invalid JSON and
+XML to the APIs and see the results, something we could not get with
+the native clients.
+
+When it makes sense, API testing should be moved closer to the
+projects themselves, possibly as functional tests in their unit test
+frameworks.
+
+TODO: The bulk of tempest/tests should move to tempest/api
+
+
+cli
+------------
+
+CLI tests use the openstack CLI to interact with the OpenStack
+cloud. CLI testing in unit tests is somewhat difficult because unlike
+server testing, there is no access to server code to
+instantiate. Tempest seems like a logical place for this, as it
+requires a running OpenStack cloud.
+
+TODO: the top level cli directory moves to tempest/cli
+
+
+scenario
+------------
+
+Scenario tests are complex "through path" tests for OpenStack
+functionality. They are typically a series of steps where complicated
+state requiring multiple services is set up, exercised, and torn down.
+
+Scenario tests can and should use the OpenStack python clients.
+
+TODO: tests/network/test_network_basic_ops.py,
+tests/compute/servers/*_ops.py should move to tempest/scenario (others)
+
+
+stress
+-----------
+
+Stress tests are designed to stress an OpenStack environment by
+running a high workload against it and seeing what breaks. Tools may
+be provided to help detect breaks (stack traces in the logs).
+
+TODO: old stress tests deleted, new_stress that david is working on
+moves into here.
+
+
+whitebox
+----------
+
+Whitebox tests are tests which require access to the database of the
+target OpenStack machine to verify internal state after operations
+are made. White box tests are allowed to use the python clients.
+
+TODO: collect the whitebox tests into this location.
diff --git a/tempest/clients.py b/tempest/clients.py
index 7b1e5cc..9b2c1f5 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -42,6 +42,7 @@
from tempest.services.compute.json.servers_client import ServersClientJSON
from tempest.services.compute.json.volumes_extensions_client import \
VolumesExtensionsClientJSON
+from tempest.services.compute.xml.aggregates_client import AggregatesClientXML
from tempest.services.compute.xml.availability_zone_client import \
AvailabilityZoneClientXML
from tempest.services.compute.xml.extensions_client import ExtensionsClientXML
@@ -201,6 +202,11 @@
"xml": ServiceClientXML,
}
+AGGREGATES_CLIENT = {
+ "json": AggregatesClientJSON,
+ "xml": AggregatesClientXML,
+}
+
class Manager(object):
@@ -270,6 +276,7 @@
self.availability_zone_client = \
AVAILABILITY_ZONE_CLIENT[interface](*client_args)
self.service_client = SERVICE_CLIENT[interface](*client_args)
+ self.aggregates_client = AGGREGATES_CLIENT[interface](*client_args)
except KeyError:
msg = "Unsupported interface type `%s'" % interface
raise exceptions.InvalidConfiguration(msg)
@@ -285,7 +292,6 @@
self.custom_object_client = ObjectClientCustomizedHeader(*client_args)
self.custom_account_client = \
AccountClientCustomizedHeader(*client_args)
- self.aggregates_client = AggregatesClientJSON(*client_args)
class AltManager(Manager):
diff --git a/tempest/services/compute/xml/aggregates_client.py b/tempest/services/compute/xml/aggregates_client.py
new file mode 100644
index 0000000..0ef8e22
--- /dev/null
+++ b/tempest/services/compute/xml/aggregates_client.py
@@ -0,0 +1,103 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 NEC Corporation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+
+from tempest.common.rest_client import RestClientXML
+from tempest import exceptions
+from tempest.services.compute.xml.common import Document
+from tempest.services.compute.xml.common import Element
+from tempest.services.compute.xml.common import xml_to_json
+
+
+class AggregatesClientXML(RestClientXML):
+
+ def __init__(self, config, username, password, auth_url, tenant_name=None):
+ super(AggregatesClientXML, self).__init__(config, username, password,
+ auth_url, tenant_name)
+ self.service = self.config.compute.catalog_type
+
+ def _format_aggregate(self, g):
+ agg = xml_to_json(g)
+ aggregate = {}
+ for key, value in agg.items():
+ if key == 'hosts':
+ aggregate['hosts'] = []
+ for k, v in value.items():
+ aggregate['hosts'].append(v)
+ elif key == 'availability_zone':
+ aggregate[key] = None if value == 'None' else value
+ else:
+ aggregate[key] = value
+ return aggregate
+
+ def _parse_array(self, node):
+ return [self._format_aggregate(x) for x in node]
+
+ def list_aggregates(self):
+ """Get aggregate list."""
+ resp, body = self.get("os-aggregates", self.headers)
+ aggregates = self._parse_array(etree.fromstring(body))
+ return resp, aggregates
+
+ def get_aggregate(self, aggregate_id):
+ """Get details of the given aggregate."""
+ resp, body = self.get("os-aggregates/%s" % str(aggregate_id),
+ self.headers)
+ aggregate = self._format_aggregate(etree.fromstring(body))
+ return resp, aggregate
+
+ def create_aggregate(self, name, availability_zone=None):
+ """Creates a new aggregate."""
+ post_body = Element("aggregate",
+ name=name,
+ availability_zone=availability_zone)
+ resp, body = self.post('os-aggregates',
+ str(Document(post_body)),
+ self.headers)
+ aggregate = self._format_aggregate(etree.fromstring(body))
+ return resp, aggregate
+
+ def delete_aggregate(self, aggregate_id):
+ """Deletes the given aggregate."""
+ return self.delete("os-aggregates/%s" % str(aggregate_id),
+ self.headers)
+
+ def is_resource_deleted(self, id):
+ try:
+ self.get_aggregate(id)
+ except exceptions.NotFound:
+ return True
+ return False
+
+ def add_host(self, aggregate_id, host):
+ """Adds a host to the given aggregate."""
+ post_body = Element("add_host", host=host)
+ resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
+ str(Document(post_body)),
+ self.headers)
+ aggregate = self._format_aggregate(etree.fromstring(body))
+ return resp, aggregate
+
+ def remove_host(self, aggregate_id, host):
+ """Removes a host from the given aggregate."""
+ post_body = Element("remove_host", host=host)
+ resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
+ str(Document(post_body)),
+ self.headers)
+ aggregate = self._format_aggregate(etree.fromstring(body))
+ return resp, aggregate
diff --git a/tempest/test.py b/tempest/test.py
index 4db9827..de255d5 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -51,10 +51,8 @@
class BaseTestCase(testtools.TestCase,
testtools.testcase.WithAttributes,
testresources.ResourcedTestCase):
- def __init__(self, *args, **kwargs):
- super(BaseTestCase, self).__init__(*args, **kwargs)
- #NOTE(afazekas): inspection workaround
- BaseTestCase.config = config.TempestConfig()
+
+ config = config.TempestConfig()
@classmethod
def setUpClass(cls):
@@ -62,6 +60,29 @@
super(BaseTestCase, cls).setUpClass()
+def call_until_true(func, duration, sleep_for):
+ """
+ Call the given function until it returns True (and return True) or
+ until the specified duration (in seconds) elapses (and return
+ False).
+
+ :param func: A zero argument callable that returns True on success.
+ :param duration: The number of seconds for which to attempt a
+ successful call of the function.
+ :param sleep_for: The number of seconds to sleep after an unsuccessful
+ invocation of the function.
+ """
+ now = time.time()
+ timeout = now + duration
+ while now < timeout:
+ if func():
+ return True
+ LOG.debug("Sleeping for %d seconds", sleep_for)
+ time.sleep(sleep_for)
+ now = time.time()
+ return False
+
+
class TestCase(BaseTestCase):
"""Base test case class for all Tempest tests
@@ -96,57 +117,33 @@
self.os_resources.remove(thing)
del self.resource_keys[key]
-
-def call_until_true(func, duration, sleep_for):
- """
- Call the given function until it returns True (and return True) or
- until the specified duration (in seconds) elapses (and return
- False).
-
- :param func: A zero argument callable that returns True on success.
- :param duration: The number of seconds for which to attempt a successful
- call of the function.
- :param sleep_for: The number of seconds to sleep after an unsuccessful
- invocation of the function.
- """
- now = time.time()
- timeout = now + duration
- while now < timeout:
- if func():
- return True
- LOG.debug("Sleeping for %d seconds", sleep_for)
- time.sleep(sleep_for)
- now = time.time()
- return False
-
-
-def status_timeout(testcase, things, thing_id, expected_status):
- """
- Given a thing and an expected status, do a loop, sleeping
- for a configurable amount of time, checking for the
- expected status to show. At any time, if the returned
- status of the thing is ERROR, fail out.
- """
- def check_status():
- # python-novaclient has resources available to its client
- # that all implement a get() method taking an identifier
- # for the singular resource to retrieve.
- thing = things.get(thing_id)
- new_status = thing.status
- if new_status == 'ERROR':
- testcase.fail("%s failed to get to expected status."
+ def status_timeout(self, things, thing_id, expected_status):
+ """
+ Given a thing and an expected status, do a loop, sleeping
+ for a configurable amount of time, checking for the
+ expected status to show. At any time, if the returned
+ status of the thing is ERROR, fail out.
+ """
+ def check_status():
+ # python-novaclient has resources available to its client
+ # that all implement a get() method taking an identifier
+ # for the singular resource to retrieve.
+ thing = things.get(thing_id)
+ new_status = thing.status
+ if new_status == 'ERROR':
+ self.fail("%s failed to get to expected status."
"In ERROR state."
% thing)
- elif new_status == expected_status:
- return True # All good.
- LOG.debug("Waiting for %s to get to %s status. "
- "Currently in %s status",
- thing, expected_status, new_status)
- conf = config.TempestConfig()
- if not call_until_true(check_status,
- conf.compute.build_timeout,
- conf.compute.build_interval):
- testcase.fail("Timed out waiting for thing %s to become %s"
+ elif new_status == expected_status:
+ return True # All good.
+ LOG.debug("Waiting for %s to get to %s status. "
+ "Currently in %s status",
+ thing, expected_status, new_status)
+ conf = config.TempestConfig()
+ if not call_until_true(check_status,
+ conf.compute.build_timeout,
+ conf.compute.build_interval):
+ self.fail("Timed out waiting for thing %s to become %s"
% (thing_id, expected_status))
diff --git a/tempest/tests/boto/test_ec2_instance_run.py b/tempest/tests/boto/test_ec2_instance_run.py
index 08dc330..b6b93d8 100644
--- a/tempest/tests/boto/test_ec2_instance_run.py
+++ b/tempest/tests/boto/test_ec2_instance_run.py
@@ -202,14 +202,13 @@
re_search_wait(_output, text)
part_lines = ssh.get_partitions().split('\n')
- # "attaching" invalid EC2 state ! #1074901
volume.attach(instance.id, "/dev/vdh")
def _volume_state():
volume.update(validate=True)
return volume.status
- #self.assertVolumeStatusWait(_volume_state, "in-use") # #1074901
+ self.assertVolumeStatusWait(_volume_state, "in-use")
re_search_wait(_volume_state, "in-use")
#NOTE(afazekas): Different Hypervisor backends names
@@ -229,9 +228,9 @@
#TODO(afazekas): Resource compare to the flavor settings
- volume.detach() # "detaching" invalid EC2 status #1074901
+ volume.detach()
- #self.assertVolumeStatusWait(_volume_state, "available")
+ self.assertVolumeStatusWait(_volume_state, "available")
re_search_wait(_volume_state, "available")
LOG.info("Volume %s state: %s", volume.id, volume.status)
diff --git a/tempest/tests/boto/test_ec2_volumes.py b/tempest/tests/boto/test_ec2_volumes.py
index dc8ff31..37a913e 100644
--- a/tempest/tests/boto/test_ec2_volumes.py
+++ b/tempest/tests/boto/test_ec2_volumes.py
@@ -39,7 +39,6 @@
cls.client = cls.os.ec2api_client
cls.zone = cls.client.get_good_zone()
-#NOTE(afazekas): as admin it can trigger the Bug #1074901
@attr(type='smoke')
def test_create_get_delete(self):
# EC2 Create, get, delete Volume
diff --git a/tempest/tests/compute/admin/test_aggregates.py b/tempest/tests/compute/admin/test_aggregates.py
index 06acc41..07df77f 100644
--- a/tempest/tests/compute/admin/test_aggregates.py
+++ b/tempest/tests/compute/admin/test_aggregates.py
@@ -27,13 +27,14 @@
Tests Aggregates API that require admin privileges
"""
+ _host_key = 'OS-EXT-SRV-ATTR:host'
_interface = 'json'
@classmethod
def setUpClass(cls):
super(AggregatesAdminTestJSON, cls).setUpClass()
cls.client = cls.os_adm.aggregates_client
- cls.user_client = cls.os.aggregates_client
+ cls.user_client = cls.aggregates_client
cls.aggregate_name_prefix = 'test_aggregate_'
cls.az_name_prefix = 'test_az_'
@@ -212,7 +213,7 @@
availability_zone=az_name)
servers_client.wait_for_server_status(server['id'], 'ACTIVE')
resp, body = admin_servers_client.get_server(server['id'])
- self.assertEqual(self.host, body['OS-EXT-SRV-ATTR:host'])
+ self.assertEqual(self.host, body[self._host_key])
@attr(type='negative')
def test_aggregate_add_non_exist_host(self):
@@ -254,3 +255,9 @@
self.assertRaises(exceptions.Unauthorized,
self.user_client.remove_host,
aggregate['id'], self.host)
+
+
+class AggregatesAdminTestXML(AggregatesAdminTestJSON):
+ _host_key = (
+ '{http://docs.openstack.org/compute/ext/extended_status/api/v1.1}host')
+ _interface = 'xml'
diff --git a/tempest/tests/compute/base.py b/tempest/tests/compute/base.py
index 221cfb6..b313e0b 100644
--- a/tempest/tests/compute/base.py
+++ b/tempest/tests/compute/base.py
@@ -62,6 +62,7 @@
cls.interfaces_client = os.interfaces_client
cls.fixed_ips_client = os.fixed_ips_client
cls.availability_zone_client = os.availability_zone_client
+ cls.aggregates_client = os.aggregates_client
cls.build_interval = cls.config.compute.build_interval
cls.build_timeout = cls.config.compute.build_timeout
cls.ssh_user = cls.config.compute.ssh_user
diff --git a/tempest/tests/compute/floating_ips/test_floating_ips_actions.py b/tempest/tests/compute/floating_ips/test_floating_ips_actions.py
index d800fb5..5fe911f 100644
--- a/tempest/tests/compute/floating_ips/test_floating_ips_actions.py
+++ b/tempest/tests/compute/floating_ips/test_floating_ips_actions.py
@@ -53,8 +53,8 @@
@classmethod
def tearDownClass(cls):
#Deleting the floating IP which is created in this method
- super(FloatingIPsTestJSON, cls).tearDownClass()
resp, body = cls.client.delete_floating_ip(cls.floating_ip_id)
+ super(FloatingIPsTestJSON, cls).tearDownClass()
@attr(type='positive')
def test_allocate_floating_ip(self):
diff --git a/tempest/tests/compute/images/test_images_whitebox.py b/tempest/tests/compute/images/test_images_whitebox.py
index 105a38a..9ec05dd 100644
--- a/tempest/tests/compute/images/test_images_whitebox.py
+++ b/tempest/tests/compute/images/test_images_whitebox.py
@@ -37,10 +37,10 @@
@classmethod
def tearDownClass(cls):
"""Delete images after a test is executed."""
- super(ImagesWhiteboxTest, cls).tearDownClass()
for image_id in cls.image_ids:
cls.client.delete_image(image_id)
cls.image_ids.remove(image_id)
+ super(ImagesWhiteboxTest, cls).tearDownClass()
@classmethod
def update_state(self, server_id, vm_state, task_state, deleted=0):
diff --git a/tempest/tests/compute/servers/test_attach_interfaces.py b/tempest/tests/compute/servers/test_attach_interfaces.py
index 5e447c4..c7d4fa0 100644
--- a/tempest/tests/compute/servers/test_attach_interfaces.py
+++ b/tempest/tests/compute/servers/test_attach_interfaces.py
@@ -13,7 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest import clients
from tempest.tests.compute import base
import time
@@ -24,11 +23,10 @@
@classmethod
def setUpClass(cls):
- super(AttachInterfacesTestJSON, cls).setUpClass()
- os = clients.Manager(interface=cls._interface)
- if not os.config.network.quantum_available:
+ if not cls.config.network.quantum_available:
raise cls.skipException("Quantum is required")
- cls.client = os.interfaces_client
+ super(AttachInterfacesTestJSON, cls).setUpClass()
+ cls.client = cls.os.interfaces_client
def _check_interface(self, iface, port_id=None, network_id=None,
fixed_ip=None):
diff --git a/tempest/tests/compute/servers/test_server_advanced_ops.py b/tempest/tests/compute/servers/test_server_advanced_ops.py
index 8be9c54..ad859d0 100644
--- a/tempest/tests/compute/servers/test_server_advanced_ops.py
+++ b/tempest/tests/compute/servers/test_server_advanced_ops.py
@@ -66,18 +66,18 @@
self.assertEqual(self.instance.status, 'BUILD')
instance_id = self.get_resource('instance').id
- test.status_timeout(
- self, self.compute_client.servers, instance_id, 'ACTIVE')
+ self.status_timeout(
+ self.compute_client.servers, instance_id, 'ACTIVE')
instance = self.get_resource('instance')
instance_id = instance.id
resize_flavor = self.config.compute.flavor_ref_alt
LOG.debug("Resizing instance %s from flavor %s to flavor %s",
instance.id, instance.flavor, resize_flavor)
instance.resize(resize_flavor)
- test.status_timeout(self, self.compute_client.servers, instance_id,
+ self.status_timeout(self.compute_client.servers, instance_id,
'VERIFY_RESIZE')
LOG.debug("Confirming resize of instance %s", instance_id)
instance.confirm_resize()
- test.status_timeout(
- self, self.compute_client.servers, instance_id, 'ACTIVE')
+ self.status_timeout(
+ self.compute_client.servers, instance_id, 'ACTIVE')
diff --git a/tempest/tests/compute/servers/test_server_basic_ops.py b/tempest/tests/compute/servers/test_server_basic_ops.py
index e4e246a..fdbbd3c 100644
--- a/tempest/tests/compute/servers/test_server_basic_ops.py
+++ b/tempest/tests/compute/servers/test_server_basic_ops.py
@@ -101,8 +101,8 @@
def wait_on_active(self):
instance_id = self.get_resource('instance').id
- test.status_timeout(
- self, self.compute_client.servers, instance_id, 'ACTIVE')
+ self.status_timeout(
+ self.compute_client.servers, instance_id, 'ACTIVE')
def pause_server(self):
instance = self.get_resource('instance')
@@ -110,8 +110,8 @@
LOG.debug("Pausing instance %s. Current status: %s",
instance_id, instance.status)
instance.pause()
- test.status_timeout(
- self, self.compute_client.servers, instance_id, 'PAUSED')
+ self.status_timeout(
+ self.compute_client.servers, instance_id, 'PAUSED')
def unpause_server(self):
instance = self.get_resource('instance')
@@ -119,8 +119,8 @@
LOG.debug("Unpausing instance %s. Current status: %s",
instance_id, instance.status)
instance.unpause()
- test.status_timeout(
- self, self.compute_client.servers, instance_id, 'ACTIVE')
+ self.status_timeout(
+ self.compute_client.servers, instance_id, 'ACTIVE')
def suspend_server(self):
instance = self.get_resource('instance')
@@ -128,7 +128,7 @@
LOG.debug("Suspending instance %s. Current status: %s",
instance_id, instance.status)
instance.suspend()
- test.status_timeout(self, self.compute_client.servers,
+ self.status_timeout(self.compute_client.servers,
instance_id, 'SUSPENDED')
def resume_server(self):
@@ -137,8 +137,8 @@
LOG.debug("Resuming instance %s. Current status: %s",
instance_id, instance.status)
instance.resume()
- test.status_timeout(
- self, self.compute_client.servers, instance_id, 'ACTIVE')
+ self.status_timeout(
+ self.compute_client.servers, instance_id, 'ACTIVE')
def terminate_instance(self):
instance = self.get_resource('instance')
diff --git a/tempest/tests/compute/servers/test_server_rescue.py b/tempest/tests/compute/servers/test_server_rescue.py
index 04c5b27..862a86a 100644
--- a/tempest/tests/compute/servers/test_server_rescue.py
+++ b/tempest/tests/compute/servers/test_server_rescue.py
@@ -85,7 +85,6 @@
@classmethod
def tearDownClass(cls):
- super(ServerRescueTestJSON, cls).tearDownClass()
#Deleting the floating IP which is created in this method
cls.floating_ips_client.delete_floating_ip(cls.floating_ip_id)
client = cls.volumes_extensions_client
@@ -93,6 +92,7 @@
client.delete_volume(str(cls.volume_to_detach['id']).strip())
resp, cls.sg = cls.security_groups_client.delete_security_group(
cls.sg_id)
+ super(ServerRescueTestJSON, cls).tearDownClass()
def tearDown(self):
super(ServerRescueTestJSON, self).tearDown()
diff --git a/tempest/tests/network/common.py b/tempest/tests/network/common.py
index 6246f54..6811acf 100644
--- a/tempest/tests/network/common.py
+++ b/tempest/tests/network/common.py
@@ -269,7 +269,7 @@
self.set_resource(name, server)
except AttributeError:
self.fail("Server not successfully created.")
- test.status_timeout(self, client.servers, server.id, 'ACTIVE')
+ self.status_timeout(client.servers, server.id, 'ACTIVE')
# The instance retrieved on creation is missing network
# details, necessitating retrieval after it becomes active to
# ensure correct details.
diff --git a/tempest/tests/volume/admin/test_multi_backend.py b/tempest/tests/volume/admin/test_multi_backend.py
index 04007c9..3d5fae4 100644
--- a/tempest/tests/volume/admin/test_multi_backend.py
+++ b/tempest/tests/volume/admin/test_multi_backend.py
@@ -97,8 +97,6 @@
@classmethod
def tearDownClass(cls):
- super(VolumeMultiBackendTest, cls).tearDownClass()
-
## volumes deletion
for volume_id in cls.volume_id_list:
cls.client.delete_volume(volume_id)
@@ -108,6 +106,8 @@
for volume_type in cls.volume_type_list:
cls.client2.delete_volume_type(volume_type)
+ super(VolumeMultiBackendTest, cls).tearDownClass()
+
def test_multi_backend_enabled(self):
# this test checks that multi backend is enabled for at least the
# computes where the volumes created in setUp were made
diff --git a/tempest/tests/volume/admin/test_volume_types_extra_specs.py b/tempest/tests/volume/admin/test_volume_types_extra_specs.py
index c8cf8d9..1cd7653 100644
--- a/tempest/tests/volume/admin/test_volume_types_extra_specs.py
+++ b/tempest/tests/volume/admin/test_volume_types_extra_specs.py
@@ -30,8 +30,8 @@
@classmethod
def tearDownClass(cls):
- super(VolumeTypesExtraSpecsTest, cls).tearDownClass()
cls.client.delete_volume_type(cls.volume_type['id'])
+ super(VolumeTypesExtraSpecsTest, cls).tearDownClass()
def test_volume_type_extra_specs_list(self):
# List Volume types extra specs.
diff --git a/tempest/tests/volume/admin/test_volume_types_extra_specs_negative.py b/tempest/tests/volume/admin/test_volume_types_extra_specs_negative.py
index 13fcbbf..bd6e279 100644
--- a/tempest/tests/volume/admin/test_volume_types_extra_specs_negative.py
+++ b/tempest/tests/volume/admin/test_volume_types_extra_specs_negative.py
@@ -36,8 +36,8 @@
@classmethod
def tearDownClass(cls):
- super(ExtraSpecsNegativeTest, cls).tearDownClass()
cls.client.delete_volume_type(cls.volume_type['id'])
+ super(ExtraSpecsNegativeTest, cls).tearDownClass()
def test_update_no_body(self):
# Should not update volume type extra specs with no body
diff --git a/tempest/tests/volume/test_volumes_actions.py b/tempest/tests/volume/test_volumes_actions.py
index fb9b975..e6eb8d8 100644
--- a/tempest/tests/volume/test_volumes_actions.py
+++ b/tempest/tests/volume/test_volumes_actions.py
@@ -43,7 +43,6 @@
@classmethod
def tearDownClass(cls):
- super(VolumesActionsTest, cls).tearDownClass()
# Delete the test instance and volume
cls.client.delete_volume(cls.volume['id'])
cls.client.wait_for_resource_deletion(cls.volume['id'])
@@ -51,6 +50,8 @@
cls.servers_client.delete_server(cls.server['id'])
cls.client.wait_for_resource_deletion(cls.server['id'])
+ super(VolumesActionsTest, cls).tearDownClass()
+
@attr(type='smoke')
def test_attach_detach_volume_to_instance(self):
# Volume is attached and detached successfully from an instance
diff --git a/tox.ini b/tox.ini
index 4a2f80e..565a9ad 100644
--- a/tox.ini
+++ b/tox.ini
@@ -9,17 +9,52 @@
NOSE_OPENSTACK_YELLOW=3
NOSE_OPENSTACK_SHOW_ELAPSED=1
NOSE_OPENSTACK_STDOUT=1
-deps = -r{toxinidir}/tools/pip-requires
- -r{toxinidir}/tools/test-requires
-commands = nosetests {posargs}
+
+[testenv:full]
+sitepackages = True
+setenv = VIRTUAL_ENV={envdir}
+ NOSE_WITH_OPENSTACK=1
+ NOSE_OPENSTACK_COLOR=1
+ NOSE_OPENSTACK_RED=15
+ NOSE_OPENSTACK_YELLOW=3
+ NOSE_OPENSTACK_SHOW_ELAPSED=1
+ NOSE_OPENSTACK_STDOUT=1
+commands =
+ nosetests --logging-format '%(asctime)-15s %(message)s' --with-xunit --xunit-file=nosetests-full.xml -sv tempest
+ nosetests --logging-format '%(asctime)-15s %(message)s' --with-xunit --xunit-file=nosetests-cli.xml -sv cli
+
+[testenv:smoke]
+sitepackages = True
+setenv = VIRTUAL_ENV={envdir}
+ NOSE_WITH_OPENSTACK=1
+ NOSE_OPENSTACK_COLOR=1
+ NOSE_OPENSTACK_RED=15
+ NOSE_OPENSTACK_YELLOW=3
+ NOSE_OPENSTACK_SHOW_ELAPSED=1
+ NOSE_OPENSTACK_STDOUT=1
+commands =
+ nosetests --logging-format '%(asctime)-15s %(message)s' --with-xunit -sv --attr=type=smoke --xunit-file=nosetests-smoke.xml tempest
+
[testenv:coverage]
-commands = python -m tools/tempest_coverage -c start --combine
- nosetests {posargs}
- python -m tools/tempest_coverage -c report --html
+sitepackages = True
+setenv = VIRTUAL_ENV={envdir}
+ NOSE_WITH_OPENSTACK=1
+ NOSE_OPENSTACK_COLOR=1
+ NOSE_OPENSTACK_RED=15
+ NOSE_OPENSTACK_YELLOW=3
+ NOSE_OPENSTACK_SHOW_ELAPSED=1
+ NOSE_OPENSTACK_STDOUT=1
+commands =
+ python -m tools/tempest_coverage -c start --combine
+ nosetests --logging-format '%(asctime)-15s %(message)s' --with-xunit --xunit-file=nosetests-full.xml -sv tempest
+ nosetests --logging-format '%(asctime)-15s %(message)s' --with-xunit --xunit-file=nosetests-cli.xml -sv cli
+ python -m tools/tempest_coverage -c report --html
[testenv:pep8]
commands = flake8
+deps = -r{toxinidir}/tools/pip-requires
+ -r{toxinidir}/tools/test-requires
[flake8]
ignore = E125,H302,H404