Merge "Enhance the validation of the quotas update"
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index b64b047..9e93759 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -79,6 +79,9 @@
# Name of a user used to authenticated to an instance
ssh_user = cirros
+# Visible fixed network name
+fixed_network_name = private
+
# Network id used for SSH (public, private, etc)
network_for_ssh = private
@@ -282,3 +285,26 @@
# Status change wait interval
build_interval = 1
+
+[orchestration]
+# Status change wait interval
+build_interval = 1
+
+# Status change wait timeout. This may vary across environments as some
+# tests spawn full VMs, which could be slow if the test is already in a VM.
+build_timeout = 300
+
+# Whether or not Heat is expected to be available
+heat_available = false
+
+# Instance type for tests. Needs to be big enough for a
+# full OS plus the test workload
+instance_type = m1.tiny
+
+# Name of heat-cfntools enabled image to use when launching test instances
+# If not specified, tests that spawn instances will not run
+#image_ref = ubuntu-vm-heat-cfntools
+
+# Name of existing keypair to launch servers with. The default is not to specify
+# any key, which will generate a keypair for each test class
+#keypair_name = heat_key
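
The new [orchestration] options above can be sanity-checked without Tempest's own config machinery; the following is a minimal sketch using only the stdlib ConfigParser, with section and option names taken from the hunk above (the file path and the fallback for the commented-out image_ref are assumptions)::

    # Minimal sketch, not Tempest's config loader: read the sample file with
    # the stdlib ConfigParser and show how the new options would be consumed.
    try:
        import configparser                      # Python 3
    except ImportError:
        import ConfigParser as configparser      # Python 2, as used in this tree

    parser = configparser.RawConfigParser()
    parser.read('etc/tempest.conf.sample')

    heat_available = parser.getboolean('orchestration', 'heat_available')
    build_timeout = parser.getint('orchestration', 'build_timeout')
    instance_type = parser.get('orchestration', 'instance_type')
    # image_ref is commented out in the sample; a missing value means tests
    # that spawn instances will not run.
    image_ref = (parser.get('orchestration', 'image_ref')
                 if parser.has_option('orchestration', 'image_ref') else None)

    if heat_available and image_ref:
        print('orchestration tests could run %s instances (timeout %ss)'
              % (instance_type, build_timeout))
    else:
        print('orchestration tests would be skipped')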
diff --git a/stress/README.rst b/stress/README.rst
deleted file mode 100644
index d935289..0000000
--- a/stress/README.rst
+++ /dev/null
@@ -1,68 +0,0 @@
-Quanta Research Cambridge OpenStack Stress Test System
-======================================================
-
-Nova is a distributed, asynchronous system that is prone to race condition
-bugs. These bugs will not be easily found during
-functional testing but will be encountered by users in large deployments in a
-way that is hard to debug. The stress test tries to cause these bugs to happen
-in a more controlled environment.
-
-The basic idea of the test is that there are a number of actions, roughly
-corresponding to the Compute API, that are fired pseudo-randomly at a nova
-cluster as fast as possible. These actions consist of what to do, how to
-verify success, and a state filter to make sure that the operation makes sense.
-For example, if the action is to reboot a server and none are active, nothing
-should be done. A test case is a set of actions to be performed and the
-probability that each action should be selected. There are also parameters
-controlling rate of fire and stuff like that.
-
-This test framework is designed to stress test a Nova cluster. Hence,
-you must have a working Nova cluster with rate limiting turned off.
-
-Environment
-------------
-This particular framework assumes your working Nova cluster understands Nova
-API 2.0. The stress tests can read the logs from the cluster. To enable this
-you have to provide the hostname to call 'nova-manage' and
-the private key and user name for ssh to the cluster in the
-[stress] section of tempest.conf. You also need to provide the
-value of --logdir in nova.conf:
-
- host_private_key_path=<path to private ssh key>
- host_admin_user=<name of user for ssh command>
- nova_logdir=<value of --logdir in nova.conf>
- controller=<hostname for calling nova-manage>
- max_instances=<limit on instances that will be created>
-
-Also, make sure to set
-
-log_level=CRITICAL
-
-so that the API client does not log failed calls which are expected while
-running stress tests.
-
-The stress test needs the top-level tempest directory to be on PYTHONPATH
-if you are not using nosetests to run.
-
-
-Running the sample test
------------------------
-
-To test your installation, do the following (from the tempest directory):
-
- PYTHONPATH=. python stress/tests/user_script_sample.py
-
-This sample test tries to create a few VMs and kill a few VMs.
-
-
-Additional Tools
-----------------
-
-Sometimes the tests don't finish, or there are failures. In these
-cases, you may want to clean out the nova cluster. We have provided
-some scripts to do this in the ``tools`` subdirectory. To use these
-tools, you will need to install python-novaclient.
-You can then use the following script to destroy any keypairs,
-floating ips, and servers::
-
-stress/tools/nova_destroy_all.py
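
In outline, a test case for this (now removed) framework was just a list of weighted actions handed to the driver; the sketch below mirrors ``stress/tests/user_script_sample.py``, which is deleted later in this change, and the probabilities must sum to 100::

    # Outline of a workload description; mirrors stress/tests/user_script_sample.py
    # (deleted later in this change).  Probabilities are percentages and the
    # driver asserts that they add up to 100.
    import datetime

    from stress.basher import BasherAction
    from stress.driver import bash_openstack
    from stress.test_servers import TestCreateVM
    from stress.test_servers import TestKillActiveVM
    from tempest import clients

    choice_spec = [
        BasherAction(TestCreateVM(), 50),      # pick "create a VM" half the time
        BasherAction(TestKillActiveVM(), 50),  # pick "kill an ACTIVE VM" half the time
    ]

    bash_openstack(clients.Manager(),          # credentials come from tempest.conf
                   choice_spec,
                   duration=datetime.timedelta(seconds=10),
                   sleep_time=1000,            # milliseconds between actions
                   test_name="simple create and delete",
                   max_vms=4)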
diff --git a/stress/basher.py b/stress/basher.py
deleted file mode 100644
index e34738f..0000000
--- a/stress/basher.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright 2011 Quanta Research Cambridge, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Class to describe actions to be included in a stress test."""
-
-
-class BasherAction(object):
- """
- Used to describe each action that you would like to include in a test run.
- """
-
- def __init__(self, test_case, probability, pargs=[], kargs={}):
- """
- `test_case` : the name of the class that implements the action
- `pargs` : positional arguments to the constructor of `test_case`
- `kargs` : keyword arguments to the constructor of `test_case`
- `probability`: frequency that each action
- """
- self.test_case = test_case
- self.pargs = pargs
- self.kargs = kargs
- self.probability = probability
-
- def invoke(self, manager, state):
- """
- Calls the `run` method of the `test_case`.
- """
- return self.test_case.run(manager, state, *self.pargs, **self.kargs)
-
- def __str__(self):
- return self.test_case.__class__.__name__
diff --git a/stress/config.py b/stress/config.py
deleted file mode 100755
index 25cb910..0000000
--- a/stress/config.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# Copyright 2011 Quanta Research Cambridge, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-class StressConfig(object):
- """Provides configuration information for whitebox stress tests."""
-
- def __init__(self, conf):
- self.conf = conf
-
- @property
- def host_private_key_path(self):
- """Path to ssh key for logging into compute nodes."""
- return self.conf.compute.path_to_private_key
-
- @property
- def host_admin_user(self):
- """Username for logging into compute nodes."""
- return self.conf.compute.ssh_user
-
- @property
- def nova_logdir(self):
- """Directory containing log files on the compute nodes."""
- return self.conf.stress.nova_logdir
-
- @property
- def controller(self):
- """Controller host."""
- return self.conf.stress.controller
-
- @property
- def max_instances(self):
- """Maximum number of instances to create during test."""
- return self.conf.stress.max_instances
diff --git a/stress/driver.py b/stress/driver.py
deleted file mode 100644
index 533c000..0000000
--- a/stress/driver.py
+++ /dev/null
@@ -1,274 +0,0 @@
-# Copyright 2011 Quanta Research Cambridge, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""The entry point for the execution of a workloadTo execute a workload.
-Users pass in a description of the workload and a nova manager object
-to the bash_openstack function call"""
-
-import datetime
-import random
-import time
-import urlparse
-
-from config import StressConfig
-from state import ClusterState
-from state import FloatingIpState
-from state import KeyPairState
-from state import VolumeState
-import stress.utils
-from test_case import logging
-
-from tempest.common.utils.data_utils import rand_name
-
-# setup logging to file
-logging.basicConfig(
- format='%(asctime)s %(name)-20s %(levelname)-8s %(message)s',
- datefmt='%m-%d %H:%M:%S',
- filename="stress.debug.log",
- filemode="w",
- level=logging.DEBUG,
-)
-
-# define a Handler which writes INFO messages or higher to the sys.stdout
-_console = logging.StreamHandler()
-_console.setLevel(logging.INFO)
-# set a format which is simpler for console use
-_formatter = logging.Formatter('%(name)-20s: %(levelname)-8s %(message)s')
-# tell the handler to use this format
-_console.setFormatter(_formatter)
-# add the handler to the root logger
-logging.getLogger('').addHandler(_console)
-
-
-def _create_cases(choice_spec):
- """
- Generate a workload of tests from workload description
- """
- cases = []
- count = 0
- for choice in choice_spec:
- p = choice.probability
- for i in range(p):
- cases.append(choice)
- i = i + p
- count = count + p
- assert(count == 100)
- return cases
-
-
-def _get_compute_nodes(keypath, user, controller):
- """
- Returns a list of active compute nodes. List is generated by running
- nova-manage on the controller.
- """
- nodes = []
- if keypath is None or user is None:
- return nodes
- cmd = "nova-manage service list | grep ^nova-compute"
- lines = stress.utils.ssh(keypath, user, controller, cmd).split('\n')
- # For example: nova-compute xg11eth0 nova enabled :-) 2011-10-31 18:57:46
- # This is fragile but there is, at present, no other way to get this info.
- for line in lines:
- words = line.split()
- if len(words) > 0 and words[4] == ":-)":
- nodes.append(words[1])
- return nodes
-
-
-def _error_in_logs(keypath, logdir, user, nodes):
- """
- Detect errors in the nova log files on the controller and compute nodes.
- """
- grep = 'egrep "ERROR\|TRACE" %s/*.log' % logdir
- for node in nodes:
- errors = stress.utils.ssh(keypath, user, node, grep, check=False)
- if len(errors) > 0:
- logging.error('%s: %s' % (node, errors))
- return True
- return False
-
-
-def create_initial_vms(manager, state, count):
- image = manager.config.compute.image_ref
- flavor = manager.config.compute.flavor_ref
- servers = []
- logging.info('Creating %d vms' % count)
- for _ in xrange(count):
- name = rand_name('initial_vm-')
- _, server = manager.servers_client.create_server(name, image, flavor)
- servers.append(server)
- for server in servers:
- manager.servers_client.wait_for_server_status(server['id'], 'ACTIVE')
- logging.info('Server Name: %s Id: %s' % (name, server['id']))
- state.set_instance_state(server['id'], (server, 'ACTIVE'))
-
-
-def create_initial_floating_ips(manager, state, count):
- logging.info('Creating %d floating ips' % count)
- for _ in xrange(count):
- _, ip = manager.floating_ips_client.create_floating_ip()
- logging.info('Ip: %s' % ip['ip'])
- state.add_floating_ip(FloatingIpState(ip))
-
-
-def create_initial_keypairs(manager, state, count):
- logging.info('Creating %d keypairs' % count)
- for _ in xrange(count):
- name = rand_name('keypair-')
- _, keypair = manager.keypairs_client.create_keypair(name)
- logging.info('Keypair: %s' % name)
- state.add_keypair(KeyPairState(keypair))
-
-
-def create_initial_volumes(manager, state, count):
- volumes = []
- logging.info('Creating %d volumes' % count)
- for _ in xrange(count):
- name = rand_name('volume-')
- _, volume = manager.volumes_client.create_volume(size=1,
- display_name=name)
- volumes.append(volume)
- for volume in volumes:
- manager.volumes_client.wait_for_volume_status(volume['id'],
- 'available')
- logging.info('Volume Name: %s Id: %s' % (name, volume['id']))
- state.add_volume(VolumeState(volume))
-
-
-def bash_openstack(manager,
- choice_spec,
- **kwargs):
- """
- Workload driver. Executes a workload as specified by the `choice_spec`
- parameter against a nova-cluster.
-
- `manager` : Manager object
- `choice_spec` : list of BasherChoice actions to run on the cluster
- `kargs` : keyword arguments to the constructor of `test_case`
- `duration` = how long this test should last (3 sec)
- `sleep_time` = time to sleep between actions (in msec)
- `test_name` = human readable workload description
- (default: unnamed test)
- `max_vms` = maximum number of instances to launch
- (default: 32)
- `seed` = random seed (default: None)
- """
- stress_config = StressConfig(manager.config)
- # get keyword arguments
- duration = kwargs.get('duration', datetime.timedelta(seconds=10))
- seed = kwargs.get('seed', None)
- sleep_time = float(kwargs.get('sleep_time', 3000)) / 1000
- max_vms = int(kwargs.get('max_vms', stress_config.max_instances))
- test_name = kwargs.get('test_name', 'unamed test')
-
- keypath = stress_config.host_private_key_path
- user = stress_config.host_admin_user
- logdir = stress_config.nova_logdir
- host = urlparse.urlparse(manager.config.identity.uri).hostname
- computes = _get_compute_nodes(keypath, user, host)
- stress.utils.execute_on_all(keypath, user, computes,
- "rm -f %s/*.log" % logdir)
- random.seed(seed)
- cases = _create_cases(choice_spec)
- state = ClusterState(max_vms=max_vms)
- create_initial_keypairs(manager, state,
- int(kwargs.get('initial_keypairs', 0)))
- create_initial_vms(manager, state,
- int(kwargs.get('initial_vms', 0)))
- create_initial_floating_ips(manager, state,
- int(kwargs.get('initial_floating_ips', 0)))
- create_initial_volumes(manager, state,
- int(kwargs.get('initial_volumes', 0)))
- test_end_time = time.time() + duration.seconds
-
- retry_list = []
- last_retry = time.time()
- cooldown = False
- logcheck_count = 0
- test_succeeded = True
- logging.debug('=== Test \"%s\" on %s ===' %
- (test_name, time.asctime(time.localtime())))
- for kw in kwargs:
- logging.debug('\t%s = %s', kw, kwargs[kw])
-
- while True:
- if not cooldown:
- if time.time() < test_end_time:
- case = random.choice(cases)
- logging.debug('Chose %s' % case)
- retry = case.invoke(manager, state)
- if retry is not None:
- retry_list.append(retry)
- else:
- logging.info('Cooling down...')
- cooldown = True
- if cooldown and len(retry_list) == 0:
- if _error_in_logs(keypath, logdir, user, computes):
- test_succeeded = False
- break
- # Retry verifications every 5 seconds.
- if time.time() - last_retry > 5:
- logging.debug('retry verifications for %d tasks', len(retry_list))
- new_retry_list = []
- for v in retry_list:
- v.check_timeout()
- if not v.retry():
- new_retry_list.append(v)
- retry_list = new_retry_list
- last_retry = time.time()
- time.sleep(sleep_time)
- # Check error logs after 100 actions
- if logcheck_count > 100:
- if _error_in_logs(keypath, logdir, user, computes):
- test_succeeded = False
- break
- else:
- logcheck_count = 0
- else:
- logcheck_count = logcheck_count + 1
- # Cleanup
- logging.info('Cleaning up: terminating virtual machines...')
- vms = state.get_instances()
- active_vms = [v for _k, v in vms.iteritems()
- if v and v[1] != 'TERMINATING']
- for target in active_vms:
- manager.servers_client.delete_server(target[0]['id'])
- # check to see that the server was actually killed
- for target in active_vms:
- kill_id = target[0]['id']
- i = 0
- while True:
- try:
- manager.servers_client.get_server(kill_id)
- except Exception:
- break
- i += 1
- if i > 60:
- _error_in_logs(keypath, logdir, user, computes)
- raise Exception("Cleanup timed out")
- time.sleep(1)
- logging.info('killed %s' % kill_id)
- state.delete_instance_state(kill_id)
- for floating_ip_state in state.get_floating_ips():
- manager.floating_ips_client.delete_floating_ip(
- floating_ip_state.resource_id)
- for keypair_state in state.get_keypairs():
- manager.keypairs_client.delete_keypair(keypair_state.name)
- for volume_state in state.get_volumes():
- manager.volumes_client.delete_volume(volume_state.resource_id)
-
- if test_succeeded:
- logging.info('*** Test succeeded ***')
- else:
- logging.info('*** Test had errors ***')
- return test_succeeded
diff --git a/stress/pending_action.py b/stress/pending_action.py
deleted file mode 100644
index abfa74d..0000000
--- a/stress/pending_action.py
+++ /dev/null
@@ -1,91 +0,0 @@
-# Copyright 2011 Quanta Research Cambridge, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Describe follow-up actions using `PendingAction` class to verify
-that nova API calls such as create/delete are completed"""
-
-import logging
-import time
-
-from tempest.exceptions import TimeoutException
-
-
-class PendingAction(object):
- """
- Initialize and describe actions to verify that a Nova API call
- is successful.
- """
-
- def __init__(self, nova_manager, timeout=None):
- """
- `nova_manager` : Manager object.
- `timeout` : time before we declare a TimeoutException
- """
- if timeout is None:
- timeout = nova_manager.config.compute.build_timeout
- self._manager = nova_manager
- self._logger = logging.getLogger(self.__class__.__name__)
- self._start_time = time.time()
- self._timeout = timeout
-
- def retry(self):
- """
- Invoked by user of this class to verify completion of
- previous TestCase actions
- """
- return False
-
- def check_timeout(self):
- """Check for timeouts of TestCase actions."""
- time_diff = time.time() - self._start_time
- if time_diff > self._timeout:
- self._logger.error('%s exceeded timeout of %d' %
- (self.__class__.__name__, self._timeout))
- raise TimeoutException
-
- def elapsed(self):
- return time.time() - self._start_time
-
-
-class PendingServerAction(PendingAction):
- """
- Initialize and describe actions to verify that a Nova API call that
- changes server state is successful.
- """
-
- def __init__(self, nova_manager, state, target_server, timeout=None):
- """
- `state` : externally maintained data structure about
- state of VMs or other persistent objects in
- the nova cluster
- `target_server` : server that actions were performed on
- """
- super(PendingServerAction, self).__init__(nova_manager,
- timeout=timeout)
- self._state = state
- self._target = target_server
-
- def _check_for_status(self, state_string):
- """Check to see if the machine has transitioned states."""
- t = time.time() # for debugging
- target = self._target
- _resp, body = self._manager.servers_client.get_server(target['id'])
- if body['status'] != state_string:
- # grab the actual state as we think it is
- temp_obj = self._state.get_instances()[target['id']]
- self._logger.debug("machine %s in state %s" %
- (target['id'], temp_obj[1]))
- self._logger.debug('%s, time: %d' % (temp_obj[1], time.time() - t))
- return temp_obj[1]
- self._logger.debug('%s, time: %d' % (state_string, time.time() - t))
- return state_string
diff --git a/stress/state.py b/stress/state.py
deleted file mode 100644
index 9c31b76..0000000
--- a/stress/state.py
+++ /dev/null
@@ -1,117 +0,0 @@
-# Copyright 2011 Quanta Research Cambridge, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-class ClusterState(object):
- """A class to store the state of various persistent objects in the Nova
- cluster, e.g. instances, volumes. Use methods to query to state which than
- can be compared to the current state of the objects in Nova.
- """
-
- def __init__(self, **kwargs):
- self._max_vms = kwargs.get('max_vms', 32)
- self._instances = {}
- self._floating_ips = []
- self._keypairs = []
- self._volumes = []
-
- # instance state methods
- def get_instances(self):
- """return the instances dictionary that we believe are in cluster."""
- return self._instances
-
- def get_max_instances(self):
- """return the maximum number of instances we can create."""
- return self._max_vms
-
- def set_instance_state(self, key, val):
- """Store `val` in the dictionary indexed at `key`."""
- self._instances[key] = val
-
- def delete_instance_state(self, key):
- """Delete state indexed at `key`."""
- del self._instances[key]
-
- #floating_ip state methods
- def get_floating_ips(self):
- """return the floating ips list for the cluster."""
- return self._floating_ips
-
- def add_floating_ip(self, floating_ip_state):
- """Add floating ip."""
- self._floating_ips.append(floating_ip_state)
-
- def remove_floating_ip(self, floating_ip_state):
- """Remove floating ip."""
- self._floating_ips.remove(floating_ip_state)
-
- # keypair methods
- def get_keypairs(self):
- """return the keypairs list for the cluster."""
- return self._keypairs
-
- def add_keypair(self, keypair_state):
- """Add keypair."""
- self._keypairs.append(keypair_state)
-
- def remove_keypair(self, keypair_state):
- """Remove keypair."""
- self._keypairs.remove(keypair_state)
-
- # volume methods
- def get_volumes(self):
- """return the volumes list for the cluster."""
- return self._volumes
-
- def add_volume(self, volume_state):
- """Add volume."""
- self._volumes.append(volume_state)
-
- def remove_volume(self, volume_state):
- """Remove volume."""
- self._volumes.remove(volume_state)
-
-
-class ServerAssociatedState(object):
- """Class that tracks resources that are associated with a particular server
- such as a volume or floating ip.
- """
-
- def __init__(self, resource_id):
- # The id of the server.
- self.server_id = None
- # The id of the resource that is attached to the server.
- self.resource_id = resource_id
- # True if in the process of attaching/detaching the resource.
- self.change_pending = False
-
-
-class FloatingIpState(ServerAssociatedState):
-
- def __init__(self, ip_desc):
- super(FloatingIpState, self).__init__(ip_desc['id'])
- self.address = ip_desc['ip']
-
-
-class VolumeState(ServerAssociatedState):
-
- def __init__(self, volume_desc):
- super(VolumeState, self).__init__(volume_desc['id'])
-
-
-class KeyPairState(object):
-
- def __init__(self, keypair_spec):
- self.name = keypair_spec['name']
- self.private_key = keypair_spec['private_key']
diff --git a/stress/test_case.py b/stress/test_case.py
deleted file mode 100644
index d04ace0..0000000
--- a/stress/test_case.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright 2011 Quanta Research Cambridge, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Abstract class for implementing an action. You only need to override
-the `run` method which specifies all the actual nova API class you wish
-to make."""
-
-
-import logging
-
-
-class StressTestCase(object):
-
- def __init__(self):
- self._logger = logging.getLogger(self.__class__.__name__)
-
- def run(self, nova_manager, state_obj, *pargs, **kargs):
- """Nova API methods to call that would modify state of the cluster."""
- return
diff --git a/stress/test_floating_ips.py b/stress/test_floating_ips.py
deleted file mode 100755
index c5bad95..0000000
--- a/stress/test_floating_ips.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# Copyright 2011 Quanta Research Cambridge, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import random
-import telnetlib
-
-from stress import pending_action
-from stress import test_case
-
-
-class TestChangeFloatingIp(test_case.StressTestCase):
- """Add or remove a floating ip from a vm."""
-
- def __init__(self):
- super(TestChangeFloatingIp, self).__init__()
- self.server_ids = None
-
- def run(self, manager, state, *pargs, **kwargs):
- if self.server_ids is None:
- vms = state.get_instances()
- self.server_ids = [k for k, v in vms.iteritems()]
- floating_ip = random.choice(state.get_floating_ips())
- if floating_ip.change_pending:
- return None
- floating_ip.change_pending = True
- timeout = int(kwargs.get('timeout', 60))
- cli = manager.floating_ips_client
- if floating_ip.server_id is None:
- server = random.choice(self.server_ids)
- address = floating_ip.address
- self._logger.info('Adding %s to server %s' % (address, server))
- resp, body = cli.associate_floating_ip_to_server(address,
- server)
- if resp.status != 202:
- raise Exception("response: %s body: %s" % (resp, body))
- floating_ip.server_id = server
- return VerifyChangeFloatingIp(manager, floating_ip,
- timeout, add=True)
- else:
- server = floating_ip.server_id
- address = floating_ip.address
- self._logger.info('Removing %s from server %s' % (address, server))
- resp, body = cli.disassociate_floating_ip_from_server(address,
- server)
- if resp.status != 202:
- raise Exception("response: %s body: %s" % (resp, body))
- return VerifyChangeFloatingIp(manager, floating_ip,
- timeout, add=False)
-
-
-class VerifyChangeFloatingIp(pending_action.PendingAction):
- """Verify that floating ip was changed."""
- def __init__(self, manager, floating_ip, timeout, add=None):
- super(VerifyChangeFloatingIp, self).__init__(manager, timeout=timeout)
- self.floating_ip = floating_ip
- self.add = add
-
- def retry(self):
- """
- Check to see that we can contact the server at its new address.
- """
- try:
- conn = telnetlib.Telnet(self.floating_ip.address, 22, timeout=0.5)
- conn.close()
- if self.add:
- self._logger.info('%s added [%.1f secs elapsed]' %
- (self.floating_ip.address, self.elapsed()))
- self.floating_ip.change_pending = False
- return True
- except Exception:
- if not self.add:
- self._logger.info('%s removed [%.1f secs elapsed]' %
- (self.floating_ip.address, self.elapsed()))
- self.floating_ip.change_pending = False
- self.floating_ip.server_id = None
- return True
- return False
diff --git a/stress/test_server_actions.py b/stress/test_server_actions.py
deleted file mode 100644
index 3a7094d..0000000
--- a/stress/test_server_actions.py
+++ /dev/null
@@ -1,275 +0,0 @@
-# Copyright 2011 Quanta Research Cambridge, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Defines various sub-classes of the `StressTestCase` and
-`PendingServerAction` class. Sub-classes of StressTestCase implement various
-API calls on the Nova cluster having to do with Server Actions. Each
-sub-class will have a corresponding PendingServerAction. These pending
-actions veriy that the API call was successful or not."""
-
-import random
-
-from stress import pending_action
-from stress import test_case
-import stress.utils
-from tempest.exceptions import Duplicate
-
-
-class TestRebootVM(test_case.StressTestCase):
- """Reboot a server."""
-
- def run(self, manager, state, *pargs, **kwargs):
- """
- Send an HTTP POST request to the nova cluster to reboot a random
- server. Update state of object in `state` variable to indicate that
- it is rebooting.
- `manager` : Manager object
- `state` : `State` object describing our view of state of cluster
- `pargs` : positional arguments
- `kwargs` : keyword arguments, which include:
- `timeout` : how long to wait before issuing Exception
- `type` : reboot type [SOFT or HARD] (default is SOFT)
- """
-
- vms = state.get_instances()
- active_vms = [v for k, v in vms.iteritems() if v and v[1] == 'ACTIVE']
- # no active vms, so return null
- if not active_vms:
- self._logger.info('no ACTIVE instances to reboot')
- return
-
- _reboot_arg = kwargs.get('type', 'SOFT')
-
- # select active vm to reboot and then send request to nova controller
- target = random.choice(active_vms)
- reboot_target = target[0]
- # It seems that doing a reboot when in reboot is an error.
- try:
- response, body = manager.servers_client.reboot(reboot_target['id'],
- _reboot_arg)
- except Duplicate:
- return
-
- if (response.status != 202):
- self._logger.error("response: %s" % response)
- raise Exception
-
- if _reboot_arg == 'SOFT':
- reboot_state = 'REBOOT'
- else:
- reboot_state = 'HARD_REBOOT'
-
- self._logger.info('waiting for machine %s to change to %s' %
- (reboot_target['id'], reboot_state))
-
- return VerifyRebootVM(manager,
- state,
- reboot_target,
- reboot_state=reboot_state)
-
-
-class VerifyRebootVM(pending_action.PendingServerAction):
- """Class to verify that the reboot completed."""
- States = stress.utils.enum('REBOOT_CHECK', 'ACTIVE_CHECK')
-
- def __init__(self, manager, state, target_server,
- reboot_state=None,
- ip_addr=None):
- super(VerifyRebootVM, self).__init__(manager,
- state,
- target_server)
- self._reboot_state = reboot_state
- self._retry_state = self.States.REBOOT_CHECK
-
- def retry(self):
- """
- Check to see that the server of interest has actually rebooted. Update
- state to indicate that server is running again.
- """
- # don't run reboot verification if target machine has been
- # deleted or is going to be deleted
- target_id = self._target['id']
- if (self._target['id'] not in self._state.get_instances().keys() or
- self._state.get_instances()[target_id][1] == 'TERMINATING'):
- self._logger.debug('machine %s is deleted or TERMINATING' %
- self._target['id'])
- return True
-
- reboot_state = self._reboot_state
- if self._retry_state == self.States.REBOOT_CHECK:
- server_state = self._check_for_status(reboot_state)
- if server_state == reboot_state:
- self._logger.info('machine %s ACTIVE -> %s' %
- (self._target['id'], reboot_state))
- self._state.set_instance_state(self._target['id'],
- (self._target, reboot_state))
- self._retry_state = self.States.ACTIVE_CHECK
- elif server_state == 'ACTIVE':
- # machine must have gone ACTIVE -> REBOOT ->ACTIVE
- self._retry_state = self.States.ACTIVE_CHECK
-
- elif self._retry_state == self.States.ACTIVE_CHECK:
- if not self._check_for_status('ACTIVE'):
- return False
- target = self._target
- self._logger.info('machine %s %s -> ACTIVE [%.1f secs elapsed]' %
- (target['id'], reboot_state, self.elapsed()))
- self._state.set_instance_state(target['id'],
- (target, 'ACTIVE'))
-
- return True
-
-# This code needs to be tested against a cluster that supports resize.
-#class TestResizeVM(test_case.StressTestCase):
-# """Resize a server (change flavors)."""
-#
-# def run(self, manager, state, *pargs, **kwargs):
-# """
-# Send an HTTP POST request to the nova cluster to resize a random
-# server. Update `state` to indicate server is rebooting.
-#
-# `manager` : Manager object.
-# `state` : `State` object describing our view of state of cluster
-# `pargs` : positional arguments
-# `kwargs` : keyword arguments, which include:
-# `timeout` : how long to wait before issuing Exception
-# """
-#
-# vms = state.get_instances()
-# active_vms = [v for k, v in vms.iteritems() if v and v[1] == 'ACTIVE']
-# # no active vms, so return null
-# if not active_vms:
-# self._logger.debug('no ACTIVE instances to resize')
-# return
-#
-# target = random.choice(active_vms)
-# resize_target = target[0]
-# print resize_target
-#
-# _timeout = kwargs.get('timeout', 600)
-#
-# # determine current flavor type, and resize to a different type
-# # m1.tiny -> m1.small, m1.small -> m1.tiny
-# curr_size = int(resize_target['flavor']['id'])
-# if curr_size == 1:
-# new_size = 2
-# else:
-# new_size = 1
-# flavor_type = { 'flavorRef': new_size } # resize to m1.small
-#
-# post_body = json.dumps({'resize' : flavor_type})
-# url = '/servers/%s/action' % resize_target['id']
-# (response, body) = manager.request('POST',
-# url,
-# body=post_body)
-#
-# if (response.status != 202):
-# self._logger.error("response: %s" % response)
-# raise Exception
-#
-# state_name = check_for_status(manager, resize_target, 'RESIZE')
-#
-# if state_name == 'RESIZE':
-# self._logger.info('machine %s: ACTIVE -> RESIZE' %
-# resize_target['id'])
-# state.set_instance_state(resize_target['id'],
-# (resize_target, 'RESIZE'))
-#
-# return VerifyResizeVM(manager,
-# state,
-# resize_target,
-# state_name=state_name,
-# timeout=_timeout)
-#
-#class VerifyResizeVM(pending_action.PendingServerAction):
-# """Verify that resizing of a VM was successful."""
-# States = enum('VERIFY_RESIZE_CHECK', 'ACTIVE_CHECK')
-#
-# def __init__(self, manager, state, created_server,
-# state_name=None,
-# timeout=300):
-# super(VerifyResizeVM, self).__init__(manager,
-# state,
-# created_server,
-# timeout=timeout)
-# self._retry_state = self.States.VERIFY_RESIZE_CHECK
-# self._state_name = state_name
-#
-# def retry(self):
-# """
-# Check to see that the server was actually resized. And change `state`
-# of server to running again.
-# """
-# # don't run resize if target machine has been deleted
-# # or is going to be deleted
-# if (self._target['id'] not in self._state.get_instances().keys() or
-# self._state.get_instances()[self._target['id']][1] ==
-# 'TERMINATING'):
-# self._logger.debug('machine %s is deleted or TERMINATING' %
-# self._target['id'])
-# return True
-#
-# if self._retry_state == self.States.VERIFY_RESIZE_CHECK:
-# if self._check_for_status('VERIFY_RESIZE') == 'VERIFY_RESIZE':
-# # now issue command to CONFIRM RESIZE
-# post_body = json.dumps({'confirmResize' : null})
-# url = '/servers/%s/action' % self._target['id']
-# (response, body) = manager.request('POST',
-# url,
-# body=post_body)
-# if (response.status != 204):
-# self._logger.error("response: %s" % response)
-# raise Exception
-#
-# self._logger.info(
-# 'CONFIRMING RESIZE of machine %s [%.1f secs elapsed]' %
-# (self._target['id'], self.elapsed())
-# )
-# state.set_instance_state(self._target['id'],
-# (self._target, 'CONFIRM_RESIZE'))
-#
-# # change states
-# self._retry_state = self.States.ACTIVE_CHECK
-#
-# return False
-#
-# elif self._retry_state == self.States.ACTIVE_CHECK:
-# if not self._check_manager("ACTIVE"):
-# return False
-# else:
-# server = self._manager.get_server(self._target['id'])
-#
-# # Find private IP of server?
-# try:
-# (_, network) = server['addresses'].popitem()
-# ip = network[0]['addr']
-# except KeyError:
-# self._logger.error(
-# 'could not get ip address for machine %s' %
-# self._target['id']
-# )
-# raise Exception
-#
-# self._logger.info(
-# 'machine %s: VERIFY_RESIZE -> ACTIVE [%.1f sec elapsed]' %
-# (self._target['id'], self.elapsed())
-# )
-# self._state.set_instance_state(self._target['id'],
-# (self._target, 'ACTIVE'))
-#
-# return True
-#
-# else:
-# # should never get here
-# self._logger.error('Unexpected state')
-# raise Exception
diff --git a/stress/test_servers.py b/stress/test_servers.py
deleted file mode 100644
index 1dd72f1..0000000
--- a/stress/test_servers.py
+++ /dev/null
@@ -1,318 +0,0 @@
-# Copyright 2011 Quanta Research Cambridge, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Defines various sub-classes of the `StressTestCase` and
-`PendingServerAction` class. Sub-classes of StressTestCase implement various
-API calls on the Nova cluster having to do with creating and deleting VMs.
-Each sub-class will have a corresponding PendingServerAction. These pending
-actions veriy that the API call was successful or not."""
-
-import random
-
-from stress import pending_action
-from stress import test_case
-
-
-class TestCreateVM(test_case.StressTestCase):
- """Create a virtual machine in the Nova cluster."""
- _vm_id = 0
-
- def run(self, manager, state, *pargs, **kwargs):
- """
- Send an HTTP POST request to the nova cluster to build a
- server. Update the state variable to track state of new server
- and set to PENDING state.
-
- `manager` : Manager object.
- `state` : `State` object describing our view of state of cluster
- `pargs` : positional arguments
- `kwargs` : keyword arguments, which include:
- `key_name` : name of keypair
- `image_ref` : index to image types availablexs
- `flavor_ref`: index to flavor types available
- (default = 1, which is tiny)
- """
-
- # restrict number of instances we can launch
- if len(state.get_instances()) >= state.get_max_instances():
- self._logger.debug("maximum number of instances created: %d" %
- state.get_max_instances())
- return None
-
- _key_name = kwargs.get('key_name', '')
- _image_ref = kwargs.get('image_ref', manager.config.compute.image_ref)
- _flavor_ref = kwargs.get('flavor_ref',
- manager.config.compute.flavor_ref)
-
- expected_server = {
- 'name': 'server' + str(TestCreateVM._vm_id),
- 'metadata': {
- 'key1': 'value1',
- 'key2': 'value2',
- },
- 'imageRef': _image_ref,
- 'flavorRef': _flavor_ref,
- 'adminPass': 'testpwd',
- 'key_name': _key_name,
- }
- TestCreateVM._vm_id = TestCreateVM._vm_id + 1
- create_server = manager.servers_client.create_server
- response, body = create_server(expected_server['name'],
- _image_ref,
- _flavor_ref,
- meta=expected_server['metadata'],
- adminPass=expected_server['adminPass'])
-
- if (response.status != 202):
- self._logger.error("response: %s" % response)
- self._logger.error("body: %s" % body)
- raise Exception
-
- created_server = body
-
- self._logger.info('setting machine %s to BUILD' %
- created_server['id'])
- state.set_instance_state(created_server['id'],
- (created_server, 'BUILD'))
-
- return VerifyCreateVM(manager,
- state,
- created_server,
- expected_server)
-
-
-class VerifyCreateVM(pending_action.PendingServerAction):
- """Verify that VM was built and is running."""
- def __init__(self, manager,
- state,
- created_server,
- expected_server):
- super(VerifyCreateVM, self).__init__(manager,
- state,
- created_server,
- )
- self._expected = expected_server
-
- def retry(self):
- """
- Check to see that the server was created and is running.
- Update local view of state to indicate that it is running.
- """
- # don't run create verification
- # if target machine has been deleted or is going to be deleted
- target_id = self._target['id']
- if (self._target['id'] not in self._state.get_instances().keys() or
- self._state.get_instances()[target_id][1] == 'TERMINATING'):
- self._logger.info('machine %s is deleted or TERMINATING' %
- self._target['id'])
- return True
-
- admin_pass = self._target['adminPass']
- # Could check more things here.
- if (self._expected['adminPass'] != admin_pass):
- self._logger.error('expected: %s' %
- (self._expected['adminPass']))
- self._logger.error('returned: %s' %
- (admin_pass))
- raise Exception
-
- if self._check_for_status('ACTIVE') != 'ACTIVE':
- return False
-
- self._logger.info('machine %s: BUILD -> ACTIVE [%.1f secs elapsed]' %
- (self._target['id'], self.elapsed()))
- self._state.set_instance_state(self._target['id'],
- (self._target, 'ACTIVE'))
- return True
-
-
-class TestKillActiveVM(test_case.StressTestCase):
- """Class to destroy a random ACTIVE server."""
- def run(self, manager, state, *pargs, **kwargs):
- """
- Send an HTTP POST request to the nova cluster to destroy
- a random ACTIVE server. Update `state` to indicate TERMINATING.
-
- `manager` : Manager object.
- `state` : `State` object describing our view of state of cluster
- `pargs` : positional arguments
- `kwargs` : keyword arguments, which include:
- `timeout` : how long to wait before issuing Exception
- """
- # check for active instances
- vms = state.get_instances()
- active_vms = [v for k, v in vms.iteritems() if v and v[1] == 'ACTIVE']
- # no active vms, so return null
- if not active_vms:
- self._logger.info('no ACTIVE instances to delete')
- return
-
- _timeout = kwargs.get('timeout', manager.config.compute.build_timeout)
-
- target = random.choice(active_vms)
- killtarget = target[0]
- manager.servers_client.delete_server(killtarget['id'])
- self._logger.info('machine %s: ACTIVE -> TERMINATING' %
- killtarget['id'])
- state.set_instance_state(killtarget['id'],
- (killtarget, 'TERMINATING'))
- return VerifyKillActiveVM(manager, state,
- killtarget, timeout=_timeout)
-
-
-class VerifyKillActiveVM(pending_action.PendingServerAction):
- """Verify that server was destroyed."""
-
- def retry(self):
- """
- Check to see that the server of interest is destroyed. Update
- state to indicate that server is destroyed by deleting it from local
- view of state.
- """
- tid = self._target['id']
- # if target machine has been deleted from the state, then it was
- # already verified to be deleted
- if (not tid in self._state.get_instances().keys()):
- return False
-
- try:
- self._manager.servers_client.get_server(tid)
- except Exception:
- # if we get a 404 response, is the machine really gone?
- target = self._target
- self._logger.info('machine %s: DELETED [%.1f secs elapsed]' %
- (target['id'], self.elapsed()))
- self._state.delete_instance_state(target['id'])
- return True
-
- return False
-
-
-class TestKillAnyVM(test_case.StressTestCase):
- """Class to destroy a random server regardless of state."""
-
- def run(self, manager, state, *pargs, **kwargs):
- """
- Send an HTTP POST request to the nova cluster to destroy
- a random server. Update state to TERMINATING.
-
- `manager` : Manager object.
- `state` : `State` object describing our view of state of cluster
- `pargs` : positional arguments
- `kwargs` : keyword arguments, which include:
- `timeout` : how long to wait before issuing Exception
- """
-
- vms = state.get_instances()
- # no vms, so return null
- if not vms:
- self._logger.info('no active instances to delete')
- return
-
- _timeout = kwargs.get('timeout', manager.config.compute.build_timeout)
-
- target = random.choice(vms)
- killtarget = target[0]
-
- manager.servers_client.delete_server(killtarget['id'])
- self._state.set_instance_state(killtarget['id'],
- (killtarget, 'TERMINATING'))
- # verify object will do the same thing as the active VM
- return VerifyKillAnyVM(manager, state, killtarget, timeout=_timeout)
-
-VerifyKillAnyVM = VerifyKillActiveVM
-
-
-class TestUpdateVMName(test_case.StressTestCase):
- """Class to change the name of the active server."""
- def run(self, manager, state, *pargs, **kwargs):
- """
- Issue HTTP POST request to change the name of active server.
- Update state of server to reflect name changing.
-
- `manager` : Manager object.
- `state` : `State` object describing our view of state of cluster
- `pargs` : positional arguments
- `kwargs` : keyword arguments, which include:
- `timeout` : how long to wait before issuing Exception
- """
-
- # select one machine from active ones
- vms = state.get_instances()
- active_vms = [v for k, v in vms.iteritems() if v and v[1] == 'ACTIVE']
- # no active vms, so return null
- if not active_vms:
- self._logger.info('no active instances to update')
- return
-
- _timeout = kwargs.get('timeout', manager.config.compute.build_timeout)
-
- target = random.choice(active_vms)
- update_target = target[0]
-
- # Update name by appending '_updated' to the name
- new_name = update_target['name'] + '_updated'
- (response, body) = \
- manager.servers_client.update_server(update_target['id'],
- name=new_name)
- if (response.status != 200):
- self._logger.error("response: %s " % response)
- self._logger.error("body: %s " % body)
- raise Exception
-
- assert(new_name == body['name'])
-
- self._logger.info('machine %s: ACTIVE -> UPDATING_NAME' %
- body['id'])
- state.set_instance_state(body['id'],
- (body, 'UPDATING_NAME'))
-
- return VerifyUpdateVMName(manager,
- state,
- body,
- timeout=_timeout)
-
-
-class VerifyUpdateVMName(pending_action.PendingServerAction):
- """Check that VM has new name."""
- def retry(self):
- """
- Check that VM has new name. Update local view of `state` to RUNNING.
- """
- # don't run update verification
- # if target machine has been deleted or is going to be deleted
- target_id = self._target['id']
- if (not self._target['id'] in self._state.get_instances().keys() or
- self._state.get_instances()[target_id][1] == 'TERMINATING'):
- return False
-
- response, body = \
- self._manager.serverse_client.get_server(self._target['id'])
- if (response.status != 200):
- self._logger.error("response: %s " % response)
- self._logger.error("body: %s " % body)
- raise Exception
-
- if self._target['name'] != body['name']:
- self._logger.error(self._target['name'] +
- ' vs. ' +
- body['name'])
- raise Exception
-
- # log the update
- self._logger.info('machine %s: UPDATING_NAME -> ACTIVE' %
- self._target['id'])
- self._state.set_instance_state(self._target['id'],
- (body,
- 'ACTIVE'))
- return True
diff --git a/stress/tests/create_kill.py b/stress/tests/create_kill.py
deleted file mode 100644
index 30ddfd7..0000000
--- a/stress/tests/create_kill.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright 2011 Quanta Research Cambridge, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""More aggressive test that creates and destroys VMs with shorter
-sleep times"""
-
-import datetime
-import time
-
-from stress.basher import BasherAction
-from stress.driver import bash_openstack
-from stress.test_servers import TestCreateVM
-from stress.test_servers import TestKillActiveVM
-from tempest import clients
-
-choice_spec = [
- BasherAction(TestCreateVM(), 50),
- BasherAction(TestKillActiveVM(), 50)
-]
-
-nova = clients.Manager()
-
-bash_openstack(nova,
- choice_spec,
- duration=datetime.timedelta(seconds=180),
- sleep_time=100, # in milliseconds
- seed=int(time.time()),
- test_name="create and delete",
- )
diff --git a/stress/tests/floating_ips.py b/stress/tests/floating_ips.py
deleted file mode 100755
index b1b3778..0000000
--- a/stress/tests/floating_ips.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright 2011 Quanta Research Cambridge, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Stress test that associates/disasssociates floating ips."""
-
-import datetime
-
-from stress.basher import BasherAction
-from stress.driver import bash_openstack
-from stress.test_floating_ips import TestChangeFloatingIp
-from tempest import clients
-
-
-choice_spec = [
- BasherAction(TestChangeFloatingIp(), 100)
-]
-
-nova = clients.Manager()
-
-bash_openstack(nova,
- choice_spec,
- duration=datetime.timedelta(seconds=300),
- test_name="floating_ips",
- initial_floating_ips=8,
- initial_vms=8)
diff --git a/stress/tests/hard_reboots.py b/stress/tests/hard_reboots.py
deleted file mode 100644
index 50a2e91..0000000
--- a/stress/tests/hard_reboots.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright 2011 Quanta Research Cambridge, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Test that reboots random instances in a Nova cluster."""
-
-import datetime
-import time
-
-from stress.basher import BasherAction
-from stress.driver import bash_openstack
-from stress.test_server_actions import TestRebootVM
-from stress.test_servers import TestCreateVM
-from tempest import clients
-
-choice_spec = [
- BasherAction(TestCreateVM(), 50),
- BasherAction(TestRebootVM(), 50,
- kargs={'type': 'HARD'})
-]
-
-nova = clients.Manager()
-
-bash_openstack(nova,
- choice_spec,
- duration=datetime.timedelta(seconds=180),
- sleep_time=500, # in milliseconds
- seed=int(time.time()),
- test_name="hard reboots",
- )
diff --git a/stress/tests/user_script_sample.py b/stress/tests/user_script_sample.py
deleted file mode 100644
index d941ea0..0000000
--- a/stress/tests/user_script_sample.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# Copyright 2011 Quanta Research Cambridge, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Sample stress test that creates a few virtual machines and then
-destroys them"""
-
-import datetime
-
-from stress.basher import BasherAction
-from stress.driver import bash_openstack
-from stress.test_servers import TestCreateVM
-from stress.test_servers import TestKillActiveVM
-from tempest import clients
-
-choice_spec = [
- BasherAction(TestCreateVM(), 50,
- kargs={'timeout': '60'}),
- BasherAction(TestKillActiveVM(), 50)
-]
-
-
-nova = clients.Manager()
-
-bash_openstack(nova,
- choice_spec,
- duration=datetime.timedelta(seconds=10),
- sleep_time=1000, # in milliseconds
- seed=None,
- test_name="simple create and delete",
- max_vms=4)
diff --git a/stress/tools/nova_destroy_all.py b/stress/tools/nova_destroy_all.py
deleted file mode 100755
index 00d8883..0000000
--- a/stress/tools/nova_destroy_all.py
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/usr/bin/env python
-
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 Quanta Research Cambridge, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from novaclient.v1_1 import client
-import tempest.config
-
-# get the environment variables for credentials
-identity = tempest.config.TempestConfig().identity
-
-nt = client.Client(identity.username, identity.password,
- identity.tenant_name, identity.uri)
-
-flavor_list = nt.flavors.list()
-server_list = nt.servers.list()
-images_list = nt.images.list()
-keypairs_list = nt.keypairs.list()
-floating_ips_list = nt.floating_ips.list()
-volumes_list = nt.volumes.list()
-
-print "total servers: %3d, total flavors: %3d, total images: %3d," % \
- (len(server_list),
- len(flavor_list),
- len(images_list)),
-
-print "total keypairs: %3d, total floating ips: %3d" % \
- (len(keypairs_list),
- len(floating_ips_list))
-
-print "deleting all servers"
-for s in server_list:
- s.delete()
-
-print "deleting all keypairs"
-for s in keypairs_list:
- s.delete()
-
-print "deleting all floating_ips"
-for s in floating_ips_list:
- s.delete()
-
-print "deleting all volumes"
-for s in volumes_list:
- s.delete()
diff --git a/stress/tools/nova_status.py b/stress/tools/nova_status.py
deleted file mode 100755
index ee20282..0000000
--- a/stress/tools/nova_status.py
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/usr/bin/env python
-
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 Quanta Research Cambridge, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from novaclient.v1_1 import client
-import tempest.config
-
-# get the environment variables for credentials
-identity = tempest.config.TempestConfig().identity
-print identity.username, identity.password,\
- identity.tenant_name, identity.uri
-
-nt = client.Client(identity.username, identity.password,
- identity.tenant_name, identity.uri)
-
-flavor_list = nt.flavors.list()
-server_list = nt.servers.list()
-images_list = nt.images.list()
-keypairs_list = nt.keypairs.list()
-floating_ips_list = nt.floating_ips.list()
-
-print "total servers: %3d, total flavors: %3d, total images: %3d" % \
- (len(server_list),
- len(flavor_list),
- len(images_list))
-
-print "total keypairs: %3d, total floating ips: %3d" % \
- (len(keypairs_list),
- len(floating_ips_list))
-
-print "flavors:\t", flavor_list
-print "servers:\t", server_list
-print "images: \t", images_list
-print "keypairs:\t", keypairs_list
-print "floating ips:\t", floating_ips_list
diff --git a/stress/utils.py b/stress/utils.py
deleted file mode 100644
index ec63b99..0000000
--- a/stress/utils.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 Quanta Research Cambridge, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import shlex
-import subprocess
-
-SSH_OPTIONS = (" -q" +
- " -o UserKnownHostsFile=/dev/null" +
- " -o StrictHostKeyChecking=no -i ")
-
-
-def get_ssh_options(keypath):
- return SSH_OPTIONS + keypath
-
-
-def scp(keypath, args):
- options = get_ssh_options(keypath)
- return subprocess.check_call(shlex.split("scp" + options + args))
-
-
-def ssh(keypath, user, node, command, check=True):
- command = 'sudo ' + command
- command = "ssh %s %s@%s %s" % (get_ssh_options(keypath), user,
- node, command)
- popenargs = shlex.split(command)
- process = subprocess.Popen(popenargs, stdout=subprocess.PIPE)
- output, unused_err = process.communicate()
- retcode = process.poll()
- if retcode and check:
- raise Exception("%s: ssh failed with retcode: %s" % (node, retcode))
- return output
-
-
-def execute_on_all(keypath, user, nodes, command):
- for node in nodes:
- ssh(keypath, user, node, command)
-
-
-def enum(*sequential, **named):
- """Create auto-incremented enumerated types."""
- enums = dict(zip(sequential, range(len(sequential))), **named)
- return type('Enum', (), enums)
diff --git a/tempest/README.rst b/tempest/README.rst
index c41ef96..6718ee0 100644
--- a/tempest/README.rst
+++ b/tempest/README.rst
@@ -12,11 +12,11 @@
to make this clear.
tempest/
- 3rdparty/ - 3rd party api tests
api/ - API tests
cli/ - CLI tests
scenario/ - complex scenario tests
stress/ - stress tests
+ thirdparty/ - 3rd party api tests
whitebox/ - white box testing
Each of these directories contains different types of tests. What
@@ -24,17 +24,6 @@
documented in a README.rst file in the directory.
-3rdparty
-------------
-
-Many openstack components include 3rdparty API support. It is
-completely legitmate for Tempest to include tests of 3rdparty APIs,
-but those should be kept seperate from the normal OpenStack
-validation.
-
-TODO: tempest/tests/boto should become tempest/3rdparty/boto
-
-
api
------------
@@ -88,6 +77,17 @@
moves into here.
+thirdparty
+------------
+
+Many OpenStack components include 3rdparty API support. It is
+completely legitimate for Tempest to include tests of 3rdparty APIs,
+but those should be kept separate from the normal OpenStack
+validation.
+
+TODO: tempest/tests/boto should become tempest/3rdparty/boto
+
+
whitebox
----------
diff --git a/tempest/cli/README.rst b/tempest/cli/README.rst
new file mode 100644
index 0000000..4742d4a
--- /dev/null
+++ b/tempest/cli/README.rst
@@ -0,0 +1,48 @@
+Tempest Guide to CLI tests
+==========================
+
+
+What are these tests?
+---------------------
+The CLI tests exercise the various OpenStack command line interface
+tools to ensure that they minimally function. The current scope is
+read-only operations on a cloud that are hard to test via unit tests.
+
+
+Why are these tests in tempest?
+-------------------------------
+These tests exist here because it is extremely difficult to build a
+functional enough environment in the python-*client unit tests to
+provide this kind of testing. Because we already bring up a cloud in
+the gate with devstack + tempest, it was decided it was better to keep
+these as a side tree in tempest instead of starting another QA effort
+that would split review time.
+
+
+Scope of these tests
+--------------------
+This should stay limited to the scope of testing the CLI. Functional
+testing of the cloud should live elsewhere; this is about exercising
+the CLI code.
+
+
+Example of a good test
+----------------------
+Tests should be isolated to a single command in one of the python
+clients.
+
+Tests should not modify the cloud.
+
+If a test is validating the cli for bad data, it should do it with
+assertRaises.
+
+A reasonable example of an existing test is as follows:
+
+ def test_admin_list(self):
+ self.nova('list')
+ self.nova('list', params='--all-tenants 1')
+ self.nova('list', params='--all-tenants 0')
+ self.assertRaises(subprocess.CalledProcessError,
+ self.nova,
+ 'list',
+ params='--all-tenants bad')
diff --git a/cli/__init__.py b/tempest/cli/__init__.py
similarity index 97%
rename from cli/__init__.py
rename to tempest/cli/__init__.py
index a3038d2..413990d 100644
--- a/cli/__init__.py
+++ b/tempest/cli/__init__.py
@@ -21,7 +21,7 @@
from oslo.config import cfg
-import cli.output_parser
+import tempest.cli.output_parser
import tempest.test
@@ -52,7 +52,7 @@
super(ClientTestBase, cls).setUpClass()
def __init__(self, *args, **kwargs):
- self.parser = cli.output_parser
+ self.parser = tempest.cli.output_parser
super(ClientTestBase, self).__init__(*args, **kwargs)
def nova(self, action, flags='', params='', admin=True, fail_ok=False):
diff --git a/cli/output_parser.py b/tempest/cli/output_parser.py
similarity index 100%
rename from cli/output_parser.py
rename to tempest/cli/output_parser.py
diff --git a/cli/simple_read_only/README.txt b/tempest/cli/simple_read_only/README.txt
similarity index 100%
rename from cli/simple_read_only/README.txt
rename to tempest/cli/simple_read_only/README.txt
diff --git a/cli/simple_read_only/__init__.py b/tempest/cli/simple_read_only/__init__.py
similarity index 100%
rename from cli/simple_read_only/__init__.py
rename to tempest/cli/simple_read_only/__init__.py
diff --git a/cli/simple_read_only/test_compute.py b/tempest/cli/simple_read_only/test_compute.py
similarity index 98%
rename from cli/simple_read_only/test_compute.py
rename to tempest/cli/simple_read_only/test_compute.py
index d301d38..fa64561 100644
--- a/cli/simple_read_only/test_compute.py
+++ b/tempest/cli/simple_read_only/test_compute.py
@@ -21,7 +21,7 @@
from oslo.config import cfg
import testtools
-import cli
+import tempest.cli
CONF = cfg.CONF
@@ -30,7 +30,7 @@
LOG = logging.getLogger(__name__)
-class SimpleReadOnlyNovaClientTest(cli.ClientTestBase):
+class SimpleReadOnlyNovaClientTest(tempest.cli.ClientTestBase):
"""
This is a first pass at a simple read only python-novaclient test. This
diff --git a/cli/simple_read_only/test_compute_manage.py b/tempest/cli/simple_read_only/test_compute_manage.py
similarity index 96%
rename from cli/simple_read_only/test_compute_manage.py
rename to tempest/cli/simple_read_only/test_compute_manage.py
index bbcc5b1..a788c8b 100644
--- a/cli/simple_read_only/test_compute_manage.py
+++ b/tempest/cli/simple_read_only/test_compute_manage.py
@@ -18,13 +18,13 @@
import logging
import subprocess
-import cli
+import tempest.cli
LOG = logging.getLogger(__name__)
-class SimpleReadOnlyNovaManageTest(cli.ClientTestBase):
+class SimpleReadOnlyNovaManageTest(tempest.cli.ClientTestBase):
"""
This is a first pass at a simple read only nova-manage test. This
diff --git a/cli/simple_read_only/test_glance.py b/tempest/cli/simple_read_only/test_glance.py
similarity index 96%
rename from cli/simple_read_only/test_glance.py
rename to tempest/cli/simple_read_only/test_glance.py
index f9822cc..b3b3eb7 100644
--- a/cli/simple_read_only/test_glance.py
+++ b/tempest/cli/simple_read_only/test_glance.py
@@ -19,13 +19,13 @@
import re
import subprocess
-import cli
+import tempest.cli
LOG = logging.getLogger(__name__)
-class SimpleReadOnlyGlanceClientTest(cli.ClientTestBase):
+class SimpleReadOnlyGlanceClientTest(tempest.cli.ClientTestBase):
"""Basic, read-only tests for Glance CLI client.
Checks return values and output of read-only commands.
diff --git a/cli/simple_read_only/test_keystone.py b/tempest/cli/simple_read_only/test_keystone.py
similarity index 97%
rename from cli/simple_read_only/test_keystone.py
rename to tempest/cli/simple_read_only/test_keystone.py
index 4b14c3c..067f58c 100644
--- a/cli/simple_read_only/test_keystone.py
+++ b/tempest/cli/simple_read_only/test_keystone.py
@@ -19,13 +19,13 @@
import re
import subprocess
-import cli
+import tempest.cli
LOG = logging.getLogger(__name__)
-class SimpleReadOnlyKeystoneClientTest(cli.ClientTestBase):
+class SimpleReadOnlyKeystoneClientTest(tempest.cli.ClientTestBase):
"""Basic, read-only tests for Keystone CLI client.
Checks return values and output of read-only commands.
diff --git a/tempest/clients.py b/tempest/clients.py
index 7b1e5cc..037a1c4 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -40,8 +40,10 @@
from tempest.services.compute.json.security_groups_client import \
SecurityGroupsClientJSON
from tempest.services.compute.json.servers_client import ServersClientJSON
+from tempest.services.compute.json.services_client import ServicesClientJSON
from tempest.services.compute.json.volumes_extensions_client import \
VolumesExtensionsClientJSON
+from tempest.services.compute.xml.aggregates_client import AggregatesClientXML
from tempest.services.compute.xml.availability_zone_client import \
AvailabilityZoneClientXML
from tempest.services.compute.xml.extensions_client import ExtensionsClientXML
@@ -58,6 +60,7 @@
from tempest.services.compute.xml.security_groups_client \
import SecurityGroupsClientXML
from tempest.services.compute.xml.servers_client import ServersClientXML
+from tempest.services.compute.xml.services_client import ServicesClientXML
from tempest.services.compute.xml.volumes_extensions_client import \
VolumesExtensionsClientXML
from tempest.services.identity.json.identity_client import IdentityClientJSON
@@ -85,6 +88,8 @@
from tempest.services.object_storage.object_client import ObjectClient
from tempest.services.object_storage.object_client import \
ObjectClientCustomizedHeader
+from tempest.services.orchestration.json.orchestration_client import \
+ OrchestrationClient
from tempest.services.volume.json.admin.volume_types_client import \
VolumeTypesClientJSON
from tempest.services.volume.json.snapshots_client import SnapshotsClientJSON
@@ -201,6 +206,16 @@
"xml": ServiceClientXML,
}
+AGGREGATES_CLIENT = {
+ "json": AggregatesClientJSON,
+ "xml": AggregatesClientXML,
+}
+
+SERVICES_CLIENT = {
+ "json": ServicesClientJSON,
+ "xml": ServicesClientXML,
+}
+
class Manager(object):
@@ -270,6 +285,8 @@
self.availability_zone_client = \
AVAILABILITY_ZONE_CLIENT[interface](*client_args)
self.service_client = SERVICE_CLIENT[interface](*client_args)
+ self.aggregates_client = AGGREGATES_CLIENT[interface](*client_args)
+ self.services_client = SERVICES_CLIENT[interface](*client_args)
except KeyError:
msg = "Unsupported interface type `%s'" % interface
raise exceptions.InvalidConfiguration(msg)
@@ -280,12 +297,12 @@
self.image_client_v2 = ImageClientV2JSON(*client_args)
self.container_client = ContainerClient(*client_args)
self.object_client = ObjectClient(*client_args)
+ self.orchestration_client = OrchestrationClient(*client_args)
self.ec2api_client = botoclients.APIClientEC2(*client_args)
self.s3_client = botoclients.ObjectClientS3(*client_args)
self.custom_object_client = ObjectClientCustomizedHeader(*client_args)
self.custom_account_client = \
AccountClientCustomizedHeader(*client_args)
- self.aggregates_client = AggregatesClientJSON(*client_args)
class AltManager(Manager):
@@ -331,3 +348,17 @@
conf.compute_admin.password,
conf.compute_admin.tenant_name,
interface=interface)
+
+
+class OrchestrationManager(Manager):
+ """
+ Manager object that uses the admin credentials for its clients,
+ so that heat templates can create users
+ """
+ def __init__(self, interface='json'):
+ conf = config.TempestConfig()
+ base = super(OrchestrationManager, self)
+ base.__init__(conf.identity.admin_username,
+ conf.identity.admin_password,
+ conf.identity.admin_tenant_name,
+ interface=interface)
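
For illustration, a minimal sketch of using the new manager wiring (it assumes a tempest.conf with admin identity credentials; nothing beyond the classes added above):

    from tempest import clients

    # OrchestrationManager authenticates with the admin credentials so that
    # Heat templates may create users; the orchestration client is set up by
    # the base Manager alongside the other service clients.
    manager = clients.OrchestrationManager(interface='json')
    resp, body = manager.orchestration_client.list_stacks()
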
diff --git a/tempest/config.py b/tempest/config.py
index a90767e..f5f56a8 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -156,6 +156,9 @@
default=60,
help="Timeout in seconds to wait for output from ssh "
"channel."),
+ cfg.StrOpt('fixed_network_name',
+ default='private',
+ help="Visible fixed network name "),
cfg.StrOpt('network_for_ssh',
default='public',
help="Network used for SSH connections."),
@@ -348,6 +351,48 @@
for opt in ObjectStoreConfig:
conf.register_opt(opt, group='object-storage')
+
+orchestration_group = cfg.OptGroup(name='orchestration',
+ title='Orchestration Service Options')
+
+OrchestrationGroup = [
+ cfg.StrOpt('catalog_type',
+ default='orchestration',
+ help="Catalog type of the Orchestration service."),
+ cfg.BoolOpt('allow_tenant_isolation',
+ default=False,
+ help="Allows test cases to create/destroy tenants and "
+ "users. This option enables isolated test cases and "
+ "better parallel execution, but also requires that "
+ "OpenStack Identity API admin credentials are known."),
+ cfg.IntOpt('build_interval',
+ default=1,
+ help="Time in seconds between build status checks."),
+ cfg.IntOpt('build_timeout',
+ default=300,
+ help="Timeout in seconds to wait for a stack to build."),
+ cfg.BoolOpt('heat_available',
+ default=False,
+ help="Whether or not Heat is expected to be available"),
+ cfg.StrOpt('instance_type',
+ default='m1.tiny',
+ help="Instance type for tests. Needs to be big enough for a "
+ "full OS plus the test workload"),
+ cfg.StrOpt('image_ref',
+ default=None,
+ help="Name of heat-cfntools enabled image to use when "
+ "launching test instances."),
+ cfg.StrOpt('keypair_name',
+ default=None,
+ help="Name of existing keypair to launch servers with."),
+]
+
+
+def register_orchestration_opts(conf):
+ conf.register_group(orchestration_group)
+ for opt in OrchestrationGroup:
+ conf.register_opt(opt, group='orchestration')
+
boto_group = cfg.OptGroup(name='boto',
title='EC2/S3 options')
BotoConfig = [
@@ -411,7 +456,23 @@
help='Maximum number of instances to create during test.'),
cfg.StrOpt('controller',
default=None,
- help='Controller host.')
+ help='Controller host.'),
+ # new stress options
+ cfg.StrOpt('target_controller',
+ default=None,
+ help='Controller host.'),
+ cfg.StrOpt('target_ssh_user',
+ default=None,
+ help='SSH user.'),
+ cfg.StrOpt('target_private_key_path',
+ default=None,
+ help='Path to private key.'),
+ cfg.StrOpt('target_logfiles',
+ default=None,
+ help='Regexp for list of log files.'),
+ cfg.IntOpt('log_check_interval',
+ default=60,
+ help='Time between log file error checks.')
]
@@ -466,6 +527,7 @@
register_network_opts(cfg.CONF)
register_volume_opts(cfg.CONF)
register_object_storage_opts(cfg.CONF)
+ register_orchestration_opts(cfg.CONF)
register_boto_opts(cfg.CONF)
register_compute_admin_opts(cfg.CONF)
register_stress_opts(cfg.CONF)
@@ -476,6 +538,7 @@
self.network = cfg.CONF.network
self.volume = cfg.CONF.volume
self.object_storage = cfg.CONF['object-storage']
+ self.orchestration = cfg.CONF.orchestration
self.boto = cfg.CONF.boto
self.compute_admin = cfg.CONF['compute-admin']
self.stress = cfg.CONF.stress
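
For illustration, once the group is registered a test can read the new options through TempestConfig; a small sketch (the values shown are only the defaults defined above and may be overridden in tempest.conf):

    import tempest.config

    conf = tempest.config.TempestConfig()
    orchestration = conf.orchestration
    # These resolve to the defaults above unless overridden in tempest.conf:
    # build_interval=1, build_timeout=300, instance_type='m1.tiny'.
    if not orchestration.heat_available:
        print("Heat is not expected to be available; skipping stack tests")
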
diff --git a/tempest/exceptions.py b/tempest/exceptions.py
index 235a2e7..448fbdf 100644
--- a/tempest/exceptions.py
+++ b/tempest/exceptions.py
@@ -90,6 +90,11 @@
message = "Snapshot %(snapshot_id)s failed to build and is in ERROR status"
+class StackBuildErrorException(TempestException):
+ message = ("Stack %(stack_identifier)s is in %(stack_status)s status "
+ "due to '%(stack_status_reason)s'")
+
+
class BadRequest(RestClientException):
message = "Bad request"
diff --git a/tempest/manager.py b/tempest/manager.py
index 6f23727..047ad41 100644
--- a/tempest/manager.py
+++ b/tempest/manager.py
@@ -17,15 +17,6 @@
import logging
-# Default client libs
-import glanceclient
-import keystoneclient.v2_0.client
-import novaclient.client
-try:
- import quantumclient.v2_0.client
-except ImportError:
- pass
-
import tempest.config
from tempest import exceptions
# Tempest REST Fuzz testing client libs
@@ -86,121 +77,6 @@
pass
-class DefaultClientManager(Manager):
-
- """
- Manager that provides the default clients to access the various
- OpenStack APIs.
- """
-
- NOVACLIENT_VERSION = '2'
-
- def __init__(self):
- super(DefaultClientManager, self).__init__()
- self.compute_client = self._get_compute_client()
- self.image_client = self._get_image_client()
- self.identity_client = self._get_identity_client()
- self.network_client = self._get_network_client()
- self.client_attr_names = [
- 'compute_client',
- 'image_client',
- 'identity_client',
- 'network_client',
- ]
-
- def _get_compute_client(self, username=None, password=None,
- tenant_name=None):
- # Novaclient will not execute operations for anyone but the
- # identified user, so a new client needs to be created for
- # each user that operations need to be performed for.
- if not username:
- username = self.config.identity.username
- if not password:
- password = self.config.identity.password
- if not tenant_name:
- tenant_name = self.config.identity.tenant_name
-
- if None in (username, password, tenant_name):
- msg = ("Missing required credentials for compute client. "
- "username: %(username)s, password: %(password)s, "
- "tenant_name: %(tenant_name)s") % locals()
- raise exceptions.InvalidConfiguration(msg)
-
- auth_url = self.config.identity.uri
- dscv = self.config.identity.disable_ssl_certificate_validation
-
- client_args = (username, password, tenant_name, auth_url)
-
- # Create our default Nova client to use in testing
- service_type = self.config.compute.catalog_type
- return novaclient.client.Client(self.NOVACLIENT_VERSION,
- *client_args,
- service_type=service_type,
- no_cache=True,
- insecure=dscv)
-
- def _get_image_client(self):
- keystone = self._get_identity_client()
- token = keystone.auth_token
- endpoint = keystone.service_catalog.url_for(service_type='image',
- endpoint_type='publicURL')
- dscv = self.config.identity.disable_ssl_certificate_validation
- return glanceclient.Client('1', endpoint=endpoint, token=token,
- insecure=dscv)
-
- def _get_identity_client(self, username=None, password=None,
- tenant_name=None):
- # This identity client is not intended to check the security
- # of the identity service, so use admin credentials by default.
- if not username:
- username = self.config.identity.admin_username
- if not password:
- password = self.config.identity.admin_password
- if not tenant_name:
- tenant_name = self.config.identity.admin_tenant_name
-
- if None in (username, password, tenant_name):
- msg = ("Missing required credentials for identity client. "
- "username: %(username)s, password: %(password)s, "
- "tenant_name: %(tenant_name)s") % locals()
- raise exceptions.InvalidConfiguration(msg)
-
- auth_url = self.config.identity.uri
- dscv = self.config.identity.disable_ssl_certificate_validation
-
- return keystoneclient.v2_0.client.Client(username=username,
- password=password,
- tenant_name=tenant_name,
- auth_url=auth_url,
- insecure=dscv)
-
- def _get_network_client(self):
- # The intended configuration is for the network client to have
- # admin privileges and indicate for whom resources are being
- # created via a 'tenant_id' parameter. This will often be
- # preferable to authenticating as a specific user because
- # working with certain resources (public routers and networks)
- # often requires admin privileges anyway.
- username = self.config.identity.admin_username
- password = self.config.identity.admin_password
- tenant_name = self.config.identity.admin_tenant_name
-
- if None in (username, password, tenant_name):
- msg = ("Missing required credentials for network client. "
- "username: %(username)s, password: %(password)s, "
- "tenant_name: %(tenant_name)s") % locals()
- raise exceptions.InvalidConfiguration(msg)
-
- auth_url = self.config.identity.uri
- dscv = self.config.identity.disable_ssl_certificate_validation
-
- return quantumclient.v2_0.client.Client(username=username,
- password=password,
- tenant_name=tenant_name,
- auth_url=auth_url,
- insecure=dscv)
-
-
class ComputeFuzzClientManager(FuzzClientManager):
"""
diff --git a/tempest/scenario/README.rst b/tempest/scenario/README.rst
new file mode 100644
index 0000000..c5fa0d3
--- /dev/null
+++ b/tempest/scenario/README.rst
@@ -0,0 +1,45 @@
+Tempest Guide to Scenario tests
+===============================
+
+
+What are these tests?
+---------------------
+
+Scenario tests are "through path" tests of OpenStack functionality.
+They are complicated setups where one part might depend on the
+completion of a previous part, and they ideally involve the
+integration between multiple OpenStack services to exercise the touch
+points between them.
+
+An example would be: start with a blank environment, upload a glance
+image, deploy a vm from it, ssh to the guest, make changes, capture
+that vm's image back into glance as a snapshot, and launch a second vm
+from that snapshot.
+
+
+Why are these tests in tempest?
+-------------------------------
+This is one of Tempest's core purposes: testing the integration
+between projects.
+
+
+Scope of these tests
+--------------------
+Scenario tests should always test at least two services in
+interaction. They should use the official python client libraries for
+OpenStack, as they provide a more realistic view of how people will
+interact with the services.
+
+TODO: once we have service tags, tests should be tagged with which
+services they exercise.
+
+
+Example of a good test
+----------------------
+While we are looking for interaction between two or more services, be
+specific in your interactions. A giant "this is my data center" smoke
+test is hard to debug when it goes wrong.
+
+A flow of interactions between glance and nova, like the one in the
+introduction, is a good example, especially if it involves a repeated
+interaction where a resource is set up, modified, detached, and then
+reused later.
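
For illustration only, a rough, hypothetical sketch of such a glance/nova flow written against the official client base class introduced below (the test and image names are illustrative, not part of this change):

    from tempest.scenario import manager


    class TestSnapshotPattern(manager.OfficialClientTest):

        def test_boot_snapshot_boot(self):
            image = self.config.compute.image_ref
            flavor = self.config.compute.flavor_ref
            # Boot a server from the configured image and wait for it.
            server = self.compute_client.servers.create(
                'scenario-vm', image, flavor)
            self.set_resource('server', server)
            self.status_timeout(self.compute_client.servers,
                                server.id, 'ACTIVE')
            # Capture the server back into glance as a snapshot ...
            snapshot_id = self.compute_client.servers.create_image(
                server.id, 'scenario-snapshot')
            # ... and launch a second server from that snapshot.
            server2 = self.compute_client.servers.create(
                'scenario-vm-2', snapshot_id, flavor)
            self.set_resource('server2', server2)
            self.status_timeout(self.compute_client.servers,
                                server2.id, 'ACTIVE')
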
diff --git a/tempest/tests/boto/__init__.py b/tempest/scenario/__init__.py
similarity index 100%
copy from tempest/tests/boto/__init__.py
copy to tempest/scenario/__init__.py
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
new file mode 100644
index 0000000..4f94195
--- /dev/null
+++ b/tempest/scenario/manager.py
@@ -0,0 +1,422 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 OpenStack, LLC
+# Copyright 2013 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+import subprocess
+
+# Default client libs
+import glanceclient
+import keystoneclient.v2_0.client
+import netaddr
+import novaclient.client
+try:
+ # TODO(sdague): is there a reason this is still optional?
+ from quantumclient.common import exceptions as exc
+ import quantumclient.v2_0.client
+
+except ImportError:
+ pass
+
+from tempest.common.utils.data_utils import rand_name
+from tempest import exceptions
+import tempest.manager
+import tempest.test
+from tempest.tests.network import common as net_common
+
+
+LOG = logging.getLogger(__name__)
+
+
+class OfficialClientManager(tempest.manager.Manager):
+ """
+ Manager that provides access to the official python clients for
+ calling various OpenStack APIs.
+ """
+
+ NOVACLIENT_VERSION = '2'
+
+ def __init__(self):
+ super(OfficialClientManager, self).__init__()
+ self.compute_client = self._get_compute_client()
+ self.image_client = self._get_image_client()
+ self.identity_client = self._get_identity_client()
+ self.network_client = self._get_network_client()
+ self.client_attr_names = [
+ 'compute_client',
+ 'image_client',
+ 'identity_client',
+ 'network_client',
+ ]
+
+ def _get_compute_client(self, username=None, password=None,
+ tenant_name=None):
+ # Novaclient will not execute operations for anyone but the
+ # identified user, so a new client needs to be created for
+ # each user that operations need to be performed for.
+ if not username:
+ username = self.config.identity.username
+ if not password:
+ password = self.config.identity.password
+ if not tenant_name:
+ tenant_name = self.config.identity.tenant_name
+
+ if None in (username, password, tenant_name):
+ msg = ("Missing required credentials for compute client. "
+ "username: %(username)s, password: %(password)s, "
+ "tenant_name: %(tenant_name)s") % locals()
+ raise exceptions.InvalidConfiguration(msg)
+
+ auth_url = self.config.identity.uri
+ dscv = self.config.identity.disable_ssl_certificate_validation
+
+ client_args = (username, password, tenant_name, auth_url)
+
+ # Create our default Nova client to use in testing
+ service_type = self.config.compute.catalog_type
+ return novaclient.client.Client(self.NOVACLIENT_VERSION,
+ *client_args,
+ service_type=service_type,
+ no_cache=True,
+ insecure=dscv)
+
+ def _get_image_client(self):
+ keystone = self._get_identity_client()
+ token = keystone.auth_token
+ endpoint = keystone.service_catalog.url_for(service_type='image',
+ endpoint_type='publicURL')
+ dscv = self.config.identity.disable_ssl_certificate_validation
+ return glanceclient.Client('1', endpoint=endpoint, token=token,
+ insecure=dscv)
+
+ def _get_identity_client(self, username=None, password=None,
+ tenant_name=None):
+ # This identity client is not intended to check the security
+ # of the identity service, so use admin credentials by default.
+ if not username:
+ username = self.config.identity.admin_username
+ if not password:
+ password = self.config.identity.admin_password
+ if not tenant_name:
+ tenant_name = self.config.identity.admin_tenant_name
+
+ if None in (username, password, tenant_name):
+ msg = ("Missing required credentials for identity client. "
+ "username: %(username)s, password: %(password)s, "
+ "tenant_name: %(tenant_name)s") % locals()
+ raise exceptions.InvalidConfiguration(msg)
+
+ auth_url = self.config.identity.uri
+ dscv = self.config.identity.disable_ssl_certificate_validation
+
+ return keystoneclient.v2_0.client.Client(username=username,
+ password=password,
+ tenant_name=tenant_name,
+ auth_url=auth_url,
+ insecure=dscv)
+
+ def _get_network_client(self):
+ # The intended configuration is for the network client to have
+ # admin privileges and indicate for whom resources are being
+ # created via a 'tenant_id' parameter. This will often be
+ # preferable to authenticating as a specific user because
+ # working with certain resources (public routers and networks)
+ # often requires admin privileges anyway.
+ username = self.config.identity.admin_username
+ password = self.config.identity.admin_password
+ tenant_name = self.config.identity.admin_tenant_name
+
+ if None in (username, password, tenant_name):
+ msg = ("Missing required credentials for network client. "
+ "username: %(username)s, password: %(password)s, "
+ "tenant_name: %(tenant_name)s") % locals()
+ raise exceptions.InvalidConfiguration(msg)
+
+ auth_url = self.config.identity.uri
+ dscv = self.config.identity.disable_ssl_certificate_validation
+
+ return quantumclient.v2_0.client.Client(username=username,
+ password=password,
+ tenant_name=tenant_name,
+ auth_url=auth_url,
+ insecure=dscv)
+
+
+class OfficialClientTest(tempest.test.TestCase):
+ """
+ Official Client test base class for scenario testing.
+
+ Official Client tests are tests that have the following characteristics:
+
+ * Test basic operations of an API, typically in an order that
+ a regular user would perform those operations
+ * Test only the correct inputs and action paths -- no fuzz or
+ random input data is sent, only valid inputs.
+ * Use only the default client tool for calling an API
+ """
+
+ manager_class = OfficialClientManager
+
+ @classmethod
+ def tearDownClass(cls):
+ # NOTE(jaypipes): Because scenario tests are typically run in a
+ # specific order, and because test methods in scenario tests
+ # generally create resources in a particular order, we destroy
+ # resources in the reverse order in which resources are added to
+ # the scenario test class object
+ while cls.os_resources:
+ thing = cls.os_resources.pop()
+ LOG.debug("Deleting %r from shared resources of %s" %
+ (thing, cls.__name__))
+
+ try:
+ # OpenStack resources are assumed to have a delete()
+ # method which destroys the resource...
+ thing.delete()
+ except Exception as e:
+ # If the resource is already missing, mission accomplished.
+ if e.__class__.__name__ == 'NotFound':
+ continue
+ raise
+
+ def is_deletion_complete():
+ # Deletion testing is only required for objects whose
+ # existence cannot be checked via retrieval.
+ if isinstance(thing, dict):
+ return True
+ try:
+ thing.get()
+ except Exception as e:
+ # Clients are expected to return an exception
+ # called 'NotFound' if retrieval fails.
+ if e.__class__.__name__ == 'NotFound':
+ return True
+ raise
+ return False
+
+ # Block until resource deletion has completed or timed-out
+ tempest.test.call_until_true(is_deletion_complete, 10, 1)
+
+
+class NetworkScenarioTest(OfficialClientTest):
+ """
+ Base class for network scenario tests
+ """
+
+ @classmethod
+ def check_preconditions(cls):
+ if (cls.config.network.quantum_available):
+ cls.enabled = True
+ #verify that quantum_available is telling the truth
+ try:
+ cls.network_client.list_networks()
+ except exc.EndpointNotFound:
+ cls.enabled = False
+ raise
+ else:
+ cls.enabled = False
+ msg = 'Quantum not available'
+ raise cls.skipException(msg)
+
+ @classmethod
+ def setUpClass(cls):
+ super(NetworkScenarioTest, cls).setUpClass()
+ cls.tenant_id = cls.manager._get_identity_client(
+ cls.config.identity.username,
+ cls.config.identity.password,
+ cls.config.identity.tenant_name).tenant_id
+
+ def _create_keypair(self, client, namestart='keypair-smoke-'):
+ kp_name = rand_name(namestart)
+ keypair = client.keypairs.create(kp_name)
+ try:
+ self.assertEqual(keypair.id, kp_name)
+ self.set_resource(kp_name, keypair)
+ except AttributeError:
+ self.fail("Keypair object not successfully created.")
+ return keypair
+
+ def _create_security_group(self, client, namestart='secgroup-smoke-'):
+ # Create security group
+ sg_name = rand_name(namestart)
+ sg_desc = sg_name + " description"
+ secgroup = client.security_groups.create(sg_name, sg_desc)
+ try:
+ self.assertEqual(secgroup.name, sg_name)
+ self.assertEqual(secgroup.description, sg_desc)
+ self.set_resource(sg_name, secgroup)
+ except AttributeError:
+ self.fail("SecurityGroup object not successfully created.")
+
+ # Add rules to the security group
+ rulesets = [
+ {
+ # ssh
+ 'ip_protocol': 'tcp',
+ 'from_port': 22,
+ 'to_port': 22,
+ 'cidr': '0.0.0.0/0',
+ 'group_id': secgroup.id
+ },
+ {
+ # ping
+ 'ip_protocol': 'icmp',
+ 'from_port': -1,
+ 'to_port': -1,
+ 'cidr': '0.0.0.0/0',
+ 'group_id': secgroup.id
+ }
+ ]
+ for ruleset in rulesets:
+ try:
+ client.security_group_rules.create(secgroup.id, **ruleset)
+ except Exception:
+ self.fail("Failed to create rule in security group.")
+
+ return secgroup
+
+ def _create_network(self, tenant_id, namestart='network-smoke-'):
+ name = rand_name(namestart)
+ body = dict(
+ network=dict(
+ name=name,
+ tenant_id=tenant_id,
+ ),
+ )
+ result = self.network_client.create_network(body=body)
+ network = net_common.DeletableNetwork(client=self.network_client,
+ **result['network'])
+ self.assertEqual(network.name, name)
+ self.set_resource(name, network)
+ return network
+
+ def _list_networks(self):
+ nets = self.network_client.list_networks()
+ return nets['networks']
+
+ def _list_subnets(self):
+ subnets = self.network_client.list_subnets()
+ return subnets['subnets']
+
+ def _list_routers(self):
+ routers = self.network_client.list_routers()
+ return routers['routers']
+
+ def _create_subnet(self, network, namestart='subnet-smoke-'):
+ """
+ Create a subnet for the given network within the cidr block
+ configured for tenant networks.
+ """
+ cfg = self.config.network
+ tenant_cidr = netaddr.IPNetwork(cfg.tenant_network_cidr)
+ result = None
+ # Repeatedly attempt subnet creation with sequential cidr
+ # blocks until an unallocated block is found.
+ for subnet_cidr in tenant_cidr.subnet(cfg.tenant_network_mask_bits):
+ body = dict(
+ subnet=dict(
+ ip_version=4,
+ network_id=network.id,
+ tenant_id=network.tenant_id,
+ cidr=str(subnet_cidr),
+ ),
+ )
+ try:
+ result = self.network_client.create_subnet(body=body)
+ break
+ except exc.QuantumClientException as e:
+ is_overlapping_cidr = 'overlaps with another subnet' in str(e)
+ if not is_overlapping_cidr:
+ raise
+ self.assertIsNotNone(result, 'Unable to allocate tenant network')
+ subnet = net_common.DeletableSubnet(client=self.network_client,
+ **result['subnet'])
+ self.assertEqual(subnet.cidr, str(subnet_cidr))
+ self.set_resource(rand_name(namestart), subnet)
+ return subnet
+
+ def _create_port(self, network, namestart='port-quotatest-'):
+ name = rand_name(namestart)
+ body = dict(
+ port=dict(name=name,
+ network_id=network.id,
+ tenant_id=network.tenant_id))
+ result = self.network_client.create_port(body=body)
+ self.assertIsNotNone(result, 'Unable to allocate port')
+ port = net_common.DeletablePort(client=self.network_client,
+ **result['port'])
+ self.set_resource(name, port)
+ return port
+
+ def _create_server(self, client, network, name, key_name, security_groups):
+ flavor_id = self.config.compute.flavor_ref
+ base_image_id = self.config.compute.image_ref
+ create_kwargs = {
+ 'nics': [
+ {'net-id': network.id},
+ ],
+ 'key_name': key_name,
+ 'security_groups': security_groups,
+ }
+ server = client.servers.create(name, base_image_id, flavor_id,
+ **create_kwargs)
+ try:
+ self.assertEqual(server.name, name)
+ self.set_resource(name, server)
+ except AttributeError:
+ self.fail("Server not successfully created.")
+ self.status_timeout(client.servers, server.id, 'ACTIVE')
+ # The instance retrieved on creation is missing network
+ # details, necessitating retrieval after it becomes active to
+ # ensure correct details.
+ server = client.servers.get(server.id)
+ self.set_resource(name, server)
+ return server
+
+ def _create_floating_ip(self, server, external_network_id):
+ result = self.network_client.list_ports(device_id=server.id)
+ ports = result.get('ports', [])
+ self.assertEqual(len(ports), 1,
+ "Unable to determine which port to target.")
+ port_id = ports[0]['id']
+ body = dict(
+ floatingip=dict(
+ floating_network_id=external_network_id,
+ port_id=port_id,
+ tenant_id=server.tenant_id,
+ )
+ )
+ result = self.network_client.create_floatingip(body=body)
+ floating_ip = net_common.DeletableFloatingIp(
+ client=self.network_client,
+ **result['floatingip'])
+ self.set_resource(rand_name('floatingip-'), floating_ip)
+ return floating_ip
+
+ def _ping_ip_address(self, ip_address):
+ cmd = ['ping', '-c1', '-w1', ip_address]
+
+ def ping():
+ proc = subprocess.Popen(cmd,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ proc.wait()
+ if proc.returncode == 0:
+ return True
+
+ # TODO(mnewby) Allow configuration of execution and sleep duration.
+ return tempest.test.call_until_true(ping, 20, 1)
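
For illustration, a condensed, hypothetical sketch of a test built on NetworkScenarioTest that chains the helpers above (the public_network_id option, the floating_ip_address attribute, and the way client attributes land on the test class are assumptions here):

    from tempest.scenario import manager


    class TestMinimalNetworkFlow(manager.NetworkScenarioTest):

        @classmethod
        def setUpClass(cls):
            super(TestMinimalNetworkFlow, cls).setUpClass()
            cls.check_preconditions()

        def test_boot_and_ping(self):
            keypair = self._create_keypair(self.compute_client)
            secgroup = self._create_security_group(self.compute_client)
            network = self._create_network(self.tenant_id)
            self._create_subnet(network)
            server = self._create_server(self.compute_client, network,
                                         'smoke-server', keypair.name,
                                         [secgroup.name])
            # Associate a floating IP from the external network and ping it.
            floating_ip = self._create_floating_ip(
                server, self.config.network.public_network_id)
            self.assertTrue(self._ping_ip_address(
                floating_ip.floating_ip_address))
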
diff --git a/tempest/tests/network/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
similarity index 98%
rename from tempest/tests/network/test_network_basic_ops.py
rename to tempest/scenario/test_network_basic_ops.py
index 92ca65f..ee2dc0d 100644
--- a/tempest/tests/network/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -17,11 +17,12 @@
# under the License.
from tempest.common.utils.data_utils import rand_name
+from tempest.scenario import manager
from tempest.test import attr
-import tempest.tests.network.common as net_common
+from tempest.tests.network import common as net_common
-class TestNetworkBasicOps(net_common.TestNetworkSmokeCommon):
+class TestNetworkBasicOps(manager.NetworkScenarioTest):
"""
This smoke test suite assumes that Nova has been configured to
diff --git a/tempest/tests/network/test_network_quota_basic.py b/tempest/scenario/test_network_quotas.py
similarity index 95%
rename from tempest/tests/network/test_network_quota_basic.py
rename to tempest/scenario/test_network_quotas.py
index eaec708..8c3af73 100644
--- a/tempest/tests/network/test_network_quota_basic.py
+++ b/tempest/scenario/test_network_quotas.py
@@ -16,12 +16,12 @@
# under the License.
from quantumclient.common import exceptions as exc
-from tempest.tests.network.common import TestNetworkSmokeCommon
+from tempest.scenario.manager import NetworkScenarioTest
MAX_REASONABLE_ITERATIONS = 51 # more than enough. Default for port is 50.
-class TestNetworkQuotaBasic(TestNetworkSmokeCommon):
+class TestNetworkQuotaBasic(NetworkScenarioTest):
"""
This test suite contains tests that each loop trying to grab a
particular resource until a quota limit is hit.
diff --git a/tempest/tests/compute/servers/test_server_advanced_ops.py b/tempest/scenario/test_server_advanced_ops.py
similarity index 87%
rename from tempest/tests/compute/servers/test_server_advanced_ops.py
rename to tempest/scenario/test_server_advanced_ops.py
index 8be9c54..e48157e 100644
--- a/tempest/tests/compute/servers/test_server_advanced_ops.py
+++ b/tempest/scenario/test_server_advanced_ops.py
@@ -19,12 +19,12 @@
from tempest.common.utils.data_utils import rand_name
-from tempest import test
+from tempest.scenario import manager
LOG = logging.getLogger(__name__)
-class TestServerAdvancedOps(test.DefaultClientSmokeTest):
+class TestServerAdvancedOps(manager.OfficialClientTest):
"""
This test case stresses some advanced server instance operations:
@@ -66,18 +66,18 @@
self.assertEqual(self.instance.status, 'BUILD')
instance_id = self.get_resource('instance').id
- test.status_timeout(
- self, self.compute_client.servers, instance_id, 'ACTIVE')
+ self.status_timeout(
+ self.compute_client.servers, instance_id, 'ACTIVE')
instance = self.get_resource('instance')
instance_id = instance.id
resize_flavor = self.config.compute.flavor_ref_alt
LOG.debug("Resizing instance %s from flavor %s to flavor %s",
instance.id, instance.flavor, resize_flavor)
instance.resize(resize_flavor)
- test.status_timeout(self, self.compute_client.servers, instance_id,
+ self.status_timeout(self.compute_client.servers, instance_id,
'VERIFY_RESIZE')
LOG.debug("Confirming resize of instance %s", instance_id)
instance.confirm_resize()
- test.status_timeout(
- self, self.compute_client.servers, instance_id, 'ACTIVE')
+ self.status_timeout(
+ self.compute_client.servers, instance_id, 'ACTIVE')
diff --git a/tempest/tests/compute/servers/test_server_basic_ops.py b/tempest/scenario/test_server_basic_ops.py
similarity index 90%
rename from tempest/tests/compute/servers/test_server_basic_ops.py
rename to tempest/scenario/test_server_basic_ops.py
index e4e246a..c5c6728 100644
--- a/tempest/tests/compute/servers/test_server_basic_ops.py
+++ b/tempest/scenario/test_server_basic_ops.py
@@ -18,12 +18,12 @@
import logging
from tempest.common.utils.data_utils import rand_name
-from tempest import test
+from tempest.scenario import manager
LOG = logging.getLogger(__name__)
-class TestServerBasicOps(test.DefaultClientSmokeTest):
+class TestServerBasicOps(manager.OfficialClientTest):
"""
This smoke test case follows this basic set of operations:
@@ -101,8 +101,8 @@
def wait_on_active(self):
instance_id = self.get_resource('instance').id
- test.status_timeout(
- self, self.compute_client.servers, instance_id, 'ACTIVE')
+ self.status_timeout(
+ self.compute_client.servers, instance_id, 'ACTIVE')
def pause_server(self):
instance = self.get_resource('instance')
@@ -110,8 +110,8 @@
LOG.debug("Pausing instance %s. Current status: %s",
instance_id, instance.status)
instance.pause()
- test.status_timeout(
- self, self.compute_client.servers, instance_id, 'PAUSED')
+ self.status_timeout(
+ self.compute_client.servers, instance_id, 'PAUSED')
def unpause_server(self):
instance = self.get_resource('instance')
@@ -119,8 +119,8 @@
LOG.debug("Unpausing instance %s. Current status: %s",
instance_id, instance.status)
instance.unpause()
- test.status_timeout(
- self, self.compute_client.servers, instance_id, 'ACTIVE')
+ self.status_timeout(
+ self.compute_client.servers, instance_id, 'ACTIVE')
def suspend_server(self):
instance = self.get_resource('instance')
@@ -128,7 +128,7 @@
LOG.debug("Suspending instance %s. Current status: %s",
instance_id, instance.status)
instance.suspend()
- test.status_timeout(self, self.compute_client.servers,
+ self.status_timeout(self.compute_client.servers,
instance_id, 'SUSPENDED')
def resume_server(self):
@@ -137,8 +137,8 @@
LOG.debug("Resuming instance %s. Current status: %s",
instance_id, instance.status)
instance.resume()
- test.status_timeout(
- self, self.compute_client.servers, instance_id, 'ACTIVE')
+ self.status_timeout(
+ self.compute_client.servers, instance_id, 'ACTIVE')
def terminate_instance(self):
instance = self.get_resource('instance')
diff --git a/tempest/services/compute/json/services_client.py b/tempest/services/compute/json/services_client.py
new file mode 100644
index 0000000..d054f72
--- /dev/null
+++ b/tempest/services/compute/json/services_client.py
@@ -0,0 +1,33 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 NEC Corporation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+
+from tempest.common.rest_client import RestClient
+
+
+class ServicesClientJSON(RestClient):
+
+ def __init__(self, config, username, password, auth_url, tenant_name=None):
+ super(ServicesClientJSON, self).__init__(config, username, password,
+ auth_url, tenant_name)
+ self.service = self.config.compute.catalog_type
+
+ def list_services(self):
+ resp, body = self.get("os-services")
+ body = json.loads(body)
+ return resp, body['services']
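
For illustration, a quick sketch of calling the new client through the admin manager (os-services is an admin extension; the response fields shown are the usual ones and are assumptions here):

    from tempest import clients

    manager = clients.AdminManager()
    resp, services = manager.services_client.list_services()
    for service in services:
        # Typical fields include 'binary' (e.g. nova-compute), 'host'
        # and 'state'.
        print("%s on %s is %s" % (service.get('binary'),
                                  service.get('host'),
                                  service.get('state')))
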
diff --git a/tempest/services/compute/xml/aggregates_client.py b/tempest/services/compute/xml/aggregates_client.py
new file mode 100644
index 0000000..0ef8e22
--- /dev/null
+++ b/tempest/services/compute/xml/aggregates_client.py
@@ -0,0 +1,103 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 NEC Corporation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+
+from tempest.common.rest_client import RestClientXML
+from tempest import exceptions
+from tempest.services.compute.xml.common import Document
+from tempest.services.compute.xml.common import Element
+from tempest.services.compute.xml.common import xml_to_json
+
+
+class AggregatesClientXML(RestClientXML):
+
+ def __init__(self, config, username, password, auth_url, tenant_name=None):
+ super(AggregatesClientXML, self).__init__(config, username, password,
+ auth_url, tenant_name)
+ self.service = self.config.compute.catalog_type
+
+ def _format_aggregate(self, g):
+ agg = xml_to_json(g)
+ aggregate = {}
+ for key, value in agg.items():
+ if key == 'hosts':
+ aggregate['hosts'] = []
+ for k, v in value.items():
+ aggregate['hosts'].append(v)
+ elif key == 'availability_zone':
+ aggregate[key] = None if value == 'None' else value
+ else:
+ aggregate[key] = value
+ return aggregate
+
+ def _parse_array(self, node):
+ return [self._format_aggregate(x) for x in node]
+
+ def list_aggregates(self):
+ """Get aggregate list."""
+ resp, body = self.get("os-aggregates", self.headers)
+ aggregates = self._parse_array(etree.fromstring(body))
+ return resp, aggregates
+
+ def get_aggregate(self, aggregate_id):
+ """Get details of the given aggregate."""
+ resp, body = self.get("os-aggregates/%s" % str(aggregate_id),
+ self.headers)
+ aggregate = self._format_aggregate(etree.fromstring(body))
+ return resp, aggregate
+
+ def create_aggregate(self, name, availability_zone=None):
+ """Creates a new aggregate."""
+ post_body = Element("aggregate",
+ name=name,
+ availability_zone=availability_zone)
+ resp, body = self.post('os-aggregates',
+ str(Document(post_body)),
+ self.headers)
+ aggregate = self._format_aggregate(etree.fromstring(body))
+ return resp, aggregate
+
+ def delete_aggregate(self, aggregate_id):
+ """Deletes the given aggregate."""
+ return self.delete("os-aggregates/%s" % str(aggregate_id),
+ self.headers)
+
+ def is_resource_deleted(self, id):
+ try:
+ self.get_aggregate(id)
+ except exceptions.NotFound:
+ return True
+ return False
+
+ def add_host(self, aggregate_id, host):
+ """Adds a host to the given aggregate."""
+ post_body = Element("add_host", host=host)
+ resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
+ str(Document(post_body)),
+ self.headers)
+ aggregate = self._format_aggregate(etree.fromstring(body))
+ return resp, aggregate
+
+ def remove_host(self, aggregate_id, host):
+ """Removes a host from the given aggregate."""
+ post_body = Element("remove_host", host=host)
+ resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
+ str(Document(post_body)),
+ self.headers)
+ aggregate = self._format_aggregate(etree.fromstring(body))
+ return resp, aggregate
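
For illustration, the XML client mirrors the JSON one and is reached through the manager wiring added in clients.py; a sketch, assuming the admin manager accepts an interface argument like the other managers and using a hypothetical host name:

    from tempest import clients

    manager = clients.AdminManager(interface='xml')
    client = manager.aggregates_client
    resp, aggregate = client.create_aggregate('test-aggregate', 'nova')
    client.add_host(aggregate['id'], 'compute-host-1')
    client.remove_host(aggregate['id'], 'compute-host-1')
    client.delete_aggregate(aggregate['id'])
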
diff --git a/tempest/services/compute/xml/services_client.py b/tempest/services/compute/xml/services_client.py
new file mode 100644
index 0000000..ce23403
--- /dev/null
+++ b/tempest/services/compute/xml/services_client.py
@@ -0,0 +1,34 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 NEC Corporation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+from tempest.common.rest_client import RestClientXML
+from tempest.services.compute.xml.common import xml_to_json
+
+
+class ServicesClientXML(RestClientXML):
+
+ def __init__(self, config, username, password, auth_url, tenant_name=None):
+ super(ServicesClientXML, self).__init__(config, username, password,
+ auth_url, tenant_name)
+ self.service = self.config.compute.catalog_type
+
+ def list_services(self):
+ resp, body = self.get("os-services", self.headers)
+ node = etree.fromstring(body)
+ body = [xml_to_json(x) for x in node.getchildren()]
+ return resp, body
diff --git a/tempest/tests/boto/__init__.py b/tempest/services/orchestration/__init__.py
similarity index 100%
copy from tempest/tests/boto/__init__.py
copy to tempest/services/orchestration/__init__.py
diff --git a/tempest/tests/boto/__init__.py b/tempest/services/orchestration/json/__init__.py
similarity index 100%
copy from tempest/tests/boto/__init__.py
copy to tempest/services/orchestration/json/__init__.py
diff --git a/tempest/services/orchestration/json/orchestration_client.py b/tempest/services/orchestration/json/orchestration_client.py
new file mode 100644
index 0000000..81162df
--- /dev/null
+++ b/tempest/services/orchestration/json/orchestration_client.py
@@ -0,0 +1,99 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2013 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import time
+import urllib
+
+from tempest.common import rest_client
+from tempest import exceptions
+
+
+class OrchestrationClient(rest_client.RestClient):
+
+ def __init__(self, config, username, password, auth_url, tenant_name=None):
+ super(OrchestrationClient, self).__init__(config, username, password,
+ auth_url, tenant_name)
+ self.service = self.config.orchestration.catalog_type
+ self.build_interval = self.config.orchestration.build_interval
+ self.build_timeout = self.config.orchestration.build_timeout
+
+ def list_stacks(self, params=None):
+ """Lists all stacks for a user."""
+
+ uri = 'stacks'
+ if params:
+ uri += '?%s' % urllib.urlencode(params)
+
+ resp, body = self.get(uri)
+ body = json.loads(body)
+ return resp, body
+
+ def create_stack(self, name, disable_rollback=True, parameters={},
+ timeout_mins=60, template=None, template_url=None):
+ post_body = {
+ "stack_name": name,
+ "disable_rollback": disable_rollback,
+ "parameters": parameters,
+ "timeout_mins": timeout_mins,
+ "template": "HeatTemplateFormatVersion: '2012-12-12'\n"
+ }
+ if template:
+ post_body['template'] = template
+ if template_url:
+ post_body['template_url'] = template_url
+ body = json.dumps(post_body)
+ uri = 'stacks'
+ resp, body = self.post(uri, headers=self.headers, body=body)
+ return resp, body
+
+ def get_stack(self, stack_identifier):
+ """Returns the details of a single stack."""
+ url = "stacks/%s" % stack_identifier
+ resp, body = self.get(url)
+ body = json.loads(body)
+ return resp, body['stack']
+
+ def delete_stack(self, stack_identifier):
+ """Deletes the specified Stack."""
+ return self.delete("stacks/%s" % str(stack_identifier))
+
+ def wait_for_stack_status(self, stack_identifier, status, failure_status=(
+ 'CREATE_FAILED',
+ 'DELETE_FAILED',
+ 'UPDATE_FAILED',
+ 'ROLLBACK_FAILED')):
+ """Waits for a Volume to reach a given status."""
+ stack_status = None
+ start = int(time.time())
+
+ while stack_status != status:
+ resp, body = self.get_stack(stack_identifier)
+ stack_name = body['stack_name']
+ stack_status = body['stack_status']
+ if stack_status in failure_status:
+ raise exceptions.StackBuildErrorException(
+ stack_identifier=stack_identifier,
+ stack_status=stack_status,
+ stack_status_reason=body['stack_status_reason'])
+
+ if int(time.time()) - start >= self.build_timeout:
+ message = ('Stack %s failed to reach %s status within '
+ 'the required time (%s s).' %
+ (stack_name, status, self.build_timeout))
+ raise exceptions.TimeoutException(message)
+ time.sleep(self.build_interval)
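
For illustration, a minimal sketch of driving the client directly (the stack name, template, and the <name>/<id> identifier convention are assumptions here):

    from tempest import clients

    client = clients.OrchestrationManager().orchestration_client

    template = "HeatTemplateFormatVersion: '2012-12-12'\n"
    resp, body = client.create_stack('test-stack', template=template)
    # Heat resolves a bare name; compose the canonical <name>/<id> form
    # for the remaining calls.
    resp, stack = client.get_stack('test-stack')
    stack_identifier = '%s/%s' % (stack['stack_name'], stack['id'])
    client.wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
    client.delete_stack(stack_identifier)
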
diff --git a/tempest/stress/README.rst b/tempest/stress/README.rst
new file mode 100644
index 0000000..2c431ed
--- /dev/null
+++ b/tempest/stress/README.rst
@@ -0,0 +1,47 @@
+Quanta Research Cambridge OpenStack Stress Test System
+======================================================
+
+Nova is a distributed, asynchronous system that is prone to race condition
+bugs. These bugs will not be easily found during
+functional testing but will be encountered by users in large deployments in a
+way that is hard to debug. The stress test tries to cause these bugs to happen
+in a more controlled environment.
+
+
+Environment
+------------
+This particular framework assumes your working Nova cluster understands Nova
+API 2.0. The stress tests can read the logs from the cluster. To enable this
+you have to provide the hostname to call 'nova-manage' and
+the private key and user name for ssh to the cluster in the
+[stress] section of tempest.conf. You also need to provide the
+location of the log files:
+
+ target_logfiles = "regexp to all log files to be checked for errors"
+ target_private_key_path = "private ssh key for controller and log file nodes"
+ target_ssh_user = "username for controller and log file nodes"
+ target_controller = "hostname or ip of controller node (for nova-manage)"
+ log_check_interval = "time between checking logs for errors (default 60s)"
+
+
+
+Running the sample test
+-----------------------
+
+To test installation, do the following (from the tempest/stress directory):
+
+ ./run_stress.py etc/sample-test.json -d 30
+
+This sample test tries to create a few VMs and kill a few VMs.
+
+
+Additional Tools
+----------------
+
+Sometimes the tests don't finish, or there are failures. In these
+cases, you may want to clean out the nova cluster. You can use the
+following script to destroy any leftover servers, keypairs, and
+floating ips:
+
+tempest/stress/cleanup.py
diff --git a/stress/__init__.py b/tempest/stress/__init__.py
similarity index 75%
copy from stress/__init__.py
copy to tempest/stress/__init__.py
index 0875e0b..1caf74a 100644
--- a/stress/__init__.py
+++ b/tempest/stress/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2011 Quanta Research Cambridge, Inc.
+# Copyright 2013 Quanta Research Cambridge, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -11,7 +11,3 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-"""Basic framework for constructing various simulated workloads for a
-nova cluster."""
-
-__author__ = "David Kranz and Eugene Shih"
diff --git a/stress/__init__.py b/tempest/stress/actions/__init__.py
similarity index 75%
copy from stress/__init__.py
copy to tempest/stress/actions/__init__.py
index 0875e0b..1caf74a 100644
--- a/stress/__init__.py
+++ b/tempest/stress/actions/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2011 Quanta Research Cambridge, Inc.
+# Copyright 2013 Quanta Research Cambridge, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -11,7 +11,3 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-"""Basic framework for constructing various simulated workloads for a
-nova cluster."""
-
-__author__ = "David Kranz and Eugene Shih"
diff --git a/tempest/stress/actions/create_destroy_server.py b/tempest/stress/actions/create_destroy_server.py
new file mode 100644
index 0000000..44b149f
--- /dev/null
+++ b/tempest/stress/actions/create_destroy_server.py
@@ -0,0 +1,34 @@
+# Copyright 2013 Quanta Research Cambridge, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from tempest.common.utils.data_utils import rand_name
+
+
+def create_destroy(manager, logger):
+ image = manager.config.compute.image_ref
+ flavor = manager.config.compute.flavor_ref
+ while True:
+ name = rand_name("instance")
+ logger.info("creating %s" % name)
+ resp, server = manager.servers_client.create_server(
+ name, image, flavor)
+ server_id = server['id']
+ assert(resp.status == 202)
+ manager.servers_client.wait_for_server_status(server_id, 'ACTIVE')
+ logger.info("created %s" % server_id)
+ logger.info("deleting %s" % name)
+ resp, _ = manager.servers_client.delete_server(server_id)
+ assert(resp.status == 204)
+ manager.servers_client.wait_for_server_termination(server_id)
+ logger.info("deleted %s" % server_id)
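
For illustration, the action is meant to be launched by the stress driver in its own process; a hypothetical standalone invocation could look like this, assuming clients.Manager() can build its clients from the default credentials in tempest.conf (the loop runs until terminated):

    import logging
    import multiprocessing

    from tempest import clients
    from tempest.stress.actions.create_destroy_server import create_destroy

    manager = clients.Manager()
    logger = logging.getLogger('tempest.stress.create_destroy')
    process = multiprocessing.Process(target=create_destroy,
                                      args=(manager, logger))
    process.start()
    # Let it churn for a while, then stop it.
    process.join(60)
    process.terminate()
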
diff --git a/tempest/stress/cleanup.py b/tempest/stress/cleanup.py
new file mode 100644
index 0000000..b2cb70a
--- /dev/null
+++ b/tempest/stress/cleanup.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python
+
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 Quanta Research Cambridge, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from tempest import clients
+
+
+def cleanup():
+ admin_manager = clients.AdminManager()
+
+ _, body = admin_manager.servers_client.list_servers({"all_tenants": True})
+ for s in body['servers']:
+ try:
+ admin_manager.servers_client.delete_server(s['id'])
+ except Exception:
+ pass
+
+ for s in body['servers']:
+ try:
+ admin_manager.servers_client.wait_for_server_termination(s['id'])
+ except Exception:
+ pass
+
+ _, keypairs = admin_manager.keypairs_client.list_keypairs()
+ for k in keypairs:
+ try:
+ admin_manager.keypairs_client.delete_keypair(k['name'])
+ except Exception:
+ pass
+
+ _, floating_ips = admin_manager.floating_ips_client.list_floating_ips()
+ for f in floating_ips:
+ try:
+ admin_manager.floating_ips_client.delete_floating_ip(f['id'])
+ except Exception:
+ pass
+
+ _, users = admin_manager.identity_client.get_users()
+ for user in users:
+ if user['name'].startswith("stress_user"):
+ admin_manager.identity_client.delete_user(user['id'])
+
+ _, tenants = admin_manager.identity_client.list_tenants()
+ for tenant in tenants:
+ if tenant['name'].startswith("stress_tenant"):
+ admin_manager.identity_client.delete_tenant(tenant['id'])
diff --git a/tempest/stress/driver.py b/tempest/stress/driver.py
new file mode 100644
index 0000000..51f159d
--- /dev/null
+++ b/tempest/stress/driver.py
@@ -0,0 +1,156 @@
+# Copyright 2013 Quanta Research Cambridge, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import importlib
+import logging
+import multiprocessing
+import time
+
+from tempest import clients
+from tempest.common import ssh
+from tempest.common.utils.data_utils import rand_name
+from tempest import exceptions
+from tempest.stress import cleanup
+
+admin_manager = clients.AdminManager()
+
+# setup logging to file
+logging.basicConfig(
+ format='%(asctime)s %(process)d %(name)-20s %(levelname)-8s %(message)s',
+ datefmt='%m-%d %H:%M:%S',
+ filename="stress.debug.log",
+ filemode="w",
+ level=logging.DEBUG,
+)
+
+# define a handler which writes INFO messages or higher to sys.stdout
+_console = logging.StreamHandler()
+_console.setLevel(logging.INFO)
+# set a format which is simpler for console use
+format_str = '%(asctime)s %(process)d %(name)-20s: %(levelname)-8s %(message)s'
+_formatter = logging.Formatter(format_str)
+# tell the handler to use this format
+_console.setFormatter(_formatter)
+# add the handler to the root logger
+logger = logging.getLogger('tempest.stress')
+logger.addHandler(_console)
+
+
+def do_ssh(command, host):
+ username = admin_manager.config.stress.target_ssh_user
+ key_filename = admin_manager.config.stress.target_private_key_path
+ if not (username and key_filename):
+ return None
+ ssh_client = ssh.Client(host, username, key_filename=key_filename)
+ try:
+ return ssh_client.exec_command(command)
+ except exceptions.SSHExecCommandFailed:
+ return None
+
+
+def _get_compute_nodes(controller):
+ """
+ Returns a list of active compute nodes. List is generated by running
+ nova-manage on the controller.
+ """
+ nodes = []
+ cmd = "nova-manage service list | grep ^nova-compute"
+ output = do_ssh(cmd, controller)
+ if not output:
+ return nodes
+ # For example: nova-compute xg11eth0 nova enabled :-) 2011-10-31 18:57:46
+ # This is fragile but there is, at present, no other way to get this info.
+ for line in output.split('\n'):
+ words = line.split()
+ if len(words) > 0 and words[4] == ":-)":
+ nodes.append(words[1])
+ return nodes
+
+
+def _error_in_logs(logfiles, nodes):
+ """
+ Detect errors in the nova log files on the controller and compute nodes.
+ """
+ grep = 'egrep "ERROR|TRACE" %s' % logfiles
+ for node in nodes:
+ errors = do_ssh(grep, node)
+ if not errors:
+ return None
+ if len(errors) > 0:
+ logger.error('%s: %s' % (node, errors))
+ return errors
+ return None
+
+
+def get_action_function(path):
+ (module_part, _, function) = path.rpartition('.')
+ return getattr(importlib.import_module(module_part), function)
+
+
+def stress_openstack(tests, duration):
+ """
+ Workload driver. Executes an action function against a Nova cluster.
+
+ """
+ logfiles = admin_manager.config.stress.target_logfiles
+ log_check_interval = int(admin_manager.config.stress.log_check_interval)
+ if logfiles:
+ controller = admin_manager.config.stress.target_controller
+ computes = _get_compute_nodes(controller)
+ for node in computes:
+ do_ssh("rm -f %s" % logfiles, node)
+ processes = []
+ for test in tests:
+ if test.get('use_admin', False):
+ manager = admin_manager
+ else:
+ manager = clients.Manager()
+ for _ in xrange(test.get('threads', 1)):
+ if test.get('use_isolated_tenants', False):
+ username = rand_name("stress_user")
+ tenant_name = rand_name("stress_tenant")
+ password = "pass"
+ identity_client = admin_manager.identity_client
+ _, tenant = identity_client.create_tenant(name=tenant_name)
+ identity_client.create_user(username,
+ password,
+ tenant['id'],
+ "email")
+ manager = clients.Manager(username=username,
+ password="pass",
+ tenant_name=tenant_name)
+ target = get_action_function(test['action'])
+ p = multiprocessing.Process(target=target,
+ args=(manager, logger),
+ kwargs=test.get('kwargs', {}))
+ processes.append(p)
+ p.start()
+ end_time = time.time() + duration
+ had_errors = False
+ while True:
+ remaining = end_time - time.time()
+ if remaining <= 0:
+ break
+ time.sleep(min(remaining, log_check_interval))
+ if not logfiles:
+ continue
+ errors = _error_in_logs(logfiles, computes)
+ if errors:
+ had_errors = True
+ break
+ for p in processes:
+ p.terminate()
+ if not had_errors:
+ logger.info("cleaning up")
+ cleanup.cleanup()
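For reference, get_action_function() splits the "action" string at the last dot and imports the module, so test descriptions must use a fully qualified "package.module.function" path. An illustrative lookup, assuming the action added in this change:

    from tempest.stress import driver

    # Resolves to the create_destroy() function defined above.
    func = driver.get_action_function(
        "tempest.stress.actions.create_destroy_server.create_destroy")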
diff --git a/tempest/stress/etc/sample-test.json b/tempest/stress/etc/sample-test.json
new file mode 100644
index 0000000..5a0189c
--- /dev/null
+++ b/tempest/stress/etc/sample-test.json
@@ -0,0 +1,7 @@
+[{"action": "tempest.stress.actions.create_destroy_server.create_destroy",
+ "threads": 8,
+ "use_admin": false,
+ "use_isolated_tenants": false,
+ "kwargs": {}
+ }
+]
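This file is read with json.load() by run_stress.py below, so each entry is simply a dict whose keys match what stress_openstack() looks up. A sketch of the equivalent in-memory structure, with values other than the action path chosen purely for illustration:

    # Equivalent Python form of a test description entry; the keys are the
    # ones read by tempest.stress.driver.stress_openstack().
    tests = [{
        "action": "tempest.stress.actions.create_destroy_server.create_destroy",
        "threads": 4,                  # number of worker processes to start
        "use_admin": False,            # run with admin credentials if True
        "use_isolated_tenants": True,  # create a stress_user/stress_tenant pair
        "kwargs": {},                  # extra keyword arguments for the action
    }]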
diff --git a/tempest/stress/run_stress.py b/tempest/stress/run_stress.py
new file mode 100755
index 0000000..ef0ec8e
--- /dev/null
+++ b/tempest/stress/run_stress.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 Quanta Research Cambridge, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import json
+
+from tempest.stress import driver
+
+
+def main(ns):
+ tests = json.load(open(ns.tests, 'r'))
+ driver.stress_openstack(tests, ns.duration)
+
+
+parser = argparse.ArgumentParser(description='Run stress tests. ')
+parser.add_argument('-d', '--duration', default=300, type=int,
+ help="Duration of test.")
+parser.add_argument('tests', help="Name of the file with test description.")
+main(parser.parse_args())
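run_stress.py is only a thin argparse wrapper; the same workload can also be driven programmatically, which is what the script does internally. A minimal sketch, assuming the sample description above and a valid tempest configuration:

    import json

    from tempest.stress import driver

    # Equivalent to: run_stress.py -d 600 tempest/stress/etc/sample-test.json
    with open("tempest/stress/etc/sample-test.json") as f:
        tests = json.load(f)
    driver.stress_openstack(tests, duration=600)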
diff --git a/stress/__init__.py b/tempest/stress/tools/cleanup.py
old mode 100644
new mode 100755
similarity index 75%
rename from stress/__init__.py
rename to tempest/stress/tools/cleanup.py
index 0875e0b..7139d6c
--- a/stress/__init__.py
+++ b/tempest/stress/tools/cleanup.py
@@ -1,4 +1,6 @@
-# Copyright 2011 Quanta Research Cambridge, Inc.
+#!/usr/bin/env python
+
+# Copyright 2013 Quanta Research Cambridge, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -11,7 +13,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-"""Basic framework for constructing various simulated workloads for a
-nova cluster."""
-__author__ = "David Kranz and Eugene Shih"
+from tempest.stress import cleanup
+
+
+cleanup.cleanup()
diff --git a/tempest/test.py b/tempest/test.py
index 4db9827..b7f4b9b 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -37,13 +37,12 @@
"""
def decorator(f):
- testtool_attributes = ('smoke')
-
- if 'type' in kwargs and kwargs['type'] in testtool_attributes:
- return nose.plugins.attrib.attr(*args, **kwargs)(
- testtools.testcase.attr(kwargs['type'])(f))
- else:
- return nose.plugins.attrib.attr(*args, **kwargs)(f)
+ if 'type' in kwargs and isinstance(kwargs['type'], str):
+ f = testtools.testcase.attr(kwargs['type'])(f)
+ elif 'type' in kwargs and isinstance(kwargs['type'], list):
+ for attr in kwargs['type']:
+ f = testtools.testcase.attr(attr)(f)
+ return nose.plugins.attrib.attr(*args, **kwargs)(f)
return decorator
@@ -51,10 +50,8 @@
class BaseTestCase(testtools.TestCase,
testtools.testcase.WithAttributes,
testresources.ResourcedTestCase):
- def __init__(self, *args, **kwargs):
- super(BaseTestCase, self).__init__(*args, **kwargs)
- #NOTE(afazekas): inspection workaround
- BaseTestCase.config = config.TempestConfig()
+
+ config = config.TempestConfig()
@classmethod
def setUpClass(cls):
@@ -62,6 +59,29 @@
super(BaseTestCase, cls).setUpClass()
+def call_until_true(func, duration, sleep_for):
+ """
+ Call the given function until it returns True (and return True) or
+ until the specified duration (in seconds) elapses (and return
+ False).
+
+ :param func: A zero argument callable that returns True on success.
+ :param duration: The number of seconds for which to attempt a
+ successful call of the function.
+ :param sleep_for: The number of seconds to sleep after an unsuccessful
+ invocation of the function.
+ """
+ now = time.time()
+ timeout = now + duration
+ while now < timeout:
+ if func():
+ return True
+ LOG.debug("Sleeping for %d seconds", sleep_for)
+ time.sleep(sleep_for)
+ now = time.time()
+ return False
+
+
class TestCase(BaseTestCase):
"""Base test case class for all Tempest tests
@@ -96,117 +116,36 @@
self.os_resources.remove(thing)
del self.resource_keys[key]
-
-def call_until_true(func, duration, sleep_for):
- """
- Call the given function until it returns True (and return True) or
- until the specified duration (in seconds) elapses (and return
- False).
-
- :param func: A zero argument callable that returns True on success.
- :param duration: The number of seconds for which to attempt a successful
- call of the function.
- :param sleep_for: The number of seconds to sleep after an unsuccessful
- invocation of the function.
- """
- now = time.time()
- timeout = now + duration
- while now < timeout:
- if func():
- return True
- LOG.debug("Sleeping for %d seconds", sleep_for)
- time.sleep(sleep_for)
- now = time.time()
- return False
-
-
-def status_timeout(testcase, things, thing_id, expected_status):
- """
- Given a thing and an expected status, do a loop, sleeping
- for a configurable amount of time, checking for the
- expected status to show. At any time, if the returned
- status of the thing is ERROR, fail out.
- """
- def check_status():
- # python-novaclient has resources available to its client
- # that all implement a get() method taking an identifier
- # for the singular resource to retrieve.
- thing = things.get(thing_id)
- new_status = thing.status
- if new_status == 'ERROR':
- testcase.fail("%s failed to get to expected status."
+ def status_timeout(self, things, thing_id, expected_status):
+ """
+ Given a thing and an expected status, do a loop, sleeping
+ for a configurable amount of time, checking for the
+ expected status to show. At any time, if the returned
+ status of the thing is ERROR, fail out.
+ """
+ def check_status():
+ # python-novaclient has resources available to its client
+ # that all implement a get() method taking an identifier
+ # for the singular resource to retrieve.
+ thing = things.get(thing_id)
+ new_status = thing.status
+ if new_status == 'ERROR':
+ self.fail("%s failed to get to expected status."
"In ERROR state."
% thing)
- elif new_status == expected_status:
- return True # All good.
- LOG.debug("Waiting for %s to get to %s status. "
- "Currently in %s status",
- thing, expected_status, new_status)
- conf = config.TempestConfig()
- if not call_until_true(check_status,
- conf.compute.build_timeout,
- conf.compute.build_interval):
- testcase.fail("Timed out waiting for thing %s to become %s"
+ elif new_status == expected_status:
+ return True # All good.
+ LOG.debug("Waiting for %s to get to %s status. "
+ "Currently in %s status",
+ thing, expected_status, new_status)
+ conf = config.TempestConfig()
+ if not call_until_true(check_status,
+ conf.compute.build_timeout,
+ conf.compute.build_interval):
+ self.fail("Timed out waiting for thing %s to become %s"
% (thing_id, expected_status))
-class DefaultClientSmokeTest(TestCase):
-
- """
- Base smoke test case class that provides the default clients to
- access the various OpenStack APIs.
-
- Smoke tests are tests that have the following characteristics:
-
- * Test basic operations of an API, typically in an order that
- a regular user would perform those operations
- * Test only the correct inputs and action paths -- no fuzz or
- random input data is sent, only valid inputs.
- * Use only the default client tool for calling an API
- """
-
- manager_class = manager.DefaultClientManager
-
- @classmethod
- def tearDownClass(cls):
- # NOTE(jaypipes): Because smoke tests are typically run in a specific
- # order, and because test methods in smoke tests generally create
- # resources in a particular order, we destroy resources in the reverse
- # order in which resources are added to the smoke test class object
- while cls.os_resources:
- thing = cls.os_resources.pop()
- LOG.debug("Deleting %r from shared resources of %s" %
- (thing, cls.__name__))
-
- try:
- # OpenStack resources are assumed to have a delete()
- # method which destroys the resource...
- thing.delete()
- except Exception as e:
- # If the resource is already missing, mission accomplished.
- if e.__class__.__name__ == 'NotFound':
- continue
- raise
-
- def is_deletion_complete():
- # Deletion testing is only required for objects whose
- # existence cannot be checked via retrieval.
- if isinstance(thing, dict):
- return True
- try:
- thing.get()
- except Exception as e:
- # Clients are expected to return an exception
- # called 'NotFound' if retrieval fails.
- if e.__class__.__name__ == 'NotFound':
- return True
- raise
- return False
-
- # Block until resource deletion has completed or timed-out
- call_until_true(is_deletion_complete, 10, 1)
-
-
class ComputeFuzzClientTest(TestCase):
"""
diff --git a/tempest/tests/compute/__init__.py b/tempest/tests/compute/__init__.py
index 36893e3..968f17e 100644
--- a/tempest/tests/compute/__init__.py
+++ b/tempest/tests/compute/__init__.py
@@ -28,7 +28,6 @@
CREATE_IMAGE_ENABLED = CONFIG.compute.create_image_enabled
RESIZE_AVAILABLE = CONFIG.compute.resize_available
CHANGE_PASSWORD_AVAILABLE = CONFIG.compute.change_password_available
-WHITEBOX_ENABLED = CONFIG.whitebox.whitebox_enabled
DISK_CONFIG_ENABLED = True
DISK_CONFIG_ENABLED_OVERRIDE = CONFIG.compute.disk_config_enabled_override
FLAVOR_EXTRA_DATA_ENABLED = True
diff --git a/tempest/tests/compute/admin/test_aggregates.py b/tempest/tests/compute/admin/test_aggregates.py
index 06acc41..07df77f 100644
--- a/tempest/tests/compute/admin/test_aggregates.py
+++ b/tempest/tests/compute/admin/test_aggregates.py
@@ -27,13 +27,14 @@
Tests Aggregates API that require admin privileges
"""
+ _host_key = 'OS-EXT-SRV-ATTR:host'
_interface = 'json'
@classmethod
def setUpClass(cls):
super(AggregatesAdminTestJSON, cls).setUpClass()
cls.client = cls.os_adm.aggregates_client
- cls.user_client = cls.os.aggregates_client
+ cls.user_client = cls.aggregates_client
cls.aggregate_name_prefix = 'test_aggregate_'
cls.az_name_prefix = 'test_az_'
@@ -212,7 +213,7 @@
availability_zone=az_name)
servers_client.wait_for_server_status(server['id'], 'ACTIVE')
resp, body = admin_servers_client.get_server(server['id'])
- self.assertEqual(self.host, body['OS-EXT-SRV-ATTR:host'])
+ self.assertEqual(self.host, body[self._host_key])
@attr(type='negative')
def test_aggregate_add_non_exist_host(self):
@@ -254,3 +255,9 @@
self.assertRaises(exceptions.Unauthorized,
self.user_client.remove_host,
aggregate['id'], self.host)
+
+
+class AggregatesAdminTestXML(AggregatesAdminTestJSON):
+ _host_key = (
+ '{http://docs.openstack.org/compute/ext/extended_status/api/v1.1}host')
+ _interface = 'xml'
diff --git a/tempest/tests/compute/admin/test_quotas.py b/tempest/tests/compute/admin/test_quotas.py
index e905b19..7160aed 100644
--- a/tempest/tests/compute/admin/test_quotas.py
+++ b/tempest/tests/compute/admin/test_quotas.py
@@ -15,6 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+from tempest.common.utils.data_utils import rand_name
from tempest import exceptions
from tempest.test import attr
from tempest.tests.compute import base
@@ -30,6 +31,7 @@
cls.client = cls.os.quotas_client
cls.adm_client = cls.os_adm.quotas_client
cls.identity_admin_client = cls._get_identity_admin_client()
+ cls.sg_client = cls.security_groups_client
resp, tenants = cls.identity_admin_client.list_tenants()
@@ -177,6 +179,60 @@
instances=default_instances_quota)
self.assertRaises(exceptions.OverLimit, self.create_server)
+ @attr(type='negative')
+ def test_security_groups_exceed_limit(self):
+ # Negative test: Creation of Security Groups over limit should FAIL
+
+ resp, quota_set = self.client.get_quota_set(self.demo_tenant_id)
+ default_sg_quota = quota_set['security_groups']
+ sg_quota = 0 # Set the quota to zero to conserve resources
+
+ resp, quota_set =\
+ self.adm_client.update_quota_set(self.demo_tenant_id,
+ security_groups=sg_quota)
+
+ self.addCleanup(self.adm_client.update_quota_set,
+ self.demo_tenant_id,
+ security_groups=default_sg_quota)
+
+ # Check we cannot create any more security groups
+ self.assertRaises(exceptions.OverLimit,
+ self.sg_client.create_security_group,
+ "sg-overlimit", "sg-desc")
+
+ @attr(type='negative')
+ def test_security_groups_rules_exceed_limit(self):
+ # Negative test: Creation of Security Group Rules should FAIL
+ # when we reach limit maxSecurityGroupRules
+
+ resp, quota_set = self.client.get_quota_set(self.demo_tenant_id)
+ default_sg_rules_quota = quota_set['security_group_rules']
+ sg_rules_quota = 0 # Set the quota to zero to conserve resources
+
+ resp, quota_set =\
+ self.adm_client.update_quota_set(
+ self.demo_tenant_id,
+ security_group_rules=sg_rules_quota)
+
+ self.addCleanup(self.adm_client.update_quota_set,
+ self.demo_tenant_id,
+ security_group_rules=default_sg_rules_quota)
+
+ s_name = rand_name('securitygroup-')
+ s_description = rand_name('description-')
+ resp, securitygroup =\
+ self.sg_client.create_security_group(s_name, s_description)
+ self.addCleanup(self.sg_client.delete_security_group,
+ securitygroup['id'])
+
+ secgroup_id = securitygroup['id']
+ ip_protocol = 'tcp'
+
+ # Check we cannot create any more SG rules
+ self.assertRaises(exceptions.OverLimit,
+ self.sg_client.create_security_group_rule,
+ secgroup_id, ip_protocol, 1025, 1025)
+
class QuotasAdminTestXML(QuotasAdminTestJSON):
_interface = 'xml'
diff --git a/tempest/tests/compute/admin/test_services.py b/tempest/tests/compute/admin/test_services.py
new file mode 100644
index 0000000..0577164
--- /dev/null
+++ b/tempest/tests/compute/admin/test_services.py
@@ -0,0 +1,52 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 NEC Corporation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest import exceptions
+from tempest.test import attr
+from tempest.tests.compute import base
+
+
+class ServicesAdminTestJSON(base.BaseComputeAdminTest):
+
+ """
+ Tests Services API. List and Enable/Disable require admin privileges.
+ """
+
+ _interface = 'json'
+
+ @classmethod
+ def setUpClass(cls):
+ super(ServicesAdminTestJSON, cls).setUpClass()
+ cls.client = cls.os_adm.services_client
+ cls.non_admin_client = cls.services_client
+
+ @attr(type='positive')
+ def test_list_services(self):
+ # List Compute services
+ resp, services = self.client.list_services()
+ self.assertEqual(200, resp.status)
+ self.assertTrue(len(services) >= 2)
+
+ @attr(type='negative')
+ def test_list_services_with_non_admin_user(self):
+ # List Compute services with a non-admin user
+ self.assertRaises(exceptions.Unauthorized,
+ self.non_admin_client.list_services)
+
+
+class ServicesAdminTestXML(ServicesAdminTestJSON):
+ _interface = 'xml'
diff --git a/tempest/tests/compute/base.py b/tempest/tests/compute/base.py
index 221cfb6..fbefe35 100644
--- a/tempest/tests/compute/base.py
+++ b/tempest/tests/compute/base.py
@@ -62,6 +62,8 @@
cls.interfaces_client = os.interfaces_client
cls.fixed_ips_client = os.fixed_ips_client
cls.availability_zone_client = os.availability_zone_client
+ cls.aggregates_client = os.aggregates_client
+ cls.services_client = os.services_client
cls.build_interval = cls.config.compute.build_interval
cls.build_timeout = cls.config.compute.build_timeout
cls.ssh_user = cls.config.compute.ssh_user
diff --git a/tempest/tests/compute/servers/test_attach_interfaces.py b/tempest/tests/compute/servers/test_attach_interfaces.py
index 5e447c4..c7d4fa0 100644
--- a/tempest/tests/compute/servers/test_attach_interfaces.py
+++ b/tempest/tests/compute/servers/test_attach_interfaces.py
@@ -13,7 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest import clients
from tempest.tests.compute import base
import time
@@ -24,11 +23,10 @@
@classmethod
def setUpClass(cls):
- super(AttachInterfacesTestJSON, cls).setUpClass()
- os = clients.Manager(interface=cls._interface)
- if not os.config.network.quantum_available:
+ if not cls.config.network.quantum_available:
raise cls.skipException("Quantum is required")
- cls.client = os.interfaces_client
+ super(AttachInterfacesTestJSON, cls).setUpClass()
+ cls.client = cls.os.interfaces_client
def _check_interface(self, iface, port_id=None, network_id=None,
fixed_ip=None):
diff --git a/tempest/tests/compute/servers/test_list_server_filters.py b/tempest/tests/compute/servers/test_list_server_filters.py
index 852288e..ca5e112 100644
--- a/tempest/tests/compute/servers/test_list_server_filters.py
+++ b/tempest/tests/compute/servers/test_list_server_filters.py
@@ -75,6 +75,8 @@
cls.client.wait_for_server_status(cls.s3['id'], 'ACTIVE')
resp, cls.s3 = cls.client.get_server(cls.s3['id'])
+ cls.fixed_network_name = cls.config.compute.fixed_network_name
+
@classmethod
def tearDownClass(cls):
cls.client.delete_server(cls.s1['id'])
@@ -208,7 +210,7 @@
def test_list_servers_filtered_by_ip(self):
# Filter servers by ip
# Here should be listed 1 server
- ip = self.s1['addresses']['private'][0]['addr']
+ ip = self.s1['addresses'][self.fixed_network_name][0]['addr']
params = {'ip': ip}
resp, body = self.client.list_servers(params)
servers = body['servers']
@@ -222,7 +224,7 @@
# Filter servers by regex ip
# List all servers filtered by part of ip address.
# Here should be listed all servers
- ip = self.s1['addresses']['private'][0]['addr'][0:-3]
+ ip = self.s1['addresses'][self.fixed_network_name][0]['addr'][0:-3]
params = {'ip': ip}
resp, body = self.client.list_servers(params)
servers = body['servers']
diff --git a/tempest/tests/identity/admin/v3/test_endpoints.py b/tempest/tests/identity/admin/v3/test_endpoints.py
old mode 100755
new mode 100644
diff --git a/tempest/tests/identity/admin/v3/test_users.py b/tempest/tests/identity/admin/v3/test_users.py
index 7118241..39b8ca1 100644
--- a/tempest/tests/identity/admin/v3/test_users.py
+++ b/tempest/tests/identity/admin/v3/test_users.py
@@ -74,6 +74,8 @@
fetched_project_ids = list()
_, u_project = self.v3_client.create_project(
rand_name('project-'), description=rand_name('project-desc-'))
+ # Delete the Project at the end of this method
+ self.addCleanup(self.v3_client.delete_project, u_project['id'])
#Create a user.
u_name = rand_name('user-')
u_desc = u_name + 'description'
diff --git a/tempest/tests/network/common.py b/tempest/tests/network/common.py
index 6246f54..22eb1d3 100644
--- a/tempest/tests/network/common.py
+++ b/tempest/tests/network/common.py
@@ -15,14 +15,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import subprocess
-
-import netaddr
-
-from quantumclient.common import exceptions as exc
-from tempest.common.utils.data_utils import rand_name
-from tempest import test
-
class AttributeDict(dict):
@@ -100,212 +92,3 @@
def delete(self):
self.client.delete_port(self.id)
-
-
-class TestNetworkSmokeCommon(test.DefaultClientSmokeTest):
- """
- Base class for network smoke tests
- """
-
- @classmethod
- def check_preconditions(cls):
- if (cls.config.network.quantum_available):
- cls.enabled = True
- #verify that quantum_available is telling the truth
- try:
- cls.network_client.list_networks()
- except exc.EndpointNotFound:
- cls.enabled = False
- raise
- else:
- cls.enabled = False
- msg = 'Quantum not available'
- raise cls.skipException(msg)
-
- @classmethod
- def setUpClass(cls):
- super(TestNetworkSmokeCommon, cls).setUpClass()
- cls.tenant_id = cls.manager._get_identity_client(
- cls.config.identity.username,
- cls.config.identity.password,
- cls.config.identity.tenant_name).tenant_id
-
- def _create_keypair(self, client, namestart='keypair-smoke-'):
- kp_name = rand_name(namestart)
- keypair = client.keypairs.create(kp_name)
- try:
- self.assertEqual(keypair.id, kp_name)
- self.set_resource(kp_name, keypair)
- except AttributeError:
- self.fail("Keypair object not successfully created.")
- return keypair
-
- def _create_security_group(self, client, namestart='secgroup-smoke-'):
- # Create security group
- sg_name = rand_name(namestart)
- sg_desc = sg_name + " description"
- secgroup = client.security_groups.create(sg_name, sg_desc)
- try:
- self.assertEqual(secgroup.name, sg_name)
- self.assertEqual(secgroup.description, sg_desc)
- self.set_resource(sg_name, secgroup)
- except AttributeError:
- self.fail("SecurityGroup object not successfully created.")
-
- # Add rules to the security group
- rulesets = [
- {
- # ssh
- 'ip_protocol': 'tcp',
- 'from_port': 22,
- 'to_port': 22,
- 'cidr': '0.0.0.0/0',
- 'group_id': secgroup.id
- },
- {
- # ping
- 'ip_protocol': 'icmp',
- 'from_port': -1,
- 'to_port': -1,
- 'cidr': '0.0.0.0/0',
- 'group_id': secgroup.id
- }
- ]
- for ruleset in rulesets:
- try:
- client.security_group_rules.create(secgroup.id, **ruleset)
- except Exception:
- self.fail("Failed to create rule in security group.")
-
- return secgroup
-
- def _create_network(self, tenant_id, namestart='network-smoke-'):
- name = rand_name(namestart)
- body = dict(
- network=dict(
- name=name,
- tenant_id=tenant_id,
- ),
- )
- result = self.network_client.create_network(body=body)
- network = DeletableNetwork(client=self.network_client,
- **result['network'])
- self.assertEqual(network.name, name)
- self.set_resource(name, network)
- return network
-
- def _list_networks(self):
- nets = self.network_client.list_networks()
- return nets['networks']
-
- def _list_subnets(self):
- subnets = self.network_client.list_subnets()
- return subnets['subnets']
-
- def _list_routers(self):
- routers = self.network_client.list_routers()
- return routers['routers']
-
- def _create_subnet(self, network, namestart='subnet-smoke-'):
- """
- Create a subnet for the given network within the cidr block
- configured for tenant networks.
- """
- cfg = self.config.network
- tenant_cidr = netaddr.IPNetwork(cfg.tenant_network_cidr)
- result = None
- # Repeatedly attempt subnet creation with sequential cidr
- # blocks until an unallocated block is found.
- for subnet_cidr in tenant_cidr.subnet(cfg.tenant_network_mask_bits):
- body = dict(
- subnet=dict(
- ip_version=4,
- network_id=network.id,
- tenant_id=network.tenant_id,
- cidr=str(subnet_cidr),
- ),
- )
- try:
- result = self.network_client.create_subnet(body=body)
- break
- except exc.QuantumClientException as e:
- is_overlapping_cidr = 'overlaps with another subnet' in str(e)
- if not is_overlapping_cidr:
- raise
- self.assertIsNotNone(result, 'Unable to allocate tenant network')
- subnet = DeletableSubnet(client=self.network_client,
- **result['subnet'])
- self.assertEqual(subnet.cidr, str(subnet_cidr))
- self.set_resource(rand_name(namestart), subnet)
- return subnet
-
- def _create_port(self, network, namestart='port-quotatest-'):
- name = rand_name(namestart)
- body = dict(
- port=dict(name=name,
- network_id=network.id,
- tenant_id=network.tenant_id))
- result = self.network_client.create_port(body=body)
- self.assertIsNotNone(result, 'Unable to allocate port')
- port = DeletablePort(client=self.network_client,
- **result['port'])
- self.set_resource(name, port)
- return port
-
- def _create_server(self, client, network, name, key_name, security_groups):
- flavor_id = self.config.compute.flavor_ref
- base_image_id = self.config.compute.image_ref
- create_kwargs = {
- 'nics': [
- {'net-id': network.id},
- ],
- 'key_name': key_name,
- 'security_groups': security_groups,
- }
- server = client.servers.create(name, base_image_id, flavor_id,
- **create_kwargs)
- try:
- self.assertEqual(server.name, name)
- self.set_resource(name, server)
- except AttributeError:
- self.fail("Server not successfully created.")
- test.status_timeout(self, client.servers, server.id, 'ACTIVE')
- # The instance retrieved on creation is missing network
- # details, necessitating retrieval after it becomes active to
- # ensure correct details.
- server = client.servers.get(server.id)
- self.set_resource(name, server)
- return server
-
- def _create_floating_ip(self, server, external_network_id):
- result = self.network_client.list_ports(device_id=server.id)
- ports = result.get('ports', [])
- self.assertEqual(len(ports), 1,
- "Unable to determine which port to target.")
- port_id = ports[0]['id']
- body = dict(
- floatingip=dict(
- floating_network_id=external_network_id,
- port_id=port_id,
- tenant_id=server.tenant_id,
- )
- )
- result = self.network_client.create_floatingip(body=body)
- floating_ip = DeletableFloatingIp(client=self.network_client,
- **result['floatingip'])
- self.set_resource(rand_name('floatingip-'), floating_ip)
- return floating_ip
-
- def _ping_ip_address(self, ip_address):
- cmd = ['ping', '-c1', '-w1', ip_address]
-
- def ping():
- proc = subprocess.Popen(cmd,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- proc.wait()
- if proc.returncode == 0:
- return True
-
- # TODO(mnewby) Allow configuration of execution and sleep duration.
- return test.call_until_true(ping, 20, 1)
diff --git a/tempest/tests/object_storage/test_account_services.py b/tempest/tests/object_storage/test_account_services.py
index 14f94f7..eb66de8 100644
--- a/tempest/tests/object_storage/test_account_services.py
+++ b/tempest/tests/object_storage/test_account_services.py
@@ -22,12 +22,9 @@
class AccountTest(base.BaseObjectTest):
-
@classmethod
def setUpClass(cls):
super(AccountTest, cls).setUpClass()
-
- #Create a container
cls.container_name = rand_name(name='TestContainer')
cls.container_client.create_container(cls.container_name)
@@ -37,8 +34,7 @@
@attr(type='smoke')
def test_list_containers(self):
- # List of all containers should not be empty
-
+ # list of all containers should not be empty
params = {'format': 'json'}
resp, container_list = \
self.account_client.list_account_containers(params=params)
@@ -49,8 +45,7 @@
@attr(type='smoke')
def test_list_account_metadata(self):
- # List all account metadata
-
+ # list all account metadata
resp, metadata = self.account_client.list_account_metadata()
self.assertEqual(resp['status'], '204')
self.assertIn('x-account-object-count', resp)
@@ -59,8 +54,7 @@
@attr(type='smoke')
def test_create_account_metadata(self):
- # Add metadata to account
-
+ # add metadata to account
metadata = {'test-account-meta': 'Meta!'}
resp, _ = \
self.account_client.create_account_metadata(metadata=metadata)
@@ -72,8 +66,7 @@
@attr(type='smoke')
def test_delete_account_metadata(self):
- # Delete metadata from account
-
+ # delete metadata from account
metadata = ['test-account-meta']
resp, _ = \
self.account_client.delete_account_metadata(metadata=metadata)
@@ -84,11 +77,10 @@
@attr(type='negative')
def test_list_containers_with_non_authorized_user(self):
- #Listing containers with using non authorized user
+ # list containers using non-authorized user
- # Randomly creating user
+ # create user
self.data.setup_test_user()
-
resp, body = \
self.token_client.auth(self.data.test_user,
self.data.test_password,
@@ -97,14 +89,11 @@
self.token_client.get_token(self.data.test_user,
self.data.test_password,
self.data.test_tenant)
-
custom_headers = {'X-Auth-Token': new_token}
-
params = {'format': 'json'}
- # Trying to list containers with non authorized user token
+ # list containers with non-authorized user token
self.assertRaises(exceptions.Unauthorized,
self.custom_account_client.list_account_containers,
params=params, metadata=custom_headers)
-
- #Attempt to the delete the user setup created
+ # delete the user which was created
self.data.teardown_all()
diff --git a/tempest/tests/object_storage/test_container_services.py b/tempest/tests/object_storage/test_container_services.py
index 223744c..508132a 100644
--- a/tempest/tests/object_storage/test_container_services.py
+++ b/tempest/tests/object_storage/test_container_services.py
@@ -22,7 +22,6 @@
class ContainerTest(base.BaseObjectTest):
-
@classmethod
def setUpClass(cls):
super(ContainerTest, cls).setUpClass()
@@ -31,73 +30,58 @@
@classmethod
def tearDownClass(cls):
for container in cls.containers:
- #Get list of all object in the container
objlist = \
cls.container_client.list_all_container_objects(container)
-
- #Attempt to delete every object in the container
+ # delete every object in the container
for obj in objlist:
resp, _ = \
cls.object_client.delete_object(container, obj['name'])
-
- #Attempt to delete the container
+ # delete the container
resp, _ = cls.container_client.delete_container(container)
@attr(type='smoke')
def test_create_container(self):
- # Create a container, test responses
-
- #Create a container
container_name = rand_name(name='TestContainer')
resp, body = self.container_client.create_container(container_name)
self.containers.append(container_name)
-
self.assertTrue(resp['status'] in ('202', '201'))
@attr(type='smoke')
def test_delete_container(self):
- # Create and Delete a container, test responses
-
- #Create a container
+ # create a container
container_name = rand_name(name='TestContainer')
resp, _ = self.container_client.create_container(container_name)
self.containers.append(container_name)
-
- #Delete Container
+ # delete container
resp, _ = self.container_client.delete_container(container_name)
self.assertEqual(resp['status'], '204')
self.containers.remove(container_name)
@attr(type='smoke')
def test_list_container_contents_json(self):
- # Add metadata to object
+ # add metadata to an object
- #Create a container
+ # create a container
container_name = rand_name(name='TestContainer')
resp, _ = self.container_client.create_container(container_name)
self.containers.append(container_name)
-
- #Create Object
+ # create object
object_name = rand_name(name='TestObject')
data = arbitrary_string()
resp, _ = self.object_client.create_object(container_name,
object_name, data)
-
- #Set Object Metadata
+ # set object metadata
meta_key = rand_name(name='Meta-Test-')
meta_value = rand_name(name='MetaValue-')
orig_metadata = {meta_key: meta_value}
-
resp, _ = self.object_client.update_object_metadata(container_name,
object_name,
orig_metadata)
-
- #Get Container contents list json format
+ # get container contents list
params = {'format': 'json'}
resp, object_list = \
self.container_client.\
list_container_contents(container_name, params=params)
-
self.assertEqual(resp['status'], '200')
self.assertIsNotNone(object_list)
@@ -106,14 +90,13 @@
@attr(type='smoke')
def test_container_metadata(self):
- # Update/Retrieve/Delete Container Metadata
+ # update/retrieve/delete container metadata
- # Create a container
+ # create a container
container_name = rand_name(name='TestContainer')
resp, _ = self.container_client.create_container(container_name)
self.containers.append(container_name)
-
- # Update container metadata
+ # update container metadata
metadata = {'name': 'Pictures',
'description': 'Travel'
}
@@ -122,7 +105,7 @@
metadata=metadata)
self.assertEqual(resp['status'], '204')
- # List container metadata
+ # list container metadata
resp, _ = self.container_client.list_container_metadata(
container_name)
self.assertEqual(resp['status'], '204')
@@ -131,18 +114,19 @@
self.assertEqual(resp['x-container-meta-name'], 'Pictures')
self.assertEqual(resp['x-container-meta-description'], 'Travel')
- # Delete container metadata
+ # delete container metadata
resp, _ = self.container_client.delete_container_metadata(
container_name,
metadata=metadata.keys())
self.assertEqual(resp['status'], '204')
+ # check if the metadata are no longer there
resp, _ = self.container_client.list_container_metadata(container_name)
self.assertEqual(resp['status'], '204')
self.assertNotIn('x-container-meta-name', resp)
self.assertNotIn('x-container-meta-description', resp)
- # Delete Container
+ # delete container
resp, _ = self.container_client.delete_container(container_name)
self.assertEqual(resp['status'], '204')
self.containers.remove(container_name)
diff --git a/tempest/tests/object_storage/test_container_sync.py b/tempest/tests/object_storage/test_container_sync.py
index d5fa96c..666d356 100644
--- a/tempest/tests/object_storage/test_container_sync.py
+++ b/tempest/tests/object_storage/test_container_sync.py
@@ -23,11 +23,9 @@
class ContainerSyncTest(base.BaseObjectTest):
-
@classmethod
def setUpClass(cls):
super(ContainerSyncTest, cls).setUpClass()
-
cls.containers = []
cls.objects = []
container_sync_timeout = \
@@ -36,8 +34,7 @@
int(cls.config.object_storage.container_sync_interval)
cls.attempts = \
int(container_sync_timeout / cls.container_sync_interval)
-
- # Define container and object clients
+ # define container and object clients
cls.clients = {}
cls.clients[rand_name(name='TestContainerSync')] = \
(cls.container_client, cls.object_client)
@@ -50,29 +47,25 @@
@classmethod
def tearDownClass(cls):
for cont_name, client in cls.clients.items():
- #Get list of all object in the container
objlist = client[0].list_all_container_objects(cont_name)
-
- #Attempt to delete every object in the container
+ # delete every object in the container
if objlist:
for obj in objlist:
resp, _ = client[1].delete_object(cont_name, obj['name'])
-
- #Attempt to delete the container
+ # delete the container
resp, _ = client[0].delete_container(cont_name)
@testtools.skip('Until Bug #1093743 is resolved.')
@attr(type='positive')
def test_container_synchronization(self):
- #Container to container synchronization
- #To allow/accept sync requests to/from other accounts
+ # container to container synchronization
+ # to allow/accept sync requests to/from other accounts
- #Switch container synchronization on and create objects in a containers
+ # turn container synchronization on and create an object in each container
for cont in (self.containers, self.containers[::-1]):
cont_client = [self.clients[c][0] for c in cont]
obj_client = [self.clients[c][1] for c in cont]
-
- #tell first container to syncronize to a second
+ # tell first container to synchronize to a second
headers = {'X-Container-Sync-Key': 'sync_key',
'X-Container-Sync-To': "%s/%s" %
(cont_client[1].base_url, str(cont[1]))}
@@ -81,8 +74,7 @@
self.assertTrue(resp['status'] in ('202', '201'),
'Error installing X-Container-Sync-To '
'for the container "%s"' % (cont[0]))
-
- #Create Object in container
+ # create object in container
object_name = rand_name(name='TestSyncObject')
data = object_name[::-1] # arbitrary_string()
resp, _ = obj_client[0].create_object(cont[0], object_name, data)
@@ -92,7 +84,7 @@
% (object_name, cont[0]))
self.objects.append(object_name)
- #Wait for Container contents list json format will be not empty
+ # wait until container contents list is not empty
cont_client = [self.clients[c][0] for c in self.containers]
params = {'format': 'json'}
while self.attempts > 0:
@@ -112,15 +104,13 @@
'Error listing the destination container`s'
' "%s" contents' % (self.containers[1]))
object_list_1 = dict((obj['name'], obj) for obj in object_list_1)
- # check that containers is not empty and has equal keys()
- # or wait for next attepmt
+ # check that containers are not empty and have equal keys()
+ # or wait for next attempt
if not object_list_0 or not object_list_1 or \
set(object_list_0.keys()) != set(object_list_1.keys()):
time.sleep(self.container_sync_interval)
self.attempts -= 1
else:
break
-
- # Check for synchronization
self.assertEqual(object_list_0, object_list_1,
'Different object lists in containers.')
diff --git a/tempest/tests/object_storage/test_object_expiry.py b/tempest/tests/object_storage/test_object_expiry.py
index e1b1dbd..76370b1 100644
--- a/tempest/tests/object_storage/test_object_expiry.py
+++ b/tempest/tests/object_storage/test_object_expiry.py
@@ -25,12 +25,9 @@
class ObjectExpiryTest(base.BaseObjectTest):
-
@classmethod
def setUpClass(cls):
super(ObjectExpiryTest, cls).setUpClass()
-
- #Create a container
cls.container_name = rand_name(name='TestContainer')
cls.container_client.create_container(cls.container_name)
@@ -41,54 +38,45 @@
But delete action for the expired object is raising
NotFound exception and also non empty container cannot be deleted.
"""
-
- #Get list of all object in the container
objlist = \
cls.container_client.list_all_container_objects(cls.container_name)
-
- #Attempt to delete every object in the container
+ # delete every object in the container
if objlist:
for obj in objlist:
resp, _ = cls.object_client.delete_object(cls.container_name,
obj['name'])
-
- #Attempt to delete the container
+ # delete the container
resp, _ = cls.container_client.delete_container(cls.container_name)
@testtools.skip('Until Bug #1069849 is resolved.')
@attr(type='regression')
def test_get_object_after_expiry_time(self):
- # GET object after expiry time
- #TODO(harika-vakadi): Similar test case has to be created for
+ #TODO(harika-vakadi): similar test case has to be created for
# "X-Delete-At", after this test case works.
- #Create Object
+ # create object
object_name = rand_name(name='TestObject')
data = arbitrary_string()
resp, _ = self.object_client.create_object(self.container_name,
object_name, data)
-
- #Update object metadata with expiry time of 3 seconds
+ # update object metadata with expiry time of 3 seconds
metadata = {'X-Delete-After': '3'}
resp, _ = \
self.object_client.update_object_metadata(self.container_name,
object_name, metadata,
metadata_prefix='')
-
resp, _ = \
self.object_client.list_object_metadata(self.container_name,
object_name)
-
self.assertEqual(resp['status'], '200')
self.assertIn('x-delete-at', resp)
-
resp, body = self.object_client.get_object(self.container_name,
object_name)
self.assertEqual(resp['status'], '200')
- # Check data
+ # check data
self.assertEqual(body, data)
- # Sleep for over 5 seconds, so that object is expired
+ # sleep for over 5 seconds, so that object expires
time.sleep(5)
- # Verification of raised exception after object gets expired
+ # object should not be there anymore
self.assertRaises(exceptions.NotFound, self.object_client.get_object,
self.container_name, object_name)
diff --git a/tempest/tests/object_storage/test_object_services.py b/tempest/tests/object_storage/test_object_services.py
index 4fcc617..7cd9ff1 100644
--- a/tempest/tests/object_storage/test_object_services.py
+++ b/tempest/tests/object_storage/test_object_services.py
@@ -25,55 +25,42 @@
class ObjectTest(base.BaseObjectTest):
-
@classmethod
def setUpClass(cls):
super(ObjectTest, cls).setUpClass()
-
- #Create a container
cls.container_name = rand_name(name='TestContainer')
cls.container_client.create_container(cls.container_name)
- # Randomly creating user
cls.data.setup_test_user()
-
resp, body = cls.token_client.auth(cls.data.test_user,
cls.data.test_password,
cls.data.test_tenant)
cls.new_token = cls.token_client.get_token(cls.data.test_user,
cls.data.test_password,
cls.data.test_tenant)
-
cls.custom_headers = {'X-Auth-Token': cls.new_token}
@classmethod
def tearDownClass(cls):
- #Get list of all object in the container
objlist = cls.container_client.list_all_container_objects(
cls.container_name)
-
- #Attempt to delete every object in the container
+ # delete every object in the container
for obj in objlist:
resp, _ = cls.object_client.delete_object(cls.container_name,
obj['name'])
-
- #Attempt to delete the container
+ # delete the container
resp, _ = cls.container_client.delete_container(cls.container_name)
-
- #Attempt to the delete the user setup created
+ # delete the user setup created
cls.data.teardown_all()
@attr(type='smoke')
def test_create_object(self):
- # Create storage object, test response
-
- #Create Object
+ # create object
object_name = rand_name(name='TestObject')
data = arbitrary_string()
resp, _ = self.object_client.create_object(self.container_name,
object_name, data)
-
- #Create another Object
+ # create another object
object_name = rand_name(name='TestObject')
data = arbitrary_string()
resp, _ = self.object_client.create_object(self.container_name,
@@ -82,42 +69,36 @@
@attr(type='smoke')
def test_delete_object(self):
- # Create and delete a storage object, test responses
-
- #Create Object
+ # create object
object_name = rand_name(name='TestObject')
data = arbitrary_string()
resp, _ = self.object_client.create_object(self.container_name,
object_name, data)
-
+ # delete object
resp, _ = self.object_client.delete_object(self.container_name,
object_name)
self.assertEqual(resp['status'], '204')
@attr(type='smoke')
def test_object_metadata(self):
- # Add metadata to storage object, test if metadata is retrievable
+ # add metadata to storage object, test if metadata is retrievable
- #Create Object
+ # create object
object_name = rand_name(name='TestObject')
data = arbitrary_string()
resp, _ = self.object_client.create_object(self.container_name,
object_name, data)
-
- #Set Object Metadata
+ # set object metadata
meta_key = rand_name(name='test-')
meta_value = rand_name(name='MetaValue-')
orig_metadata = {meta_key: meta_value}
-
resp, _ = self.object_client.update_object_metadata(
- self.container_name, object_name,
- orig_metadata)
+ self.container_name, object_name, orig_metadata)
self.assertEqual(resp['status'], '202')
- #Get Object Metadata
+ # get object metadata
resp, resp_metadata = self.object_client.list_object_metadata(
self.container_name, object_name)
-
self.assertEqual(resp['status'], '200')
actual_meta_key = 'x-object-meta-' + meta_key
self.assertTrue(actual_meta_key in resp)
@@ -125,138 +106,121 @@
@attr(type='smoke')
def test_get_object(self):
- # Retrieve object's data(in response body)
+ # retrieve object's data (in response body)
- #Create Object
+ # create object
object_name = rand_name(name='TestObject')
data = arbitrary_string()
resp, _ = self.object_client.create_object(self.container_name,
object_name, data)
-
+ # get object
resp, body = self.object_client.get_object(self.container_name,
object_name)
self.assertEqual(resp['status'], '200')
- # Check data
self.assertEqual(body, data)
@attr(type='smoke')
def test_copy_object_in_same_container(self):
- # Copy storage object
-
- # Create source Object
+ # create source object
src_object_name = rand_name(name='SrcObject')
src_data = arbitrary_string(size=len(src_object_name) * 2,
base_text=src_object_name)
resp, _ = self.object_client.create_object(self.container_name,
- src_object_name, src_data)
-
- # Create destination Object
+ src_object_name,
+ src_data)
+ # create destination object
dst_object_name = rand_name(name='DstObject')
dst_data = arbitrary_string(size=len(dst_object_name) * 3,
base_text=dst_object_name)
resp, _ = self.object_client.create_object(self.container_name,
- dst_object_name, dst_data)
-
- # Copy source object to destination
+ dst_object_name,
+ dst_data)
+ # copy source object to destination
resp, _ = self.object_client.copy_object_in_same_container(
self.container_name, src_object_name, dst_object_name)
self.assertEqual(resp['status'], '201')
-
- # Check data
+ # check data
resp, body = self.object_client.get_object(self.container_name,
dst_object_name)
self.assertEqual(body, src_data)
@attr(type='smoke')
def test_copy_object_to_itself(self):
- # Change the content type of an existing object
+ # change the content type of an existing object
- # Create Object
+ # create object
object_name = rand_name(name='TestObject')
data = arbitrary_string()
- resp, _ = self.object_client.create_object(self.container_name,
- object_name, data)
- # Get the old content type
+ self.object_client.create_object(self.container_name,
+ object_name, data)
+ # get the old content type
resp_tmp, _ = self.object_client.list_object_metadata(
- self.container_name,
- object_name)
- # Change the content type of the object
+ self.container_name, object_name)
+ # change the content type of the object
metadata = {'content-type': 'text/plain; charset=UTF-8'}
self.assertNotEqual(resp_tmp['content-type'], metadata['content-type'])
resp, _ = self.object_client.copy_object_in_same_container(
self.container_name, object_name, object_name, metadata)
self.assertEqual(resp['status'], '201')
-
- # Check the content type
+ # check the content type
resp, _ = self.object_client.list_object_metadata(self.container_name,
object_name)
self.assertEqual(resp['content-type'], metadata['content-type'])
@attr(type='smoke')
def test_copy_object_2d_way(self):
- # Copy storage object
-
- # Create source Object
+ # create source object
src_object_name = rand_name(name='SrcObject')
src_data = arbitrary_string(size=len(src_object_name) * 2,
base_text=src_object_name)
resp, _ = self.object_client.create_object(self.container_name,
src_object_name, src_data)
-
- # Create destination Object
+ # create destination object
dst_object_name = rand_name(name='DstObject')
dst_data = arbitrary_string(size=len(dst_object_name) * 3,
base_text=dst_object_name)
resp, _ = self.object_client.create_object(self.container_name,
dst_object_name, dst_data)
-
- # Copy source object to destination
+ # copy source object to destination
resp, _ = self.object_client.copy_object_2d_way(self.container_name,
src_object_name,
dst_object_name)
self.assertEqual(resp['status'], '201')
-
- # Check data
+ # check data
resp, body = self.object_client.get_object(self.container_name,
dst_object_name)
self.assertEqual(body, src_data)
@attr(type='smoke')
def test_copy_object_across_containers(self):
- # Copy storage object across containers
-
- #Create a container so as to use as source container
+ # create a container to use as a source container
src_container_name = rand_name(name='TestSourceContainer')
self.container_client.create_container(src_container_name)
-
- #Create a container so as to use as destination container
+ # create a container to use as a destination container
dst_container_name = rand_name(name='TestDestinationContainer')
self.container_client.create_container(dst_container_name)
-
- # Create Object in source container
+ # create object in source container
object_name = rand_name(name='Object')
data = arbitrary_string(size=len(object_name) * 2,
base_text=object_name)
resp, _ = self.object_client.create_object(src_container_name,
object_name, data)
- #Set Object Metadata
+ # set object metadata
meta_key = rand_name(name='test-')
meta_value = rand_name(name='MetaValue-')
orig_metadata = {meta_key: meta_value}
-
resp, _ = self.object_client.update_object_metadata(src_container_name,
object_name,
orig_metadata)
self.assertEqual(resp['status'], '202')
-
try:
- # Copy object from source container to destination container
+ # copy object from source container to destination container
resp, _ = self.object_client.copy_object_across_containers(
src_container_name, object_name, dst_container_name,
object_name)
self.assertEqual(resp['status'], '201')
- # Check if object is present in destination container
+ # check if object is present in destination container
resp, body = self.object_client.get_object(dst_container_name,
object_name)
self.assertEqual(body, data)
@@ -268,12 +232,12 @@
self.fail("Got exception :%s ; while copying"
" object across containers" % e)
finally:
- #Delete objects from respective containers
+ # delete objects from respective containers
resp, _ = self.object_client.delete_object(dst_container_name,
object_name)
resp, _ = self.object_client.delete_object(src_container_name,
object_name)
- #Delete containers created in this method
+ # delete containers created in this method
resp, _ = self.container_client.delete_container(
src_container_name)
resp, _ = self.container_client.delete_container(
@@ -281,19 +245,16 @@
@attr(type='smoke')
def test_access_public_container_object_without_using_creds(self):
- # Make container public-readable, and access the object
- # anonymously, e.g. without using credentials
-
+ # make container public-readable and access an object in it
+ # anonymously, without using credentials
try:
resp_meta = None
- # Update Container Metadata to make public readable
+ # update container metadata to make it publicly readable
cont_headers = {'X-Container-Read': '.r:*,.rlistings'}
resp_meta, body = self.container_client.update_container_metadata(
- self.container_name, metadata=cont_headers,
- metadata_prefix='')
+ self.container_name, metadata=cont_headers, metadata_prefix='')
self.assertEqual(resp_meta['status'], '204')
-
- # Create Object
+ # create object
object_name = rand_name(name='Object')
data = arbitrary_string(size=len(object_name),
base_text=object_name)
@@ -301,25 +262,23 @@
object_name, data)
self.assertEqual(resp['status'], '201')
- # List container metadata
+ # list container metadata
resp_meta, _ = self.container_client.list_container_metadata(
self.container_name)
self.assertEqual(resp_meta['status'], '204')
self.assertIn('x-container-read', resp_meta)
self.assertEqual(resp_meta['x-container-read'], '.r:*,.rlistings')
- # Trying to Get Object with empty Headers as it is public readable
+ # try to get the object with empty headers, as it is publicly readable
resp, body = self.custom_object_client.get_object(
- self.container_name, object_name,
- metadata={})
+ self.container_name, object_name, metadata={})
self.assertEqual(body, data)
finally:
if resp_meta['status'] == '204':
- # Delete updated container metadata, to revert back.
+ # delete updated container metadata, to revert back.
resp, body = self.container_client.delete_container_metadata(
self.container_name, metadata=cont_headers,
metadata_prefix='')
-
resp, _ = self.container_client.list_container_metadata(
self.container_name)
self.assertEqual(resp['status'], '204')
@@ -328,9 +287,8 @@
@attr(type='smoke')
def test_access_public_object_with_another_user_creds(self):
- #Make container public-readable, and access the object
- #anonymously, e.g. using another user credentials
-
+ # make container public-readable and access an object in it using
+ # another user's credentials
try:
resp_meta = None
cont_headers = {'X-Container-Read': '.r:*,.rlistings'}
@@ -338,7 +296,7 @@
self.container_name, metadata=cont_headers,
metadata_prefix='')
self.assertEqual(resp_meta['status'], '204')
- # Create Object
+ # create object
object_name = rand_name(name='Object')
data = arbitrary_string(size=len(object_name) * 1,
base_text=object_name)
@@ -346,18 +304,17 @@
object_name, data)
self.assertEqual(resp['status'], '201')
- # List container metadata
+ # list container metadata
resp, _ = self.container_client.list_container_metadata(
self.container_name)
self.assertEqual(resp['status'], '204')
self.assertIn('x-container-read', resp)
self.assertEqual(resp['x-container-read'], '.r:*,.rlistings')
- # Trying to GET Auth Token of Alternate user
+ # get auth token of the alternate user
token = self.identity_client_alt.get_auth()
headers = {'X-Auth-Token': token}
-
- # Trying to create object with Alternate user creds
+ # access object using alternate user creds
resp, body = self.custom_object_client.get_object(
self.container_name, object_name,
metadata=headers)
@@ -369,11 +326,10 @@
finally:
if resp_meta['status'] == '204':
- # Delete updated container metadata, to revert back.
+ # delete updated container metadata, to revert back.
resp, body = self.container_client.delete_container_metadata(
self.container_name, metadata=cont_headers,
metadata_prefix='')
-
resp, _ = self.container_client.list_container_metadata(
self.container_name)
self.assertEqual(resp['status'], '204')
@@ -383,20 +339,18 @@
@testtools.skip('Until Bug #1020722 is resolved.')
@attr(type='smoke')
def test_write_public_object_without_using_creds(self):
- #Make container public-writable, and create object
- #anonymously, e.g. without using credentials
+ # make container public-writable, and create object anonymously, i.e.
+ # without using credentials
try:
resp_meta = None
- # Update Container Metadata to make public readable
+ # update container metadata to make it publicly writable
cont_headers = {'X-Container-Write': '-*'}
resp_meta, body = self.container_client.update_container_metadata(
- self.container_name, metadata=cont_headers,
- metadata_prefix='')
+ self.container_name, metadata=cont_headers, metadata_prefix='')
self.assertEqual(resp_meta['status'], '204')
- # List container metadata
+ # list container metadata
resp, _ = self.container_client.list_container_metadata(
self.container_name)
-
self.assertEqual(resp['status'], '204')
self.assertIn('x-container-write', resp)
self.assertEqual(resp['x-container-write'], '-*')
@@ -404,14 +358,11 @@
object_name = rand_name(name='Object')
data = arbitrary_string(size=len(object_name),
base_text=object_name)
-
headers = {'Content-Type': 'application/json',
'Accept': 'application/json'}
-
- #Trying to Create object without using creds
+ # create object as anonymous user
resp, body = self.custom_object_client.create_object(
- self.container_name, object_name,
- data, metadata=headers)
+ self.container_name, object_name, data, metadata=headers)
self.assertEqual(resp['status'], '201')
except Exception as e:
@@ -420,11 +371,10 @@
finally:
if resp_meta['status'] == '204':
- # Delete updated container metadata, to revert back.
+ # delete updated container metadata, to revert back.
resp, body = self.container_client.delete_container_metadata(
self.container_name, metadata=cont_headers,
metadata_prefix='')
-
resp, _ = self.container_client.list_container_metadata(
self.container_name)
self.assertEqual(resp['status'], '204')
@@ -434,52 +384,45 @@
@testtools.skip('Until Bug #1020722 is resolved.')
@attr(type='smoke')
def test_write_public_with_another_user_creds(self):
- #Make container public-writable, and create object
- #anonymously, e.g. with another user credentials
-
+ # make container public-writable, and create object with another user's
+ # credentials
try:
resp_meta = None
- # Update Container Metadata to make public readable
+ # update container metadata to make it publicly writable
cont_headers = {'X-Container-Write': '-*'}
resp_meta, body = self.container_client.update_container_metadata(
self.container_name, metadata=cont_headers,
metadata_prefix='')
self.assertEqual(resp_meta['status'], '204')
- # List container metadata
+ # list container metadata
resp, _ = self.container_client.list_container_metadata(
self.container_name)
-
self.assertEqual(resp['status'], '204')
self.assertIn('x-container-write', resp)
self.assertEqual(resp['x-container-write'], '-*')
- #Trying to GET auth token of Alternate user
+ # trying to get auth token of the alternate user
token = self.identity_client_alt.get_auth()
-
headers = {'Content-Type': 'application/json',
'Accept': 'application/json',
'X-Auth-Token': token}
- #Trying to Create an object with another user creds
+ # trying to create an object with another user's creds
object_name = rand_name(name='Object')
data = arbitrary_string(size=len(object_name),
base_text=object_name)
resp, body = self.custom_object_client.create_object(
- self.container_name, object_name,
- data, metadata=headers)
+ self.container_name, object_name, data, metadata=headers)
self.assertEqual(resp['status'], '201')
-
except Exception as e:
self.fail("Failed to create public writable object with another"
" user creds raised exception is %s" % e)
-
finally:
if resp_meta['status'] == '204':
- # Delete updated container metadata, to revert back.
+ # delete updated container metadata, to revert back.
resp, body = self.container_client.delete_container_metadata(
self.container_name, metadata=cont_headers,
metadata_prefix='')
-
resp, _ = self.container_client.list_container_metadata(
self.container_name)
self.assertEqual(resp['status'], '204')
@@ -488,34 +431,26 @@
@attr(type='negative')
def test_access_object_without_using_creds(self):
- # Attempt to access the object anonymously, e.g.
- # not using any credentials
-
- # Create Object
+ # create object
object_name = rand_name(name='Object')
data = arbitrary_string(size=len(object_name),
base_text=object_name)
resp, _ = self.object_client.create_object(self.container_name,
object_name, data)
self.assertEqual(resp['status'], '201')
-
- # Trying to Get Object with empty Headers
+ # trying to get object with empty headers
self.assertRaises(exceptions.Unauthorized,
self.custom_object_client.get_object,
self.container_name, object_name, metadata={})
@attr(type='negative')
def test_write_object_without_using_creds(self):
- # Attempt to write to the object anonymously, e.g.
- # not using any credentials
-
- # Trying to Create Object with empty Headers
+ # trying to create object with empty headers
object_name = rand_name(name='Object')
data = arbitrary_string(size=len(object_name),
base_text=object_name)
obj_headers = {'Content-Type': 'application/json',
'Accept': 'application/json'}
-
self.assertRaises(exceptions.Unauthorized,
self.custom_object_client.create_object,
self.container_name, object_name, data,
@@ -523,30 +458,25 @@
@attr(type='negative')
def test_delete_object_without_using_creds(self):
- # Attempt to delete the object anonymously,
- # e.g. not using any credentials
-
- # Create Object
+ # create object
object_name = rand_name(name='Object')
data = arbitrary_string(size=len(object_name),
base_text=object_name)
resp, _ = self.object_client.create_object(self.container_name,
object_name, data)
-
- # Trying to Delete Object with empty Headers
+ # trying to delete object with empty headers
self.assertRaises(exceptions.Unauthorized,
self.custom_object_client.delete_object,
self.container_name, object_name)
@attr(type='negative')
def test_write_object_with_non_authorized_user(self):
- #Attempt to upload another file using non authorized user
-
+ # attempt to upload another file using non-authorized user
object_name = rand_name(name='Object')
data = arbitrary_string(size=len(object_name) * 5,
base_text=object_name)
- # Trying to Create Object with non authorized user token
+ # trying to create object with non-authorized user
self.assertRaises(exceptions.Unauthorized,
self.custom_object_client.create_object,
self.container_name, object_name, data,
@@ -554,18 +484,14 @@
@attr(type='negative')
def test_read_object_with_non_authorized_user(self):
- #Attempt to download the file using non authorized user
-
object_name = rand_name(name='Object')
data = arbitrary_string(size=len(object_name) * 5,
base_text=object_name)
-
resp, body = self.object_client.create_object(
- self.container_name, object_name,
- data)
+ self.container_name, object_name, data)
self.assertEqual(resp['status'], '201')
- # Trying to Get Object with non authorized user token
+ # trying to get object with non-authorized user token
self.assertRaises(exceptions.Unauthorized,
self.custom_object_client.get_object,
self.container_name, object_name,
@@ -573,18 +499,13 @@
@attr(type='negative')
def test_delete_object_with_non_authorized_user(self):
- #Attempt to delete container using non authorized user
-
object_name = rand_name(name='Object')
data = arbitrary_string(size=len(object_name) * 5,
base_text=object_name)
-
resp, body = self.object_client.create_object(
- self.container_name, object_name,
- data)
+ self.container_name, object_name, data)
self.assertEqual(resp['status'], '201')
-
- # Trying to Delete Object with non authorized user token
+ # trying to delete object with non-authorized user token
self.assertRaises(exceptions.Unauthorized,
self.custom_object_client.delete_object,
self.container_name, object_name,
@@ -593,11 +514,11 @@
@testtools.skip('Until Bug #1097137 is resolved.')
@attr(type='positive')
def test_get_object_using_temp_url(self):
- #Access object using temp url within expiry time
+ # access object using temporary URL within expiration time
try:
- #Update Account Metadata
- # Flag to check if account metadata got updated
+ # update account metadata
+ # flag to check if account metadata got updated
flag = False
key = 'Meta'
metadata = {'Temp-URL-Key': key}
@@ -605,27 +526,23 @@
metadata=metadata)
self.assertEqual(resp['status'], '204')
flag = True
-
resp, _ = self.account_client.list_account_metadata()
self.assertIn('x-account-meta-temp-url-key', resp)
self.assertEqual(resp['x-account-meta-temp-url-key'], key)
- # Create Object
+ # create object
object_name = rand_name(name='ObjectTemp')
data = arbitrary_string(size=len(object_name),
base_text=object_name)
self.object_client.create_object(self.container_name,
object_name, data)
-
expires = int(time.time() + 10)
- #Trying to GET object using temp URL with in expiry time
+ # trying to get object using temp url within the expiry time
_, body = self.object_client.get_object_using_temp_url(
self.container_name, object_name,
expires, key)
-
self.assertEqual(body, data)
-
finally:
if flag:
resp, _ = self.account_client.delete_account_metadata(
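
The temp URL exercised above follows Swift's documented tempurl scheme: the URL carries an HMAC-SHA1 signature over the request method, the expiry timestamp and the object path, keyed with the value stored in X-Account-Meta-Temp-URL-Key. A sketch of that computation; the account path is a placeholder, and this mirrors the documented scheme rather than the client helper's exact internals::

    import hmac
    import time
    from hashlib import sha1

    key = "Meta"  # the Temp-URL-Key value set on the account above
    method = "GET"
    expires = int(time.time() + 10)
    path = "/v1/AUTH_demo/my_container/ObjectTemp"  # placeholder account path

    # sign "<method>\n<expires>\n<path>" with the account key
    hmac_body = "%s\n%s\n%s" % (method, expires, path)
    signature = hmac.new(key.encode(), hmac_body.encode(), sha1).hexdigest()

    # the object can then be fetched anonymously until the URL expires
    temp_url = "%s?temp_url_sig=%s&temp_url_expires=%s" % (path, signature,
                                                           expires)
    print(temp_url)
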
@@ -635,35 +552,29 @@
@attr(type='positive')
def test_object_upload_in_segments(self):
- #Attempt to upload object in segments
-
- #Create Object
+ # create object
object_name = rand_name(name='LObject')
data = arbitrary_string(size=len(object_name),
base_text=object_name)
segments = 10
self.object_client.create_object(self.container_name,
object_name, data)
- #Uploading 10 segments
+ # uploading 10 segments
for i in range(segments):
resp, _ = self.object_client.create_object_segments(
- self.container_name, object_name,
- i, data)
- # Creating a Manifest File (Metadata Update)
-
+ self.container_name, object_name, i, data)
+ # creating a manifest file (metadata update)
metadata = {'X-Object-Manifest': '%s/%s/'
% (self.container_name, object_name)}
resp, _ = self.object_client.update_object_metadata(
- self.container_name, object_name,
- metadata, metadata_prefix='')
+ self.container_name, object_name, metadata, metadata_prefix='')
resp, _ = self.object_client.list_object_metadata(
self.container_name, object_name)
self.assertIn('x-object-manifest', resp)
self.assertEqual(resp['x-object-manifest'],
'%s/%s/' % (self.container_name, object_name))
- #Downloading the object
+ # downloading the object
resp, body = self.object_client.get_object(
self.container_name, object_name)
-
self.assertEqual(data * segments, body)
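
The segmented upload above is Swift's dynamic large object mechanism: segments live under a common <object>/<index> prefix and the manifest object's X-Object-Manifest header points at that prefix, so a GET of the manifest streams the concatenated segments. A rough requests-based sketch with placeholder endpoint, token and names::

    import requests

    storage_url = "http://swift.example.com:8080/v1/AUTH_demo"  # placeholder
    headers = {"X-Auth-Token": "AUTH_tk0123456789abcdef"}       # placeholder
    container, obj, data, segments = "my_container", "LObject", "payload", 10

    # upload each segment as <container>/<object>/<index>
    for i in range(segments):
        requests.put("%s/%s/%s/%d" % (storage_url, container, obj, i),
                     headers=headers, data=data)

    # the manifest object: X-Object-Manifest names the segment prefix
    manifest_headers = dict(headers, **{
        "X-Object-Manifest": "%s/%s/" % (container, obj)})
    requests.put("%s/%s/%s" % (storage_url, container, obj),
                 headers=manifest_headers, data="")

    # downloading the manifest returns all segments concatenated
    resp = requests.get("%s/%s/%s" % (storage_url, container, obj),
                        headers=headers)
    assert resp.text == data * segments
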
diff --git a/tempest/tests/object_storage/test_object_version.py b/tempest/tests/object_storage/test_object_version.py
index 80cfc27..4a16965 100644
--- a/tempest/tests/object_storage/test_object_version.py
+++ b/tempest/tests/object_storage/test_object_version.py
@@ -21,7 +21,6 @@
class ContainerTest(base.BaseObjectTest):
-
@classmethod
def setUpClass(cls):
super(ContainerTest, cls).setUpClass()
@@ -30,16 +29,13 @@
@classmethod
def tearDownClass(cls):
for container in cls.containers:
- #Get list of all object in the container
objlist = \
cls.container_client.list_all_container_objects(container)
-
- #Attempt to delete every object in the container
+ # delete every object in the container
for obj in objlist:
resp, _ = \
cls.object_client.delete_object(container, obj['name'])
-
- #Attempt to delete the container
+ # delete the container
resp, _ = cls.container_client.delete_container(container)
def assertContainer(self, container, count, byte, versioned):
@@ -54,16 +50,13 @@
@attr(type='smoke')
def test_versioned_container(self):
- # Versioned container responses tests
-
- # Create a containers
+ # create container
vers_container_name = rand_name(name='TestVersionContainer')
resp, body = self.container_client.create_container(
vers_container_name)
self.containers.append(vers_container_name)
self.assertIn(resp['status'], ('202', '201'))
- self.assertContainer(vers_container_name, '0', '0',
- 'Missing Header')
+ self.assertContainer(vers_container_name, '0', '0', 'Missing Header')
base_container_name = rand_name(name='TestBaseContainer')
headers = {'X-versions-Location': vers_container_name}
@@ -75,18 +68,17 @@
self.assertIn(resp['status'], ('202', '201'))
self.assertContainer(base_container_name, '0', '0',
vers_container_name)
- # Create Object
object_name = rand_name(name='TestObject')
+ # create object
resp, _ = self.object_client.create_object(base_container_name,
object_name, '1')
-
+ # create 2nd version of object
resp, _ = self.object_client.create_object(base_container_name,
object_name, '2')
-
resp, body = self.object_client.get_object(base_container_name,
object_name)
self.assertEqual(body, '2')
- # Delete Object version 2
+ # delete object version 2
resp, _ = self.object_client.delete_object(base_container_name,
object_name)
self.assertContainer(base_container_name, '1', '1',
@@ -94,21 +86,18 @@
resp, body = self.object_client.get_object(base_container_name,
object_name)
self.assertEqual(body, '1')
-
- # Delete Object version 1
+ # delete object version 1
resp, _ = self.object_client.delete_object(base_container_name,
object_name)
- # Containers are Empty
+ # containers should be empty
self.assertContainer(base_container_name, '0', '0',
vers_container_name)
self.assertContainer(vers_container_name, '0', '0',
'Missing Header')
-
- # Delete Containers
+ # delete containers
resp, _ = self.container_client.delete_container(base_container_name)
self.assertEqual(resp['status'], '204')
self.containers.remove(base_container_name)
-
resp, _ = self.container_client.delete_container(vers_container_name)
self.assertEqual(resp['status'], '204')
self.containers.remove(vers_container_name)
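
The behaviour asserted in this test is Swift's container versioning: once a container carries an X-Versions-Location header naming another container, overwriting an object archives the previous copy there, and deleting the current copy restores the newest archived version. A rough sketch of the same sequence with placeholder endpoint and names::

    import requests

    storage_url = "http://swift.example.com:8080/v1/AUTH_demo"  # placeholder
    headers = {"X-Auth-Token": "AUTH_tk0123456789abcdef"}       # placeholder

    # a versions container plus a base container that archives into it
    requests.put("%s/versions" % storage_url, headers=headers)
    requests.put("%s/base" % storage_url,
                 headers=dict(headers, **{"X-Versions-Location": "versions"}))

    # write version 1, then overwrite with version 2 (v1 is archived)
    requests.put("%s/base/obj" % storage_url, headers=headers, data="1")
    requests.put("%s/base/obj" % storage_url, headers=headers, data="2")

    # deleting the current copy brings version 1 back
    requests.delete("%s/base/obj" % storage_url, headers=headers)
    resp = requests.get("%s/base/obj" % storage_url, headers=headers)
    assert resp.text == "1"
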
diff --git a/tempest/tests/volume/admin/test_multi_backend.py b/tempest/tests/volume/admin/test_multi_backend.py
index 3d5fae4..c50586c 100644
--- a/tempest/tests/volume/admin/test_multi_backend.py
+++ b/tempest/tests/volume/admin/test_multi_backend.py
@@ -22,6 +22,7 @@
from tempest import config
from tempest.services.volume.json.admin import volume_types_client
from tempest.services.volume.json import volumes_client
+from tempest.test import attr
from tempest.tests.volume import base
LOG = logging.getLogger(__name__)
@@ -108,6 +109,7 @@
super(VolumeMultiBackendTest, cls).tearDownClass()
+ @attr(type=['smoke', 'gate'])
def test_multi_backend_enabled(self):
# this test checks that multi backend is enabled for at least the
# computes where the volumes created in setUp were made
@@ -131,6 +133,7 @@
"%(volume_host2)s") % locals()
self.assertTrue(len(volume_host2.split("@")) > 1, msg)
+ @attr(type='gate')
def test_backend_name_distinction(self):
# this test checks that the two volumes created at setUp doesn't
# belong to the same backend (if they are in the same backend, that
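
The smoke and gate values added by these @attr decorators tag tests for selection by attribute; with nose's attrib plugin a tagged subset can then be picked on the command line, along the lines of the tox commands later in this change. The target path here is only an example::

    # select only tests tagged with type=gate (matches list-valued attrs too)
    nosetests --logging-format '%(asctime)-15s %(message)s' -sv \
        -a type=gate tempest/tests/volume
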
diff --git a/tempest/tests/volume/admin/test_volume_types.py b/tempest/tests/volume/admin/test_volume_types.py
index 13efca7..8fccd24 100644
--- a/tempest/tests/volume/admin/test_volume_types.py
+++ b/tempest/tests/volume/admin/test_volume_types.py
@@ -17,6 +17,7 @@
from tempest.common.utils.data_utils import rand_name
from tempest.services.volume.json.admin import volume_types_client
+from tempest.test import attr
from tempest.tests.volume.base import BaseVolumeTest
@@ -37,6 +38,7 @@
auth_url,
adm_tenant)
+ @attr(type=['smoke', 'gate'])
def test_volume_type_list(self):
# List Volume types.
try:
@@ -46,6 +48,7 @@
except Exception:
self.fail("Could not list volume types")
+ @attr(type=['smoke', 'gate'])
def test_create_get_delete_volume_with_volume_type_and_extra_specs(self):
# Create/get/delete volume with volume_type and extra spec.
try:
@@ -97,6 +100,7 @@
resp, _ = self.client.delete_volume_type(body['id'])
self.assertEqual(202, resp.status)
+ @attr(type=['smoke', 'gate'])
def test_volume_type_create_delete(self):
# Create/Delete volume type.
try:
@@ -119,6 +123,7 @@
except Exception:
self.fail("Could not create a volume_type")
+ @attr(type=['smoke', 'gate'])
def test_volume_type_create_get(self):
# Create/get volume type.
try:
diff --git a/tempest/tests/volume/admin/test_volume_types_extra_specs.py b/tempest/tests/volume/admin/test_volume_types_extra_specs.py
index 1cd7653..85edd64 100644
--- a/tempest/tests/volume/admin/test_volume_types_extra_specs.py
+++ b/tempest/tests/volume/admin/test_volume_types_extra_specs.py
@@ -16,6 +16,7 @@
# under the License.
from tempest.common.utils.data_utils import rand_name
+from tempest.test import attr
from tempest.tests.volume import base
@@ -33,6 +34,7 @@
cls.client.delete_volume_type(cls.volume_type['id'])
super(VolumeTypesExtraSpecsTest, cls).tearDownClass()
+ @attr(type=['smoke', 'gate'])
def test_volume_type_extra_specs_list(self):
# List Volume types extra specs.
try:
@@ -51,6 +53,7 @@
except Exception:
self.fail("Could not list volume types extra specs")
+ @attr(type=['gate'])
def test_volume_type_extra_specs_update(self):
# Update volume type extra specs
try:
@@ -74,6 +77,7 @@
except Exception:
self.fail("Couldnt update volume type extra spec")
+ @attr(type=['smoke', 'gate'])
def test_volume_type_extra_spec_create_get_delete(self):
# Create/Get/Delete volume type extra spec.
try:
diff --git a/tempest/tests/volume/admin/test_volume_types_extra_specs_negative.py b/tempest/tests/volume/admin/test_volume_types_extra_specs_negative.py
index bd6e279..4a1a0b2 100644
--- a/tempest/tests/volume/admin/test_volume_types_extra_specs_negative.py
+++ b/tempest/tests/volume/admin/test_volume_types_extra_specs_negative.py
@@ -19,6 +19,7 @@
from tempest.common.utils.data_utils import rand_name
from tempest import exceptions
+from tempest.test import attr
from tempest.tests.volume import base
@@ -39,6 +40,7 @@
cls.client.delete_volume_type(cls.volume_type['id'])
super(ExtraSpecsNegativeTest, cls).tearDownClass()
+ @attr(type='gate')
def test_update_no_body(self):
# Should not update volume type extra specs with no body
extra_spec = {"spec1": "val2"}
@@ -46,6 +48,7 @@
self.client.update_volume_type_extra_specs,
self.volume_type['id'], extra_spec.keys()[0], None)
+ @attr(type='gate')
def test_update_nonexistent_extra_spec_id(self):
# Should not update volume type extra specs with nonexistent id.
extra_spec = {"spec1": "val2"}
@@ -54,6 +57,7 @@
self.volume_type['id'], str(uuid.uuid4()),
extra_spec)
+ @attr(type='gate')
def test_update_none_extra_spec_id(self):
# Should not update volume type extra specs with none id.
extra_spec = {"spec1": "val2"}
@@ -61,6 +65,7 @@
self.client.update_volume_type_extra_specs,
self.volume_type['id'], None, extra_spec)
+ @attr(type='gate')
def test_update_multiple_extra_spec(self):
# Should not update volume type extra specs with multiple specs as
# body.
@@ -70,6 +75,7 @@
self.volume_type['id'], extra_spec.keys()[0],
extra_spec)
+ @attr(type='gate')
def test_create_nonexistent_type_id(self):
# Should not create volume type extra spec for nonexistent volume
# type id.
@@ -78,18 +84,21 @@
self.client.create_volume_type_extra_specs,
str(uuid.uuid4()), extra_specs)
+ @attr(type='gate')
def test_create_none_body(self):
# Should not create volume type extra spec for none POST body.
self.assertRaises(exceptions.BadRequest,
self.client.create_volume_type_extra_specs,
self.volume_type['id'], None)
+ @attr(type='gate')
def test_create_invalid_body(self):
# Should not create volume type extra spec for invalid POST body.
self.assertRaises(exceptions.BadRequest,
self.client.create_volume_type_extra_specs,
self.volume_type['id'], ['invalid'])
+ @attr(type='gate')
def test_delete_nonexistent_volume_type_id(self):
# Should not delete volume type extra spec for nonexistent
# type id.
@@ -98,12 +107,14 @@
self.client.delete_volume_type_extra_specs,
str(uuid.uuid4()), extra_specs.keys()[0])
+ @attr(type='gate')
def test_list_nonexistent_volume_type_id(self):
# Should not list volume type extra spec for nonexistent type id.
self.assertRaises(exceptions.NotFound,
self.client.list_volume_types_extra_specs,
str(uuid.uuid4()))
+ @attr(type='gate')
def test_get_nonexistent_volume_type_id(self):
# Should not get volume type extra spec for nonexistent type id.
extra_specs = {"spec1": "val1"}
@@ -111,6 +122,7 @@
self.client.get_volume_type_extra_specs,
str(uuid.uuid4()), extra_specs.keys()[0])
+ @attr(type='gate')
def test_get_nonexistent_extra_spec_id(self):
# Should not get volume type extra spec for nonexistent extra spec
# id.
diff --git a/tempest/tests/volume/admin/test_volume_types_negative.py b/tempest/tests/volume/admin/test_volume_types_negative.py
index daf804d..bd358b8 100644
--- a/tempest/tests/volume/admin/test_volume_types_negative.py
+++ b/tempest/tests/volume/admin/test_volume_types_negative.py
@@ -18,12 +18,14 @@
import uuid
from tempest import exceptions
+from tempest.test import attr
from tempest.tests.volume import base
class VolumeTypesNegativeTest(base.BaseVolumeAdminTest):
_interface = 'json'
+ @attr(type='gate')
def test_create_with_nonexistent_volume_type(self):
# Should not be able to create volume with nonexistent volume_type.
self.assertRaises(exceptions.NotFound,
@@ -31,16 +33,19 @@
display_name=str(uuid.uuid4()),
volume_type=str(uuid.uuid4()))
+ @attr(type='gate')
def test_create_with_empty_name(self):
# Should not be able to create volume type with an empty name.
self.assertRaises(exceptions.BadRequest,
self.client.create_volume_type, '')
+ @attr(type='gate')
def test_get_nonexistent_type_id(self):
# Should not be able to get volume type with nonexistent type id.
self.assertRaises(exceptions.NotFound, self.client.get_volume_type,
str(uuid.uuid4()))
+ @attr(type='gate')
def test_delete_nonexistent_type_id(self):
# Should not be able to delete volume type with nonexistent type id.
self.assertRaises(exceptions.NotFound, self.client.delete_volume_type,
diff --git a/tempest/tests/volume/test_volumes_actions.py b/tempest/tests/volume/test_volumes_actions.py
index e6eb8d8..5396fa4 100644
--- a/tempest/tests/volume/test_volumes_actions.py
+++ b/tempest/tests/volume/test_volumes_actions.py
@@ -52,7 +52,7 @@
super(VolumesActionsTest, cls).tearDownClass()
- @attr(type='smoke')
+ @attr(type=['smoke', 'gate'])
def test_attach_detach_volume_to_instance(self):
# Volume is attached and detached successfully from an instance
try:
@@ -70,6 +70,7 @@
self.assertEqual(202, resp.status)
self.client.wait_for_volume_status(self.volume['id'], 'available')
+ @attr(type='gate')
def test_get_volume_attachment(self):
# Verify that a volume's attachment information is retrieved
mountpoint = '/dev/vdc'
diff --git a/tempest/tests/volume/test_volumes_get.py b/tempest/tests/volume/test_volumes_get.py
index 8e80e18..fdaf09b 100644
--- a/tempest/tests/volume/test_volumes_get.py
+++ b/tempest/tests/volume/test_volumes_get.py
@@ -78,7 +78,7 @@
self.assertEqual(202, resp.status)
self.client.wait_for_resource_deletion(volume['id'])
- @attr(type='positive')
+ @attr(type='gate')
def test_volume_get_metadata_none(self):
# Create a volume without passing metadata, get details, and delete
try:
@@ -105,11 +105,11 @@
self.assertEqual(202, resp.status)
self.client.wait_for_resource_deletion(volume['id'])
- @attr(type='smoke')
+ @attr(type=['smoke', 'gate'])
def test_volume_create_get_delete(self):
self._volume_create_get_delete(image_ref=None)
- @attr(type='smoke')
+ @attr(type=['smoke', 'gate'])
def test_volume_from_image(self):
self._volume_create_get_delete(image_ref=self.config.compute.image_ref)
diff --git a/tempest/tests/volume/test_volumes_list.py b/tempest/tests/volume/test_volumes_list.py
index a8fedb9..2468705 100644
--- a/tempest/tests/volume/test_volumes_list.py
+++ b/tempest/tests/volume/test_volumes_list.py
@@ -76,7 +76,7 @@
cls.client.wait_for_resource_deletion(volid)
super(VolumesListTest, cls).tearDownClass()
- @attr(type='smoke')
+ @attr(type=['smoke', 'gate'])
def test_volume_list(self):
# Get a list of Volumes
# Fetch all volumes
@@ -89,7 +89,7 @@
', '.join(m_vol['display_name']
for m_vol in missing_vols))
- @attr(type='smoke')
+ @attr(type='gate')
def test_volume_list_with_details(self):
# Get a list of Volumes with details
# Fetch all Volumes
diff --git a/tempest/tests/volume/test_volumes_negative.py b/tempest/tests/volume/test_volumes_negative.py
index c7d4374..f02bb3f 100644
--- a/tempest/tests/volume/test_volumes_negative.py
+++ b/tempest/tests/volume/test_volumes_negative.py
@@ -17,6 +17,7 @@
from tempest.common.utils.data_utils import rand_name
from tempest import exceptions
+from tempest.test import attr
from tempest.tests.volume import base
@@ -28,6 +29,7 @@
super(VolumesNegativeTest, cls).setUpClass()
cls.client = cls.volumes_client
+ @attr(type='gate')
def test_volume_get_nonexistant_volume_id(self):
# Should not be able to get a nonexistant volume
#Creating a nonexistant volume id
@@ -43,6 +45,7 @@
self.assertRaises(exceptions.NotFound, self.client.get_volume,
non_exist_id)
+ @attr(type='gate')
def test_volume_delete_nonexistant_volume_id(self):
# Should not be able to delete a nonexistant Volume
# Creating nonexistant volume id
@@ -58,6 +61,7 @@
self.assertRaises(exceptions.NotFound, self.client.delete_volume,
non_exist_id)
+ @attr(type='gate')
def test_create_volume_with_invalid_size(self):
# Should not be able to create volume with invalid size
# in request
@@ -66,6 +70,7 @@
self.assertRaises(exceptions.BadRequest, self.client.create_volume,
size='#$%', display_name=v_name, metadata=metadata)
+ @attr(type='gate')
def test_create_volume_with_out_passing_size(self):
# Should not be able to create volume without passing size
# in request
@@ -74,6 +79,7 @@
self.assertRaises(exceptions.BadRequest, self.client.create_volume,
size='', display_name=v_name, metadata=metadata)
+ @attr(type='gate')
def test_create_volume_with_size_zero(self):
# Should not be able to create volume with size zero
v_name = rand_name('Volume-')
@@ -81,20 +87,24 @@
self.assertRaises(exceptions.BadRequest, self.client.create_volume,
size='0', display_name=v_name, metadata=metadata)
+ @attr(type='gate')
def test_get_invalid_volume_id(self):
# Should not be able to get volume with invalid id
self.assertRaises(exceptions.NotFound, self.client.get_volume,
'#$%%&^&^')
+ @attr(type='gate')
def test_get_volume_without_passing_volume_id(self):
# Should not be able to get volume when empty ID is passed
self.assertRaises(exceptions.NotFound, self.client.get_volume, '')
+ @attr(type='gate')
def test_delete_invalid_volume_id(self):
# Should not be able to delete volume when invalid ID is passed
self.assertRaises(exceptions.NotFound, self.client.delete_volume,
'!@#$%^&*()')
+ @attr(type='gate')
def test_delete_volume_without_passing_volume_id(self):
# Should not be able to delete volume when empty ID is passed
self.assertRaises(exceptions.NotFound, self.client.delete_volume, '')
diff --git a/tempest/tests/volume/test_volumes_snapshots.py b/tempest/tests/volume/test_volumes_snapshots.py
index ba8ba6c..edc02ac 100644
--- a/tempest/tests/volume/test_volumes_snapshots.py
+++ b/tempest/tests/volume/test_volumes_snapshots.py
@@ -37,7 +37,7 @@
def tearDownClass(cls):
super(VolumesSnapshotTest, cls).tearDownClass()
- @attr(type='smoke')
+ @attr(type=['smoke', 'gate'])
def test_snapshot_create_get_delete(self):
# Create a snapshot, get some of the details and then deletes it
resp, snapshot = self.snapshots_client.create_snapshot(
@@ -52,6 +52,7 @@
self.snapshots_client.delete_snapshot(snapshot['id'])
self.snapshots_client.wait_for_resource_deletion(snapshot['id'])
+ @attr(type=['smoke', 'gate'])
def test_volume_from_snapshot(self):
# Create a temporary snap using wrapper method from base, then
# create a snap based volume, check resp code and deletes it
diff --git a/tempest/thirdparty/README.rst b/tempest/thirdparty/README.rst
new file mode 100644
index 0000000..41d31f3
--- /dev/null
+++ b/tempest/thirdparty/README.rst
@@ -0,0 +1,33 @@
+Tempest Guide to Third Party API tests
+======================================
+
+
+What are these tests?
+---------------------
+
+Third party tests are tests for non-native OpenStack APIs that are
+shipped as part of OpenStack projects. If we ship an API, we are
+required to ensure that it works.
+
+For example, Nova Compute currently has EC2 API support in tree, which
+should be tested as part of the normal process.
+
+
+Why are these tests in tempest?
+-------------------------------
+
+If we ship an API in an OpenStack component, there should be tests in
+tempest to exercise it in some way.
+
+
+Scope of these tests
+--------------------
+
+Third party API testing should be limited to the functional testing of
+third party API compliance. Complex scenarios should be avoided, and
+instead exercised with the OpenStack API, unless the third party API
+can't be tested without those scenarios.
+
+Whenever possible, third party API testing should use a client as close
+to the third party API as possible. The point of these tests is API
+validation.
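
To make the intended shape concrete, a minimal third party API test in the new tempest/thirdparty tree might look roughly like the sketch below. It follows the style of the renamed boto tests later in this change; names such as ec2api_client and addResourceCleanUp are taken from that existing style and should be treated as assumptions here::

    from tempest import clients
    from tempest.common.utils.data_utils import rand_name
    from tempest.test import attr
    from tempest.thirdparty.boto.test import BotoTestCase


    @attr("EC2")
    class KeyPairExampleTest(BotoTestCase):

        @classmethod
        def setUpClass(cls):
            super(KeyPairExampleTest, cls).setUpClass()
            cls.client = clients.Manager().ec2api_client

        def test_create_and_delete_keypair(self):
            # exercise the EC2 keypair API through the boto client
            name = rand_name("keypair-")
            self.client.create_key_pair(name)
            self.addResourceCleanUp(self.client.delete_key_pair, name)
            self.assertIn(name,
                          [k.name for k in self.client.get_all_key_pairs()])
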
diff --git a/tempest/tests/boto/__init__.py b/tempest/thirdparty/__init__.py
similarity index 100%
copy from tempest/tests/boto/__init__.py
copy to tempest/thirdparty/__init__.py
diff --git a/tempest/tests/boto/__init__.py b/tempest/thirdparty/boto/__init__.py
similarity index 100%
rename from tempest/tests/boto/__init__.py
rename to tempest/thirdparty/boto/__init__.py
diff --git a/tempest/testboto.py b/tempest/thirdparty/boto/test.py
similarity index 98%
rename from tempest/testboto.py
rename to tempest/thirdparty/boto/test.py
index 9e652cb..afa5c69 100644
--- a/tempest/testboto.py
+++ b/tempest/thirdparty/boto/test.py
@@ -32,9 +32,9 @@
import tempest.config
from tempest import exceptions
import tempest.test
-from tempest.tests.boto.utils.wait import re_search_wait
-from tempest.tests.boto.utils.wait import state_wait
-from tempest.tests.boto.utils.wait import wait_exception
+from tempest.thirdparty.boto.utils.wait import re_search_wait
+from tempest.thirdparty.boto.utils.wait import state_wait
+from tempest.thirdparty.boto.utils.wait import wait_exception
LOG = logging.getLogger(__name__)
diff --git a/tempest/tests/boto/test_ec2_instance_run.py b/tempest/thirdparty/boto/test_ec2_instance_run.py
similarity index 97%
rename from tempest/tests/boto/test_ec2_instance_run.py
rename to tempest/thirdparty/boto/test_ec2_instance_run.py
index b6b93d8..bbe11d1 100644
--- a/tempest/tests/boto/test_ec2_instance_run.py
+++ b/tempest/thirdparty/boto/test_ec2_instance_run.py
@@ -25,10 +25,10 @@
from tempest.common.utils.linux.remote_client import RemoteClient
from tempest import exceptions
from tempest.test import attr
-from tempest.testboto import BotoTestCase
-from tempest.tests.boto.utils.s3 import s3_upload_dir
-from tempest.tests.boto.utils.wait import re_search_wait
-from tempest.tests.boto.utils.wait import state_wait
+from tempest.thirdparty.boto.test import BotoTestCase
+from tempest.thirdparty.boto.utils.s3 import s3_upload_dir
+from tempest.thirdparty.boto.utils.wait import re_search_wait
+from tempest.thirdparty.boto.utils.wait import state_wait
LOG = logging.getLogger(__name__)
diff --git a/tempest/tests/boto/test_ec2_keys.py b/tempest/thirdparty/boto/test_ec2_keys.py
similarity index 97%
rename from tempest/tests/boto/test_ec2_keys.py
rename to tempest/thirdparty/boto/test_ec2_keys.py
index d96ee11..5304649 100644
--- a/tempest/tests/boto/test_ec2_keys.py
+++ b/tempest/thirdparty/boto/test_ec2_keys.py
@@ -20,7 +20,7 @@
from tempest import clients
from tempest.common.utils.data_utils import rand_name
from tempest.test import attr
-from tempest.testboto import BotoTestCase
+from tempest.thirdparty.boto.test import BotoTestCase
def compare_key_pairs(a, b):
diff --git a/tempest/tests/boto/test_ec2_network.py b/tempest/thirdparty/boto/test_ec2_network.py
similarity index 96%
rename from tempest/tests/boto/test_ec2_network.py
rename to tempest/thirdparty/boto/test_ec2_network.py
index ef307a1..6878df1 100644
--- a/tempest/tests/boto/test_ec2_network.py
+++ b/tempest/thirdparty/boto/test_ec2_network.py
@@ -19,7 +19,7 @@
from tempest import clients
from tempest.test import attr
-from tempest.testboto import BotoTestCase
+from tempest.thirdparty.boto.test import BotoTestCase
@attr("EC2")
diff --git a/tempest/tests/boto/test_ec2_security_groups.py b/tempest/thirdparty/boto/test_ec2_security_groups.py
similarity index 98%
rename from tempest/tests/boto/test_ec2_security_groups.py
rename to tempest/thirdparty/boto/test_ec2_security_groups.py
index dd46a91..54a94f8 100644
--- a/tempest/tests/boto/test_ec2_security_groups.py
+++ b/tempest/thirdparty/boto/test_ec2_security_groups.py
@@ -18,7 +18,7 @@
from tempest import clients
from tempest.common.utils.data_utils import rand_name
from tempest.test import attr
-from tempest.testboto import BotoTestCase
+from tempest.thirdparty.boto.test import BotoTestCase
@attr("EC2")
diff --git a/tempest/tests/boto/test_ec2_volumes.py b/tempest/thirdparty/boto/test_ec2_volumes.py
similarity index 97%
rename from tempest/tests/boto/test_ec2_volumes.py
rename to tempest/thirdparty/boto/test_ec2_volumes.py
index 37a913e..b4d763d 100644
--- a/tempest/tests/boto/test_ec2_volumes.py
+++ b/tempest/thirdparty/boto/test_ec2_volumes.py
@@ -19,7 +19,7 @@
from tempest import clients
from tempest.test import attr
-from tempest.testboto import BotoTestCase
+from tempest.thirdparty.boto.test import BotoTestCase
LOG = logging.getLogger(__name__)
diff --git a/tempest/tests/boto/test_s3_buckets.py b/tempest/thirdparty/boto/test_s3_buckets.py
similarity index 96%
rename from tempest/tests/boto/test_s3_buckets.py
rename to tempest/thirdparty/boto/test_s3_buckets.py
index 0a05ae0..3b7c5a7 100644
--- a/tempest/tests/boto/test_s3_buckets.py
+++ b/tempest/thirdparty/boto/test_s3_buckets.py
@@ -20,7 +20,7 @@
from tempest import clients
from tempest.common.utils.data_utils import rand_name
from tempest.test import attr
-from tempest.testboto import BotoTestCase
+from tempest.thirdparty.boto.test import BotoTestCase
@attr("S3")
diff --git a/tempest/tests/boto/test_s3_ec2_images.py b/tempest/thirdparty/boto/test_s3_ec2_images.py
similarity index 96%
rename from tempest/tests/boto/test_s3_ec2_images.py
rename to tempest/thirdparty/boto/test_s3_ec2_images.py
index f77743e..594f416 100644
--- a/tempest/tests/boto/test_s3_ec2_images.py
+++ b/tempest/thirdparty/boto/test_s3_ec2_images.py
@@ -22,9 +22,9 @@
from tempest import clients
from tempest.common.utils.data_utils import rand_name
from tempest.test import attr
-from tempest.testboto import BotoTestCase
-from tempest.tests.boto.utils.s3 import s3_upload_dir
-from tempest.tests.boto.utils.wait import state_wait
+from tempest.thirdparty.boto.test import BotoTestCase
+from tempest.thirdparty.boto.utils.s3 import s3_upload_dir
+from tempest.thirdparty.boto.utils.wait import state_wait
@attr("S3", "EC2")
diff --git a/tempest/tests/boto/test_s3_objects.py b/tempest/thirdparty/boto/test_s3_objects.py
similarity index 96%
rename from tempest/tests/boto/test_s3_objects.py
rename to tempest/thirdparty/boto/test_s3_objects.py
index 9d4d79c..b256bc4 100644
--- a/tempest/tests/boto/test_s3_objects.py
+++ b/tempest/thirdparty/boto/test_s3_objects.py
@@ -22,7 +22,7 @@
from tempest import clients
from tempest.common.utils.data_utils import rand_name
from tempest.test import attr
-from tempest.testboto import BotoTestCase
+from tempest.thirdparty.boto.test import BotoTestCase
@attr("S3")
diff --git a/tempest/tests/boto/utils/__init__.py b/tempest/thirdparty/boto/utils/__init__.py
similarity index 100%
rename from tempest/tests/boto/utils/__init__.py
rename to tempest/thirdparty/boto/utils/__init__.py
diff --git a/tempest/tests/boto/utils/s3.py b/tempest/thirdparty/boto/utils/s3.py
similarity index 100%
rename from tempest/tests/boto/utils/s3.py
rename to tempest/thirdparty/boto/utils/s3.py
diff --git a/tempest/tests/boto/utils/wait.py b/tempest/thirdparty/boto/utils/wait.py
similarity index 100%
rename from tempest/tests/boto/utils/wait.py
rename to tempest/thirdparty/boto/utils/wait.py
diff --git a/tempest/whitebox/README.rst b/tempest/whitebox/README.rst
new file mode 100644
index 0000000..dabf758
--- /dev/null
+++ b/tempest/whitebox/README.rst
@@ -0,0 +1,46 @@
+Tempest Guide to Whitebox tests
+===============================
+
+
+What are these tests?
+---------------------
+
+When you hit the OpenStack API, this causes internal state changes in
+the system. These might be database transitions, VM modifications, or
+other deep state changes which aren't really accessible through the
+OpenStack API. These side effects are sometimes important to
+validate.
+
+White box testing is one approach to this. In white box testing you are
+given database access to the environment, and can verify internal
+record changes after an API call.
+
+This is an optional part of testing, and requires extra setup, but can
+be useful for validating these internal side effects.
+
+
+Why are these tests in tempest?
+-------------------------------
+
+Especially when it comes to something like a VM state change, which
+requires the coordination of numerous running daemons and a functioning
+VM, it's very difficult to get a realistic test like this from unit tests.
+
+
+Scope of these tests
+--------------------
+
+White box tests should be limited to tests where black box testing
+(using the OpenStack API to verify results) isn't sufficient.
+
+As these poke at the internals of OpenStack, it should also be realized
+that these tests are very tightly coupled to the current implementation
+of OpenStack. They will need to be maintained aggressively to keep up
+with internal changes in OpenStack projects.
+
+
+Example of a good test
+----------------------
+
+Pushing VMs through a series of state transitions, and ensuring along
+the way that the database state transitions match what's expected.
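
The whitebox/manager.py change later in this patch reads its settings from a [whitebox] section of tempest.conf; whitebox_enabled, source_dir and bin_dir appear in that code, while the remaining option names and all of the values below are assumptions for illustration only::

    [whitebox]
    # Whether or not whitebox tests are expected to run
    whitebox_enabled = false

    # Location of the nova source tree and its binaries on the target host
    source_dir = /opt/stack/nova
    bin_dir = /usr/local/bin

    # Connection string for direct database access (option name assumed)
    #db_uri = mysql://root:secret@localhost/nova
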
diff --git a/tempest/tests/boto/__init__.py b/tempest/whitebox/__init__.py
similarity index 100%
copy from tempest/tests/boto/__init__.py
copy to tempest/whitebox/__init__.py
diff --git a/tempest/whitebox.py b/tempest/whitebox/manager.py
similarity index 98%
rename from tempest/whitebox.py
rename to tempest/whitebox/manager.py
index cf9fff0..a75edb0 100644
--- a/tempest/whitebox.py
+++ b/tempest/whitebox/manager.py
@@ -27,7 +27,6 @@
from tempest.common.utils.data_utils import rand_name
from tempest import exceptions
from tempest import test
-from tempest.tests import compute
LOG = logging.getLogger(__name__)
@@ -56,12 +55,11 @@
@classmethod
def setUpClass(cls):
- if not compute.WHITEBOX_ENABLED:
+ super(ComputeWhiteboxTest, cls).setUpClass()
+ if not cls.config.whitebox.whitebox_enabled:
msg = "Whitebox testing disabled"
raise cls.skipException(msg)
- super(ComputeWhiteboxTest, cls).setUpClass()
-
# Add some convenience attributes that tests use...
cls.nova_dir = cls.config.whitebox.source_dir
cls.compute_bin_dir = cls.config.whitebox.bin_dir
diff --git a/tempest/tests/compute/images/test_images_whitebox.py b/tempest/whitebox/test_images_whitebox.py
similarity index 98%
rename from tempest/tests/compute/images/test_images_whitebox.py
rename to tempest/whitebox/test_images_whitebox.py
index 9ec05dd..304677f 100644
--- a/tempest/tests/compute/images/test_images_whitebox.py
+++ b/tempest/whitebox/test_images_whitebox.py
@@ -19,11 +19,11 @@
from tempest import exceptions
from tempest.test import attr
from tempest.tests.compute import base
-from tempest import whitebox
+from tempest.whitebox import manager
@attr(type='whitebox')
-class ImagesWhiteboxTest(whitebox.ComputeWhiteboxTest, base.BaseComputeTest):
+class ImagesWhiteboxTest(manager.ComputeWhiteboxTest, base.BaseComputeTest):
_interface = 'json'
@classmethod
diff --git a/tempest/tests/compute/servers/test_servers_whitebox.py b/tempest/whitebox/test_servers_whitebox.py
similarity index 98%
rename from tempest/tests/compute/servers/test_servers_whitebox.py
rename to tempest/whitebox/test_servers_whitebox.py
index 6b192dd..2eab393 100644
--- a/tempest/tests/compute/servers/test_servers_whitebox.py
+++ b/tempest/whitebox/test_servers_whitebox.py
@@ -18,11 +18,11 @@
from tempest import exceptions
from tempest.test import attr
from tempest.tests.identity.base import BaseIdentityAdminTest
-from tempest import whitebox
+from tempest.whitebox import manager
@attr(type='whitebox')
-class ServersWhiteboxTest(whitebox.ComputeWhiteboxTest):
+class ServersWhiteboxTest(manager.ComputeWhiteboxTest):
_interface = 'json'
@classmethod
diff --git a/tempest/tests/boto/utils/__init__.py b/tools/__init__.py
similarity index 100%
copy from tempest/tests/boto/utils/__init__.py
copy to tools/__init__.py
diff --git a/tempest/tests/boto/__init__.py b/tools/hacking/__init__.py
similarity index 100%
copy from tempest/tests/boto/__init__.py
copy to tools/hacking/__init__.py
diff --git a/tools/hacking/tempest.py b/tools/hacking/tempest.py
new file mode 100644
index 0000000..1db8419
--- /dev/null
+++ b/tools/hacking/tempest.py
@@ -0,0 +1,42 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import re
+
+
+SKIP_DECORATOR = '@testtools.skip('
+
+
+def skip_bugs(physical_line):
+ """Check skip lines for proper bug entries
+
+ T101: Bug not in skip line
+ T102: Bug in message formatted incorrectly
+ """
+
+ pos = physical_line.find(SKIP_DECORATOR)
+
+ skip_re = re.compile(r'^\s*@testtools.skip.*')
+
+ if pos != -1 and skip_re.match(physical_line):
+ bug = re.compile(r'^.*\bbug\b.*', re.IGNORECASE)
+ if bug.match(physical_line) is None:
+ return (pos, 'T101: skips must have an associated bug')
+
+ bug_re = re.compile(r'.*skip\(.*Bug\s\#\d+', re.IGNORECASE)
+
+ if bug_re.match(physical_line) is None:
+ return (pos, 'T102: Bug number formatted incorrectly')
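
For illustration, here is how the new check behaves on a few made-up skip lines when skip_bugs() is called directly; this assumes the repository root is on PYTHONPATH so that tools.hacking.tempest is importable::

    from tools.hacking.tempest import skip_bugs

    # passes both checks: a bug is mentioned and 'Bug #<number>' is present
    print(skip_bugs('    @testtools.skip("Until Bug #1020722 is resolved.")'))

    # T101: no bug referenced at all
    print(skip_bugs('    @testtools.skip("Not implemented yet")'))

    # T102: a bug is mentioned, but not in the 'Bug #<number>' form
    print(skip_bugs('    @testtools.skip("Blocked by bug 1020722")'))
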
diff --git a/tox.ini b/tox.ini
index 565a9ad..2449c86 100644
--- a/tox.ini
+++ b/tox.ini
@@ -20,8 +20,7 @@
NOSE_OPENSTACK_SHOW_ELAPSED=1
NOSE_OPENSTACK_STDOUT=1
commands =
- nosetests --logging-format '%(asctime)-15s %(message)s' --with-xunit --xunit-file=nosetests-full.xml -sv tempest
- nosetests --logging-format '%(asctime)-15s %(message)s' --with-xunit --xunit-file=nosetests-cli.xml -sv cli
+ nosetests --logging-format '%(asctime)-15s %(message)s' --with-xunit --xunit-file=nosetests-full.xml -sv tempest/tests tempest/scenario tempest/thirdparty tempest/cli
[testenv:smoke]
sitepackages = True
@@ -47,8 +46,7 @@
NOSE_OPENSTACK_STDOUT=1
commands =
python -m tools/tempest_coverage -c start --combine
- nosetests --logging-format '%(asctime)-15s %(message)s' --with-xunit --xunit-file=nosetests-full.xml -sv tempest
- nosetests --logging-format '%(asctime)-15s %(message)s' --with-xunit --xunit-file=nosetests-cli.xml -sv cli
+ nosetests --logging-format '%(asctime)-15s %(message)s' --with-xunit --xunit-file=nosetests-full.xml -sv tempest/tests tempest/scenario tempest/thirdparty tempest/cli
python -m tools/tempest_coverage -c report --html
[testenv:pep8]
@@ -56,6 +54,9 @@
deps = -r{toxinidir}/tools/pip-requires
-r{toxinidir}/tools/test-requires
+[hacking]
+local-check = tools.hacking.tempest.skip_bugs
+
[flake8]
ignore = E125,H302,H404
show-source = True