Merge "Adding extra_specs to flavor format"
diff --git a/.testr.conf b/.testr.conf
index a0262d8..510f4c9 100644
--- a/.testr.conf
+++ b/.testr.conf
@@ -1,4 +1,8 @@
[DEFAULT]
-test_command=${PYTHON:-python} -m subunit.run discover -t ./ ./tempest $LISTOPT $IDOPTION
+test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
+ OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
+ OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-250} \
+ ${PYTHON:-python} -m subunit.run discover -t ./ ./tempest $LISTOPT $IDOPTION
test_id_option=--load-list $IDFILE
test_list_option=--list
+group_regex=([^\.]*\.)*
diff --git a/HACKING.rst b/HACKING.rst
index d69f935..1eb2d4f 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -1,6 +1,25 @@
Tempest Coding Guide
====================
+- Step 1: Read the OpenStack Style Commandments
+ https://github.com/openstack-dev/hacking/blob/master/HACKING.rst
+- Step 2: Read on
+
+Tempest Specific Commandments
+------------------------------
+
+[T101] If a test is broken because of a bug, it is appropriate to skip the test
+until the bug has been fixed. However, the skip message should be formatted so
+that Tempest's skip tracking tool can watch the bug status. The skip message
+should contain the string 'Bug' immediately followed by a space, then the bug
+number with '#' in front of the number.
+
+Example::
+
+ @testtools.skip("Skipped until the Bug #980688 is resolved")
+
+- [T102] Cannot import OpenStack python clients in tempest/api tests
+- [T103] tempest/tests is deprecated
Test Data/Configuration
-----------------------
@@ -10,157 +29,10 @@
- Use configuration files for values that will vary by environment
-General
--------
-- Put two newlines between top-level code (funcs, classes, etc)
-- Put one newline between methods in classes and anywhere else
-- Long lines should be wrapped in parentheses
- in preference to using a backslash for line continuation.
-- Do not write "except:", use "except Exception:" at the very least
-- Include your name with TODOs as in "#TODO(termie)"
-- Do not name anything the same name as a built-in or reserved word Example::
-
- def list():
- return [1, 2, 3]
-
- mylist = list() # BAD, shadows `list` built-in
-
- class Foo(object):
- def list(self):
- return [1, 2, 3]
-
- mylist = Foo().list() # OKAY, does not shadow built-in
-
-Imports
--------
-- Do not import objects, only modules (*)
-- Do not import more than one module per line (*)
-- Do not make relative imports
-- Order your imports by the full module path
-- Organize your imports according to the following template
-
-Example::
-
- # vim: tabstop=4 shiftwidth=4 softtabstop=4
- {{stdlib imports in human alphabetical order}}
- \n
- {{third-party lib imports in human alphabetical order}}
- \n
- {{tempest imports in human alphabetical order}}
- \n
- \n
- {{begin your code}}
-
-
-Human Alphabetical Order Examples
----------------------------------
-Example::
-
- import httplib
- import logging
- import random
- import StringIO
- import testtools
- import time
-
- import eventlet
- import webob.exc
-
- import tempest.config
- from tempest.services.compute.json.limits_client import LimitsClientJSON
- from tempest.services.compute.xml.limits_client import LimitsClientXML
- from tempest.services.volume.volumes_client import VolumesClientJSON
- import tempest.test
-
-
-Docstrings
-----------
-Example::
-
- """A one line docstring looks like this and ends in a period."""
-
-
- """A multi line docstring has a one-line summary, less than 80 characters.
-
- Then a new paragraph after a newline that explains in more detail any
- general information about the function, class or method. Example usages
- are also great to have here if it is a complex class for function.
-
- When writing the docstring for a class, an extra line should be placed
- after the closing quotations. For more in-depth explanations for these
- decisions see http://www.python.org/dev/peps/pep-0257/
-
- If you are going to describe parameters and return values, use Sphinx, the
- appropriate syntax is as follows.
-
- :param foo: the foo parameter
- :param bar: the bar parameter
- :returns: return_type -- description of the return value
- :returns: description of the return value
- :raises: AttributeError, KeyError
- """
-
-
-Dictionaries/Lists
-------------------
-If a dictionary (dict) or list object is longer than 80 characters, its items
-should be split with newlines. Embedded iterables should have their items
-indented. Additionally, the last item in the dictionary should have a trailing
-comma. This increases readability and simplifies future diffs.
-
-Example::
-
- my_dictionary = {
- "image": {
- "name": "Just a Snapshot",
- "size": 2749573,
- "properties": {
- "user_id": 12,
- "arch": "x86_64",
- },
- "things": [
- "thing_one",
- "thing_two",
- ],
- "status": "ACTIVE",
- },
- }
-
-
-Calling Methods
----------------
-Calls to methods 80 characters or longer should format each argument with
-newlines. This is not a requirement, but a guideline::
-
- unnecessarily_long_function_name('string one',
- 'string two',
- kwarg1=constants.ACTIVE,
- kwarg2=['a', 'b', 'c'])
-
-
-Rather than constructing parameters inline, it is better to break things up::
-
- list_of_strings = [
- 'what_a_long_string',
- 'not as long',
- ]
-
- dict_of_numbers = {
- 'one': 1,
- 'two': 2,
- 'twenty four': 24,
- }
-
- object_one.call_a_method('string three',
- 'string four',
- kwarg1=list_of_strings,
- kwarg2=dict_of_numbers)
-
-
Exception Handling
------------------
According to the ``The Zen of Python`` the
- ``Errors should never pass silently.``
+``Errors should never pass silently.``
Tempest usually runs in special environment (jenkins gate jobs), in every
error or failure situation we should provide as much error related
information as possible, because we usually do not have the chance to
@@ -185,6 +57,10 @@
exception at least logged. When the exception is logged you usually need
to ``raise`` the same or a different exception anyway.
+Use of ``self.addCleanup`` is often a good way to avoid having to catch
+exceptions and still ensure resources are correctly cleaned up if the
+test fails part way through.
+
Use the ``self.assert*`` methods provided by the unit test framework
the signal failures early.
@@ -202,72 +78,10 @@
This and the service logs are your only guide to find the root cause of flaky
issue.
-
-Test Skips
+Guidelines
----------
-If a test is broken because of a bug it is appropriate to skip the test until
-bug has been fixed. However, the skip message should be formatted so that
-Tempest's skip tracking tool can watch the bug status. The skip message should
-contain the string 'Bug' immediately followed by a space. Then the bug number
-should be included in the message '#' in front of the number.
-
-Example::
-
- @testtools.skip("Skipped until the Bug #980688 is resolved")
-
-
-openstack-common
-----------------
-
-A number of modules from openstack-common are imported into the project.
-
-These modules are "incubating" in openstack-common and are kept in sync
-with the help of openstack-common's update.py script. See:
-
- http://wiki.openstack.org/CommonLibrary#Incubation
-
-The copy of the code should never be directly modified here. Please
-always update openstack-common first and then run the script to copy
-the changes across.
-
-
-OpenStack Trademark
--------------------
-
-OpenStack is a registered trademark of the OpenStack Foundation, and uses the
-following capitalization:
-
- OpenStack
-
-
-Commit Messages
----------------
-Using a common format for commit messages will help keep our git history
-readable. Follow these guidelines:
-
- First, provide a brief summary (it is recommended to keep the commit title
- under 50 chars).
-
- The first line of the commit message should provide an accurate
- description of the change, not just a reference to a bug or
- blueprint. It must be followed by a single blank line.
-
- If the change relates to a specific driver (libvirt, xenapi, qpid, etc...),
- begin the first line of the commit message with the driver name, lowercased,
- followed by a colon.
-
- Following your brief summary, provide a more detailed description of
- the patch, manually wrapping the text at 72 characters. This
- description should provide enough detail that one does not have to
- refer to external resources to determine its high-level functionality.
-
- Once you use 'git review', two lines will be appended to the commit
- message: a blank line followed by a 'Change-Id'. This is important
- to correlate this commit with a specific review in Gerrit, and it
- should not be modified.
-
-For further information on constructing high quality commit messages,
-and how to split up commits into a series of changes, consult the
-project wiki:
-
- http://wiki.openstack.org/GitCommitMessages
+- Do not submit changesets with only testcases which are skipped as
+ they will not be merged.
+- Consistently check the status code of responses in testcases. The
+ earlier a problem is detected the easier it is to debug, especially
+ where there is complicated setup required.
diff --git a/README.rst b/README.rst
index da0f5f3..f18628a 100644
--- a/README.rst
+++ b/README.rst
@@ -1,5 +1,3 @@
-::
-
Tempest - The OpenStack Integration Test Suite
==============================================
@@ -37,9 +35,11 @@
Tempest is not tied to any single test runner, but Nose been the most commonly
used tool. After setting up your configuration file, you can execute
the set of Tempest tests by using ``nosetests`` ::
+
$> nosetests tempest
To run one single test ::
+
$> nosetests -sv tempest.api.compute.servers.test_server_actions.py:
ServerActionsTestJSON.test_rebuild_nonexistent_server
diff --git a/doc/source/index.rst b/doc/source/index.rst
index f012097..00c4e9a 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -33,6 +33,13 @@
field_guide/thirdparty
field_guide/whitebox
+------------------
+API and test cases
+------------------
+.. toctree::
+ :maxdepth: 1
+
+ api/modules
==================
Indices and tables
diff --git a/etc/logging.conf.sample b/etc/logging.conf.sample
index 685dd36..3b468f1 100644
--- a/etc/logging.conf.sample
+++ b/etc/logging.conf.sample
@@ -1,5 +1,5 @@
[loggers]
-keys=root,tempest
+keys=root,tempest,tempest_stress
[handlers]
keys=file,syslog,devel
@@ -16,6 +16,11 @@
handlers=file
qualname=tempest
+[logger_tempest_stress]
+level=INFO
+handlers=file,devel
+qualname=tempest.stress
+
[handler_file]
class=FileHandler
level=DEBUG
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index 12aa399..f1aaa07 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -1,3 +1,17 @@
+[DEFAULT]
+# log_config = /opt/stack/tempest/etc/logging.conf.sample
+
+# disable logging to the stderr
+use_stderr = False
+
+# log file
+log_file = tempest.log
+
+# lock/semaphore base directory
+lock_path=/tmp
+
+default_log_levels=tempest.stress=INFO,amqplib=WARN,sqlalchemy=WARN,boto=WARN,suds=INFO,keystone=INFO,eventlet.wsgi.server=WARN
+
[identity]
# This section contains configuration options that a variety of Tempest
# test clients use when authenticating with different user/tenant
@@ -86,17 +100,23 @@
fixed_network_name = private
# Network id used for SSH (public, private, etc)
-network_for_ssh = private
+network_for_ssh = public
# IP version of the address used for SSH
ip_version_for_ssh = 4
+# Number of seconds to wait when pinging an instance
+ping_timeout = 60
+
# Number of seconds to wait to authenticate to an instance
ssh_timeout = 300
# Number of seconds to wait for output from ssh channel
ssh_channel_timeout = 60
+# Does the SSH connection use a Floating IP?
+use_floatingip_for_ssh = True
+
# The type of endpoint for a Compute API service. Unless you have a
# custom Keystone service catalog implementation, you probably want to leave
# this value as "compute"
@@ -206,8 +226,6 @@
# for each tenant to have their own router.
public_router_id = {$PUBLIC_ROUTER_ID}
-# Whether or not neutron is expected to be available
-neutron_available = false
[volume]
# This section contains the configuration options used when executing tests
@@ -251,6 +269,8 @@
# Number of seconds to wait while looping to check the status of a
# container to container synchronization
container_sync_interval = 5
+# Set to True if the Account Quota middleware is enabled
+accounts_quotas_available = True
[boto]
# This section contains configuration options used when executing tests
@@ -305,9 +325,6 @@
# tests spawn full VMs, which could be slow if the test is already in a VM.
build_timeout = 300
-# Whether or not Heat is expected to be available
-heat_available = false
-
# Instance type for tests. Needs to be big enough for a
# full OS plus the test workload
instance_type = m1.micro
@@ -320,6 +337,13 @@
# any key, which will generate a keypair for each test class
#keypair_name = heat_key
+[dashboard]
+# URL where to find the dashboard home page
+dashboard_url = 'http://localhost/'
+
+# URL where to submit the login form
+login_url = 'http://localhost/auth/login/'
+
[scenario]
# Directory containing image files
img_dir = /opt/stack/new/devstack/files/images/cirros-0.3.1-x86_64-uec
@@ -336,8 +360,30 @@
# ssh username for the image file
ssh_user = cirros
+# Specifies how many resources to request at once. Used for large operations
+# testing.
+large_ops_number = 0
+
[cli]
# Enable cli tests
enabled = True
# directory where python client binaries are located
cli_dir = /usr/local/bin
+# Number of seconds to wait on a CLI timeout
+timeout = 15
+
+[service_available]
+# Whether or not cinder is expected to be available
+cinder = True
+# Whether or not neutron is expected to be available
+neutron = false
+# Whether or not glance is expected to be available
+glance = True
+# Whether or not swift is expected to be available
+swift = True
+# Whether or not nova is expected to be available
+nova = True
+# Whether or not Heat is expected to be available
+heat = false
+# Whether or not horizon is expected to be available
+horizon = True
diff --git a/openstack-common.conf b/openstack-common.conf
index 24af119..ff84404 100644
--- a/openstack-common.conf
+++ b/openstack-common.conf
@@ -1,7 +1,10 @@
[DEFAULT]
# The list of modules to copy from openstack-common
-modules=install_venv_common
+module=install_venv_common
+module=lockutils
+module=log
+module=importlib
# The base module to hold the copy of openstack.common
base=tempest
diff --git a/requirements.txt b/requirements.txt
index 06aa9f3..cc61b01 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -19,3 +19,4 @@
oslo.config>=1.1.0
# Needed for whitebox testing
sqlalchemy
+eventlet>=0.12.0
diff --git a/run_tests.sh b/run_tests.sh
index 366564e..f8636c1 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -11,21 +11,22 @@
echo " -u, --update Update the virtual environment with any newer package versions"
echo " -s, --smoke Only run smoke tests"
echo " -w, --whitebox Only run whitebox tests"
+ echo " -t, --parallel Run testr parallel"
echo " -c, --nova-coverage Enable Nova coverage collection"
echo " -C, --config Config file location"
echo " -p, --pep8 Just run pep8"
echo " -h, --help Print this usage message"
echo " -d, --debug Debug this script -- set -o xtrace"
- echo " -S, --stdout Don't capture stdout"
echo " -l, --logging Enable logging"
echo " -L, --logging-config Logging config file location. Default is etc/logging.conf"
- echo " -- [NOSEOPTIONS] After the first '--' you can pass arbitrary arguments to nosetests "
+ echo " -- [TESTROPTIONS] After the first '--' you can pass arbitrary arguments to testr "
}
-noseargs=""
+testrargs=""
just_pep8=0
venv=.venv
with_venv=tools/with_venv.sh
+parallel=0
always_venv=0
never_venv=0
no_site_packages=0
@@ -37,7 +38,7 @@
logging=0
logging_config=etc/logging.conf
-if ! options=$(getopt -o VNnfuswcphdSC:lL: -l virtual-env,no-virtual-env,no-site-packages,force,update,smoke,whitebox,nova-coverage,pep8,help,debug,stdout,config:,logging,logging-config: -- "$@")
+if ! options=$(getopt -o VNnfuswtcphdC:lL: -l virtual-env,no-virtual-env,no-site-packages,force,update,smoke,whitebox,parallel,nova-coverage,pep8,help,debug,config:,logging,logging-config: -- "$@")
then
# parse error
usage
@@ -58,13 +59,13 @@
-c|--nova-coverage) let nova_coverage=1;;
-C|--config) config_file=$2; shift;;
-p|--pep8) let just_pep8=1;;
- -s|--smoke) noseargs="$noseargs --attr=type=smoke";;
- -w|--whitebox) noseargs="$noseargs --attr=type=whitebox";;
- -S|--stdout) noseargs="$noseargs -s";;
+ -s|--smoke) testrargs="$testrargs smoke";;
+ -w|--whitebox) testrargs="$testrargs whitebox";;
+ -t|--parallel) parallel=1;;
-l|--logging) logging=1;;
-L|--logging-config) logging_config=$2; shift;;
- --) [ "yes" == "$first_uu" ] || noseargs="$noseargs $1"; first_uu=no ;;
- *) noseargs="$noseargs $1"
+ --) [ "yes" == "$first_uu" ] || testrargs="$testrargs $1"; first_uu=no ;;
+ *) testrargs="$testrargs $1"
esac
shift
done
@@ -87,26 +88,24 @@
cd `dirname "$0"`
-export NOSE_WITH_OPENSTACK=1
-export NOSE_OPENSTACK_COLOR=1
-export NOSE_OPENSTACK_RED=15.00
-export NOSE_OPENSTACK_YELLOW=3.00
-export NOSE_OPENSTACK_SHOW_ELAPSED=1
-export NOSE_OPENSTACK_STDOUT=1
-
if [ $no_site_packages -eq 1 ]; then
installvenvopts="--no-site-packages"
fi
-# only add tempest default if we don't specify a test
-if [[ "x$noseargs" =~ "tempest" ]]; then
- noseargs="$noseargs"
-else
- noseargs="$noseargs tempest"
-fi
+function testr_init {
+ if [ ! -d .testrepository ]; then
+ ${wrapper} testr init
+ fi
+}
function run_tests {
- ${wrapper} $NOSETESTS
+ testr_init
+ ${wrapper} find . -type f -name "*.pyc" -delete
+ if [ $parallel -eq 1 ]; then
+ ${wrapper} testr run --parallel --subunit $testrargs | ${wrapper} subunit-2to1 | ${wrapper} tools/colorizer.py
+ else
+ ${wrapper} testr run --subunit $testrargs | ${wrapper} subunit-2to1 | ${wrapper} tools/colorizer.py
+ fi
}
function run_pep8 {
@@ -124,8 +123,6 @@
${wrapper} python tools/tempest_coverage.py -c report
}
-NOSETESTS="nosetests $noseargs"
-
if [ $never_venv -eq 0 ]
then
# Remove the virtual environment if --force used
@@ -172,7 +169,7 @@
run_coverage_report
fi
-if [ -z "$noseargs" ]; then
+if [ -z "$testrargs" ]; then
run_pep8
fi
diff --git a/setup.cfg b/setup.cfg
index 3b13b60..7cfc4ce 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -35,3 +35,6 @@
# coverage http://pypi.python.org/pypi/coverage
# openstack-nose https://github.com/openstack-dev/openstack-nose
verbosity=2
+
+[pbr]
+autodoc_tree_index_modules=true
diff --git a/tempest/README.rst b/tempest/README.rst
index 8f07a07..33021c8 100644
--- a/tempest/README.rst
+++ b/tempest/README.rst
@@ -1,6 +1,6 @@
-============
+============================
Tempest Field Guide Overview
-============
+============================
Tempest is designed to be useful for a large number of different
environments. This includes being useful for gating commits to
@@ -26,7 +26,7 @@
api
-------------
+---
API tests are validation tests for the OpenStack API. They should not
use the existing python clients for OpenStack, but should instead use
@@ -41,7 +41,7 @@
cli
-------------
+---
CLI tests use the openstack CLI to interact with the OpenStack
cloud. CLI testing in unit tests is somewhat difficult because unlike
@@ -51,7 +51,7 @@
scenario
-------------
+--------
Scenario tests are complex "through path" tests for OpenStack
functionality. They are typically a series of steps where complicated
@@ -61,7 +61,7 @@
stress
------------
+------
Stress tests are designed to stress an OpenStack environment by
running a high workload against it and seeing what breaks. Tools may
@@ -72,7 +72,7 @@
thirdparty
-------------
+----------
Many openstack components include 3rdparty API support. It is
completely legitimate for Tempest to include tests of 3rdparty APIs,
@@ -81,7 +81,7 @@
whitebox
-----------
+--------
Whitebox tests are tests which require access to the database of the
target OpenStack machine to verify internal state after operations
diff --git a/tempest/api/compute/__init__.py b/tempest/api/compute/__init__.py
index fb96b4a..fd26081 100644
--- a/tempest/api/compute/__init__.py
+++ b/tempest/api/compute/__init__.py
@@ -15,9 +15,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest.common import log as logging
from tempest import config
from tempest.exceptions import InvalidConfiguration
+from tempest.openstack.common import log as logging
LOG = logging.getLogger(__name__)
diff --git a/tempest/api/compute/admin/test_aggregates.py b/tempest/api/compute/admin/test_aggregates.py
index b66bd7e..303bc0c 100644
--- a/tempest/api/compute/admin/test_aggregates.py
+++ b/tempest/api/compute/admin/test_aggregates.py
@@ -18,6 +18,7 @@
from tempest.api.compute import base
from tempest.common.utils.data_utils import rand_name
from tempest import exceptions
+from tempest.openstack.common import lockutils
from tempest.test import attr
@@ -145,6 +146,7 @@
self.client.get_aggregate, -1)
@attr(type='gate')
+ @lockutils.synchronized('availability_zone', 'tempest-', True)
def test_aggregate_add_remove_host(self):
# Add an host to the given aggregate and remove.
aggregate_name = rand_name(self.aggregate_name_prefix)
@@ -166,6 +168,7 @@
self.assertNotIn(self.host, body['hosts'])
@attr(type='gate')
+ @lockutils.synchronized('availability_zone', 'tempest-', True)
def test_aggregate_add_host_list(self):
# Add an host to the given aggregate and list.
aggregate_name = rand_name(self.aggregate_name_prefix)
@@ -183,6 +186,7 @@
self.assertIn(self.host, agg['hosts'])
@attr(type='gate')
+ @lockutils.synchronized('availability_zone', 'tempest-', True)
def test_aggregate_add_host_get_details(self):
# Add an host to the given aggregate and get details.
aggregate_name = rand_name(self.aggregate_name_prefix)
@@ -197,6 +201,7 @@
self.assertIn(self.host, body['hosts'])
@attr(type='gate')
+ @lockutils.synchronized('availability_zone', 'tempest-', True)
def test_aggregate_add_host_create_server_with_az(self):
# Add an host to the given aggregate and create a server.
aggregate_name = rand_name(self.aggregate_name_prefix)
@@ -205,7 +210,6 @@
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
self.client.add_host(aggregate['id'], self.host)
self.addCleanup(self.client.remove_host, aggregate['id'], self.host)
-
server_name = rand_name('test_server_')
servers_client = self.servers_client
admin_servers_client = self.os_adm.servers_client
@@ -244,6 +248,7 @@
aggregate['id'], self.host)
@attr(type=['negative', 'gate'])
+ @lockutils.synchronized('availability_zone', 'tempest-', True)
def test_aggregate_remove_host_as_user(self):
# Regular user is not allowed to remove a host from an aggregate.
aggregate_name = rand_name(self.aggregate_name_prefix)
diff --git a/tempest/api/compute/admin/test_fixed_ips.py b/tempest/api/compute/admin/test_fixed_ips.py
index 2eaf3b0..8b96370 100644
--- a/tempest/api/compute/admin/test_fixed_ips.py
+++ b/tempest/api/compute/admin/test_fixed_ips.py
@@ -56,7 +56,7 @@
CONF = config.TempestConfig()
- @testtools.skipIf(CONF.network.neutron_available, "This feature is not" +
+ @testtools.skipIf(CONF.service_available.neutron, "This feature is not" +
"implemented by Neutron. See bug: #1194569")
@attr(type='gate')
def test_list_fixed_ip_details(self):
diff --git a/tempest/api/compute/admin/test_flavors.py b/tempest/api/compute/admin/test_flavors.py
index 6db20f9..6d0a5b5 100644
--- a/tempest/api/compute/admin/test_flavors.py
+++ b/tempest/api/compute/admin/test_flavors.py
@@ -60,7 +60,7 @@
flavor_name = rand_name(self.flavor_name_prefix)
new_flavor_id = rand_int_id(start=1000)
- #Create the flavor
+ # Create the flavor
resp, flavor = self.client.create_flavor(flavor_name,
self.ram, self.vcpus,
self.disk,
@@ -87,7 +87,7 @@
if self._interface == "json":
self.assertEqual(flavor['os-flavor-access:is_public'], True)
- #Verify flavor is retrieved
+ # Verify flavor is retrieved
resp, flavor = self.client.get_flavor_details(new_flavor_id)
self.assertEqual(resp.status, 200)
self.assertEqual(flavor['name'], flavor_name)
@@ -99,7 +99,7 @@
flavor_name = rand_name(self.flavor_name_prefix)
new_flavor_id = rand_int_id(start=1000)
- #Create the flavor
+ # Create the flavor
resp, flavor = self.client.create_flavor(flavor_name,
self.ram, self.vcpus,
self.disk,
@@ -109,7 +109,7 @@
rxtx=self.rxtx)
self.addCleanup(self.flavor_clean_up, flavor['id'])
flag = False
- #Verify flavor is retrieved
+ # Verify flavor is retrieved
resp, flavors = self.client.list_flavors_with_detail()
self.assertEqual(resp.status, 200)
for flavor in flavors:
@@ -153,12 +153,12 @@
@attr(type='gate')
def test_create_list_flavor_without_extra_data(self):
- #Create a flavor and ensure it is listed
- #This operation requires the user to have 'admin' role
+ # Create a flavor and ensure it is listed
+ # This operation requires the user to have 'admin' role
flavor_name = rand_name(self.flavor_name_prefix)
new_flavor_id = rand_int_id(start=1000)
- #Create the flavor
+ # Create the flavor
resp, flavor = self.client.create_flavor(flavor_name,
self.ram, self.vcpus,
self.disk,
@@ -181,11 +181,11 @@
if self._interface == "json":
self.assertEqual(flavor['os-flavor-access:is_public'], True)
- #Verify flavor is retrieved
+ # Verify flavor is retrieved
resp, flavor = self.client.get_flavor_details(new_flavor_id)
self.assertEqual(resp.status, 200)
self.assertEqual(flavor['name'], flavor_name)
- #Check if flavor is present in list
+ # Check if flavor is present in list
resp, flavors = self.client.list_flavors_with_detail()
self.assertEqual(resp.status, 200)
for flavor in flavors:
@@ -195,13 +195,13 @@
@attr(type='gate')
def test_flavor_not_public_verify_entry_not_in_list_details(self):
- #Create a flavor with os-flavor-access:is_public false should not
- #be present in list_details.
- #This operation requires the user to have 'admin' role
+ # Create a flavor with os-flavor-access:is_public false should not
+ # be present in list_details.
+ # This operation requires the user to have 'admin' role
flavor_name = rand_name(self.flavor_name_prefix)
new_flavor_id = rand_int_id(start=1000)
- #Create the flavor
+ # Create the flavor
resp, flavor = self.client.create_flavor(flavor_name,
self.ram, self.vcpus,
self.disk,
@@ -209,7 +209,7 @@
is_public="False")
self.addCleanup(self.flavor_clean_up, flavor['id'])
flag = False
- #Verify flavor is retrieved
+ # Verify flavor is retrieved
resp, flavors = self.client.list_flavors_with_detail()
self.assertEqual(resp.status, 200)
for flavor in flavors:
@@ -219,12 +219,12 @@
@attr(type='gate')
def test_list_public_flavor_with_other_user(self):
- #Create a Flavor with public access.
- #Try to List/Get flavor with another user
+ # Create a Flavor with public access.
+ # Try to List/Get flavor with another user
flavor_name = rand_name(self.flavor_name_prefix)
new_flavor_id = rand_int_id(start=1000)
- #Create the flavor
+ # Create the flavor
resp, flavor = self.client.create_flavor(flavor_name,
self.ram, self.vcpus,
self.disk,
@@ -233,7 +233,7 @@
self.addCleanup(self.flavor_clean_up, flavor['id'])
flag = False
self.new_client = self.flavors_client
- #Verify flavor is retrieved with new user
+ # Verify flavor is retrieved with new user
resp, flavors = self.new_client.list_flavors_with_detail()
self.assertEqual(resp.status, 200)
for flavor in flavors:
diff --git a/tempest/api/compute/admin/test_flavors_access.py b/tempest/api/compute/admin/test_flavors_access.py
index 63d5025..107b23d 100644
--- a/tempest/api/compute/admin/test_flavors_access.py
+++ b/tempest/api/compute/admin/test_flavors_access.py
@@ -52,7 +52,7 @@
@attr(type='gate')
def test_flavor_access_add_remove(self):
- #Test to add and remove flavor access to a given tenant.
+ # Test to add and remove flavor access to a given tenant.
flavor_name = rand_name(self.flavor_name_prefix)
new_flavor_id = rand_int_id(start=1000)
resp, new_flavor = self.client.create_flavor(flavor_name,
@@ -61,7 +61,7 @@
new_flavor_id,
is_public='False')
self.addCleanup(self.client.delete_flavor, new_flavor['id'])
- #Add flavor access to a tenant.
+ # Add flavor access to a tenant.
resp_body = {
"tenant_id": str(self.tenant_id),
"flavor_id": str(new_flavor['id']),
@@ -71,25 +71,25 @@
self.assertEqual(add_resp.status, 200)
self.assertIn(resp_body, add_body)
- #The flavor is present in list.
+ # The flavor is present in list.
resp, flavors = self.flavors_client.list_flavors_with_detail()
self.assertEqual(resp.status, 200)
self.assertIn(new_flavor['id'], map(lambda x: x['id'], flavors))
- #Remove flavor access from a tenant.
+ # Remove flavor access from a tenant.
remove_resp, remove_body = \
self.client.remove_flavor_access(new_flavor['id'], self.tenant_id)
self.assertEqual(remove_resp.status, 200)
self.assertNotIn(resp_body, remove_body)
- #The flavor is not present in list.
+ # The flavor is not present in list.
resp, flavors = self.flavors_client.list_flavors_with_detail()
self.assertEqual(resp.status, 200)
self.assertNotIn(new_flavor['id'], map(lambda x: x['id'], flavors))
@attr(type=['negative', 'gate'])
def test_flavor_non_admin_add(self):
- #Test to add flavor access as a user without admin privileges.
+ # Test to add flavor access as a user without admin privileges.
flavor_name = rand_name(self.flavor_name_prefix)
new_flavor_id = rand_int_id(start=1000)
resp, new_flavor = self.client.create_flavor(flavor_name,
@@ -105,7 +105,7 @@
@attr(type=['negative', 'gate'])
def test_flavor_non_admin_remove(self):
- #Test to remove flavor access as a user without admin privileges.
+ # Test to remove flavor access as a user without admin privileges.
flavor_name = rand_name(self.flavor_name_prefix)
new_flavor_id = rand_int_id(start=1000)
resp, new_flavor = self.client.create_flavor(flavor_name,
@@ -114,7 +114,7 @@
new_flavor_id,
is_public='False')
self.addCleanup(self.client.delete_flavor, new_flavor['id'])
- #Add flavor access to a tenant.
+ # Add flavor access to a tenant.
self.client.add_flavor_access(new_flavor['id'], self.tenant_id)
self.addCleanup(self.client.remove_flavor_access,
new_flavor['id'], self.tenant_id)
diff --git a/tempest/api/compute/admin/test_flavors_extra_specs.py b/tempest/api/compute/admin/test_flavors_extra_specs.py
index db376b5..7b79a12 100644
--- a/tempest/api/compute/admin/test_flavors_extra_specs.py
+++ b/tempest/api/compute/admin/test_flavors_extra_specs.py
@@ -47,7 +47,7 @@
cls.new_flavor_id = 12345
swap = 1024
rxtx = 1
- #Create a flavor so as to set/get/unset extra specs
+ # Create a flavor so as to set/get/unset extra specs
resp, cls.flavor = cls.client.create_flavor(flavor_name,
ram, vcpus,
disk,
@@ -62,28 +62,28 @@
@attr(type='gate')
def test_flavor_set_get_unset_keys(self):
- #Test to SET, GET UNSET flavor extra spec as a user
- #with admin privileges.
- #Assigning extra specs values that are to be set
+ # Test to SET, GET UNSET flavor extra spec as a user
+ # with admin privileges.
+ # Assigning extra specs values that are to be set
specs = {"key1": "value1", "key2": "value2"}
- #SET extra specs to the flavor created in setUp
+ # SET extra specs to the flavor created in setUp
set_resp, set_body = \
self.client.set_flavor_extra_spec(self.flavor['id'], specs)
self.assertEqual(set_resp.status, 200)
self.assertEqual(set_body, specs)
- #GET extra specs and verify
+ # GET extra specs and verify
get_resp, get_body = \
self.client.get_flavor_extra_spec(self.flavor['id'])
self.assertEqual(get_resp.status, 200)
self.assertEqual(get_body, specs)
- #UNSET extra specs that were set in this test
+ # UNSET extra specs that were set in this test
unset_resp, _ = \
self.client.unset_flavor_extra_spec(self.flavor['id'], "key1")
self.assertEqual(unset_resp.status, 200)
@attr(type=['negative', 'gate'])
def test_flavor_non_admin_set_keys(self):
- #Test to SET flavor extra spec as a user without admin privileges.
+ # Test to SET flavor extra spec as a user without admin privileges.
specs = {"key1": "value1", "key2": "value2"}
self.assertRaises(exceptions.Unauthorized,
self.flavors_client.set_flavor_extra_spec,
diff --git a/tempest/api/compute/admin/test_hosts.py b/tempest/api/compute/admin/test_hosts.py
index a47e6c9..849cebb 100644
--- a/tempest/api/compute/admin/test_hosts.py
+++ b/tempest/api/compute/admin/test_hosts.py
@@ -48,7 +48,7 @@
resp, hosts = self.client.list_hosts(params)
self.assertEqual(200, resp.status)
self.assertTrue(len(hosts) >= 1)
- self.assertTrue(host in hosts)
+ self.assertIn(host, hosts)
@attr(type='negative')
def test_list_hosts_with_non_existent_zone(self):
diff --git a/tempest/api/compute/admin/test_quotas.py b/tempest/api/compute/admin/test_quotas.py
index a6b4e31..156274d 100644
--- a/tempest/api/compute/admin/test_quotas.py
+++ b/tempest/api/compute/admin/test_quotas.py
@@ -36,21 +36,22 @@
resp, tenants = cls.identity_admin_client.list_tenants()
- #NOTE(afazekas): these test cases should always create and use a new
+ # NOTE(afazekas): these test cases should always create and use a new
# tenant most of them should be skipped if we can't do that
if cls.config.compute.allow_tenant_isolation:
- cls.demo_tenant_id = cls.isolated_creds[0][0]['tenantId']
+ cls.demo_tenant_id = cls.isolated_creds.get_primary_user().get(
+ 'tenantId')
else:
cls.demo_tenant_id = [tnt['id'] for tnt in tenants if tnt['name']
== cls.config.identity.tenant_name][0]
- cls.default_quota_set = {'injected_file_content_bytes': 10240,
- 'metadata_items': 128, 'injected_files': 5,
- 'ram': 51200, 'floating_ips': 10,
- 'fixed_ips': -1, 'key_pairs': 100,
- 'injected_file_path_bytes': 255,
- 'instances': 10, 'security_group_rules': 20,
- 'cores': 20, 'security_groups': 10}
+ cls.default_quota_set = set(('injected_file_content_bytes',
+ 'metadata_items', 'injected_files',
+ 'ram', 'floating_ips',
+ 'fixed_ips', 'key_pairs',
+ 'injected_file_path_bytes',
+ 'instances', 'security_group_rules',
+ 'cores', 'security_groups'))
@classmethod
def tearDownClass(cls):
@@ -64,12 +65,13 @@
@attr(type='smoke')
def test_get_default_quotas(self):
# Admin can get the default resource quota set for a tenant
- expected_quota_set = self.default_quota_set.copy()
- expected_quota_set['id'] = self.demo_tenant_id
+ expected_quota_set = self.default_quota_set | set(['id'])
resp, quota_set = self.client.get_default_quota_set(
self.demo_tenant_id)
self.assertEqual(200, resp.status)
- self.assertEqual(expected_quota_set, quota_set)
+ self.assertEqual(sorted(expected_quota_set),
+ sorted(quota_set.keys()))
+ self.assertEqual(quota_set['id'], self.demo_tenant_id)
@testtools.skip("Skipped until the Bug #1160749 is resolved")
@attr(type='gate')
@@ -82,47 +84,36 @@
'key_pairs': 200, 'injected_file_path_bytes': 512,
'instances': 20, 'security_group_rules': 20,
'cores': 2, 'security_groups': 20}
- try:
- # Update limits for all quota resources
- resp, quota_set = self.adm_client.update_quota_set(
- self.demo_tenant_id,
- **new_quota_set)
- self.addCleanup(self.adm_client.update_quota_set,
- self.demo_tenant_id, **self.default_quota_set)
- self.assertEqual(200, resp.status)
- self.assertEqual(new_quota_set, quota_set)
- except Exception:
- self.fail("Admin could not update quota set for the tenant")
- finally:
- # Reset quota resource limits to default values
- resp, quota_set = self.adm_client.update_quota_set(
- self.demo_tenant_id,
- **self.default_quota_set)
- self.assertEqual(200, resp.status, "Failed to reset quota "
- "defaults")
+ # Update limits for all quota resources
+ resp, quota_set = self.adm_client.update_quota_set(
+ self.demo_tenant_id,
+ **new_quota_set)
+ self.addCleanup(self.adm_client.update_quota_set,
+ self.demo_tenant_id, **self.default_quota_set)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(new_quota_set, quota_set)
- #TODO(afazekas): merge these test cases
+ # TODO(afazekas): merge these test cases
@attr(type='gate')
def test_get_updated_quotas(self):
# Verify that GET shows the updated quota set
- self.adm_client.update_quota_set(self.demo_tenant_id,
- ram='5120')
- self.addCleanup(self.adm_client.update_quota_set,
- self.demo_tenant_id, **self.default_quota_set)
- try:
- resp, quota_set = self.client.get_quota_set(self.demo_tenant_id)
- self.assertEqual(200, resp.status)
- self.assertEqual(quota_set['ram'], 5120)
- except Exception:
- self.fail("Could not get the update quota limit for resource")
- finally:
- # Reset quota resource limits to default values
- resp, quota_set = self.adm_client.update_quota_set(
- self.demo_tenant_id,
- **self.default_quota_set)
- self.assertEqual(200, resp.status, "Failed to reset quota "
- "defaults")
+ tenant_name = rand_name('cpu_quota_tenant_')
+ tenant_desc = tenant_name + '-desc'
+ identity_client = self.os_adm.identity_client
+ _, tenant = identity_client.create_tenant(name=tenant_name,
+ description=tenant_desc)
+ tenant_id = tenant['id']
+ self.addCleanup(identity_client.delete_tenant,
+ tenant_id)
+ self.adm_client.update_quota_set(tenant_id,
+ ram='5120')
+ resp, quota_set = self.adm_client.get_quota_set(tenant_id)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(quota_set['ram'], 5120)
+
+    # TODO(afazekas): Add dedicated tenant to the skipped quota tests
+ # it can be moved into the setUpClass as well
@testtools.skip("Skipped until the Bug #1160749 is resolved")
@attr(type='gate')
def test_create_server_when_cpu_quota_is_full(self):
@@ -155,12 +146,12 @@
ram=default_mem_quota)
self.assertRaises(exceptions.OverLimit, self.create_server)
-#TODO(afazekas): Add test that tried to update the quota_set as a regular user
+# TODO(afazekas): Add test that tries to update the quota_set as a regular user
@testtools.skip("Skipped until the Bug #1160749 is resolved")
@attr(type=['negative', 'gate'])
def test_create_server_when_instances_quota_is_full(self):
- #Once instances quota limit is reached, disallow server creation
+ # Once instances quota limit is reached, disallow server creation
resp, quota_set = self.client.get_quota_set(self.demo_tenant_id)
default_instances_quota = quota_set['instances']
instances_quota = 0 # Set quota to zero to disallow server creation
diff --git a/tempest/api/compute/admin/test_services.py b/tempest/api/compute/admin/test_services.py
index 78dac21..434ea2f 100644
--- a/tempest/api/compute/admin/test_services.py
+++ b/tempest/api/compute/admin/test_services.py
@@ -1,6 +1,7 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 NEC Corporation
+# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -36,17 +37,99 @@
@attr(type='gate')
def test_list_services(self):
- # List Compute services
resp, services = self.client.list_services()
self.assertEqual(200, resp.status)
- self.assertTrue(len(services) >= 2)
+ self.assertNotEqual(0, len(services))
@attr(type=['negative', 'gate'])
def test_list_services_with_non_admin_user(self):
- # List Compute service with non admin user
self.assertRaises(exceptions.Unauthorized,
self.non_admin_client.list_services)
+ @attr(type='gate')
+ def test_get_service_by_service_binary_name(self):
+ binary_name = 'nova-compute'
+ params = {'binary': binary_name}
+ resp, services = self.client.list_services(params)
+ self.assertEqual(200, resp.status)
+ self.assertNotEqual(0, len(services))
+ for service in services:
+ self.assertEqual(binary_name, service['binary'])
+
+ @attr(type='gate')
+ def test_get_service_by_host_name(self):
+ resp, services = self.client.list_services()
+ host_name = services[0]['host']
+ services_on_host = [service for service in services if
+ service['host'] == host_name]
+ params = {'host': host_name}
+ resp, services = self.client.list_services(params)
+
+        # we could have a periodic job check in between the 2 service
+ # lookups, so only compare binary lists.
+ s1 = map(lambda x: x['binary'], services)
+ s2 = map(lambda x: x['binary'], services_on_host)
+
+ # sort the lists before comparing, to take out dependency
+ # on order.
+ self.assertEqual(sorted(s1), sorted(s2))
+
+ @attr(type=['negative', 'gate'])
+ def test_get_service_by_invalid_params(self):
+ # return all services if send the request with invalid parameter
+ resp, services = self.client.list_services()
+ params = {'xxx': 'nova-compute'}
+ resp, services_xxx = self.client.list_services(params)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(len(services), len(services_xxx))
+
+ @attr(type='gate')
+ def test_get_service_by_service_and_host_name(self):
+ resp, services = self.client.list_services()
+ host_name = services[0]['host']
+ binary_name = services[0]['binary']
+ params = {'host': host_name, 'binary': binary_name}
+ resp, services = self.client.list_services(params)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(1, len(services))
+ self.assertEqual(host_name, services[0]['host'])
+ self.assertEqual(binary_name, services[0]['binary'])
+
+ @attr(type=['negative', 'gate'])
+ def test_get_service_by_invalid_service_and_valid_host(self):
+ resp, services = self.client.list_services()
+ host_name = services[0]['host']
+ params = {'host': host_name, 'binary': 'xxx'}
+ resp, services = self.client.list_services(params)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(0, len(services))
+
+ @attr(type=['negative', 'gate'])
+ def test_get_service_with_valid_service_and_invalid_host(self):
+ resp, services = self.client.list_services()
+ binary_name = services[0]['binary']
+ params = {'host': 'xxx', 'binary': binary_name}
+ resp, services = self.client.list_services(params)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(0, len(services))
+
+ @attr(type='gate')
+ def test_service_enable_disable(self):
+ resp, services = self.client.list_services()
+ host_name = services[0]['host']
+ binary_name = services[0]['binary']
+
+ resp, service = self.client.disable_service(host_name, binary_name)
+ self.assertEqual(200, resp.status)
+ params = {'host': host_name, 'binary': binary_name}
+ resp, services = self.client.list_services(params)
+ self.assertEqual('disabled', services[0]['status'])
+
+ resp, service = self.client.enable_service(host_name, binary_name)
+ self.assertEqual(200, resp.status)
+ resp, services = self.client.list_services(params)
+ self.assertEqual('enabled', services[0]['status'])
+
class ServicesAdminTestXML(ServicesAdminTestJSON):
_interface = 'xml'
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index abc5899..acf0275 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -19,10 +19,10 @@
from tempest.api import compute
from tempest import clients
-from tempest.common import log as logging
+from tempest.common import isolated_creds
from tempest.common.utils.data_utils import parse_image_id
from tempest.common.utils.data_utils import rand_name
-from tempest import exceptions
+from tempest.openstack.common import log as logging
import tempest.test
@@ -36,10 +36,14 @@
@classmethod
def setUpClass(cls):
- cls.isolated_creds = []
+ super(BaseComputeTest, cls).setUpClass()
+ if not cls.config.service_available.nova:
+ skip_msg = ("%s skipped as nova is not available" % cls.__name__)
+ raise cls.skipException(skip_msg)
+ cls.isolated_creds = isolated_creds.IsolatedCreds(cls.__name__)
if cls.config.compute.allow_tenant_isolation:
- creds = cls._get_isolated_creds()
+ creds = cls.isolated_creds.get_primary_creds()
username, tenant_name, password = creds
os = clients.Manager(username=username,
password=password,
@@ -79,89 +83,6 @@
cls.servers_client_v3_auth = os.servers_client_v3_auth
@classmethod
- def _get_identity_admin_client(cls):
- """
- Returns an instance of the Identity Admin API client
- """
- os = clients.AdminManager(interface=cls._interface)
- admin_client = os.identity_client
- return admin_client
-
- @classmethod
- def _get_client_args(cls):
-
- return (
- cls.config,
- cls.config.identity.admin_username,
- cls.config.identity.admin_password,
- cls.config.identity.uri
- )
-
- @classmethod
- def _get_isolated_creds(cls):
- """
- Creates a new set of user/tenant/password credentials for a
- **regular** user of the Compute API so that a test case can
- operate in an isolated tenant container.
- """
- admin_client = cls._get_identity_admin_client()
- password = "pass"
-
- while True:
- try:
- rand_name_root = rand_name(cls.__name__)
- if cls.isolated_creds:
- # Main user already created. Create the alt one...
- rand_name_root += '-alt'
- tenant_name = rand_name_root + "-tenant"
- tenant_desc = tenant_name + "-desc"
-
- resp, tenant = admin_client.create_tenant(
- name=tenant_name, description=tenant_desc)
- break
- except exceptions.Duplicate:
- if cls.config.compute.allow_tenant_reuse:
- tenant = admin_client.get_tenant_by_name(tenant_name)
- LOG.info('Re-using existing tenant %s', tenant)
- break
-
- while True:
- try:
- rand_name_root = rand_name(cls.__name__)
- if cls.isolated_creds:
- # Main user already created. Create the alt one...
- rand_name_root += '-alt'
- username = rand_name_root + "-user"
- email = rand_name_root + "@example.com"
- resp, user = admin_client.create_user(username,
- password,
- tenant['id'],
- email)
- break
- except exceptions.Duplicate:
- if cls.config.compute.allow_tenant_reuse:
- user = admin_client.get_user_by_username(tenant['id'],
- username)
- LOG.info('Re-using existing user %s', user)
- break
- # Store the complete creds (including UUID ids...) for later
- # but return just the username, tenant_name, password tuple
- # that the various clients will use.
- cls.isolated_creds.append((user, tenant))
-
- return username, tenant_name, password
-
- @classmethod
- def clear_isolated_creds(cls):
- if not cls.isolated_creds:
- return
- admin_client = cls._get_identity_admin_client()
-
- for user, tenant in cls.isolated_creds:
- admin_client.delete_user(user['id'])
- admin_client.delete_tenant(tenant['id'])
-
- @classmethod
def clear_servers(cls):
for server in cls.servers:
try:
@@ -189,7 +110,8 @@
def tearDownClass(cls):
cls.clear_images()
cls.clear_servers()
- cls.clear_isolated_creds()
+ cls.isolated_creds.clear_isolated_creds()
+ super(BaseComputeTest, cls).tearDownClass()
@classmethod
def create_server(cls, **kwargs):
@@ -263,10 +185,16 @@
admin_username = cls.config.compute_admin.username
admin_password = cls.config.compute_admin.password
admin_tenant = cls.config.compute_admin.tenant_name
-
if not (admin_username and admin_password and admin_tenant):
msg = ("Missing Compute Admin API credentials "
"in configuration.")
raise cls.skipException(msg)
-
- cls.os_adm = clients.ComputeAdminManager(interface=cls._interface)
+ if cls.config.compute.allow_tenant_isolation:
+ creds = cls.isolated_creds.get_admin_creds()
+ admin_username, admin_tenant_name, admin_password = creds
+ cls.os_adm = clients.Manager(username=admin_username,
+ password=admin_password,
+ tenant_name=admin_tenant_name,
+ interface=cls._interface)
+ else:
+ cls.os_adm = clients.ComputeAdminManager(interface=cls._interface)
diff --git a/tempest/api/compute/flavors/test_flavors.py b/tempest/api/compute/flavors/test_flavors.py
index 27526eb..51ce20c 100644
--- a/tempest/api/compute/flavors/test_flavors.py
+++ b/tempest/api/compute/flavors/test_flavors.py
@@ -35,14 +35,14 @@
resp, flavor = self.client.get_flavor_details(self.flavor_ref)
flavor_min_detail = {'id': flavor['id'], 'links': flavor['links'],
'name': flavor['name']}
- self.assertTrue(flavor_min_detail in flavors)
+ self.assertIn(flavor_min_detail, flavors)
@attr(type='smoke')
def test_list_flavors_with_detail(self):
# Detailed list of all flavors should contain the expected flavor
resp, flavors = self.client.list_flavors_with_detail()
resp, flavor = self.client.get_flavor_details(self.flavor_ref)
- self.assertTrue(flavor in flavors)
+ self.assertIn(flavor, flavors)
@attr(type='smoke')
def test_get_flavor(self):
diff --git a/tempest/api/compute/floating_ips/test_floating_ips_actions.py b/tempest/api/compute/floating_ips/test_floating_ips_actions.py
index 0d7f26d..930ebcb 100644
--- a/tempest/api/compute/floating_ips/test_floating_ips_actions.py
+++ b/tempest/api/compute/floating_ips/test_floating_ips_actions.py
@@ -32,15 +32,15 @@
cls.client = cls.floating_ips_client
cls.servers_client = cls.servers_client
- #Server creation
+ # Server creation
resp, server = cls.create_server(wait_until='ACTIVE')
cls.server_id = server['id']
resp, body = cls.servers_client.get_server(server['id'])
- #Floating IP creation
+ # Floating IP creation
resp, body = cls.client.create_floating_ip()
cls.floating_ip_id = body['id']
cls.floating_ip = body['ip']
- #Generating a nonexistent floatingIP id
+ # Generating a nonexistent floatingIP id
cls.floating_ip_ids = []
resp, body = cls.client.list_floating_ips()
for i in range(len(body)):
@@ -52,7 +52,7 @@
@classmethod
def tearDownClass(cls):
- #Deleting the floating IP which is created in this method
+ # Deleting the floating IP which is created in this method
resp, body = cls.client.delete_floating_ip(cls.floating_ip_id)
super(FloatingIPsTestJSON, cls).tearDownClass()
@@ -66,17 +66,17 @@
floating_ip_id_allocated = body['id']
resp, floating_ip_details = \
self.client.get_floating_ip_details(floating_ip_id_allocated)
- #Checking if the details of allocated IP is in list of floating IP
+ # Checking if the details of allocated IP is in list of floating IP
resp, body = self.client.list_floating_ips()
- self.assertTrue(floating_ip_details in body)
+ self.assertIn(floating_ip_details, body)
finally:
- #Deleting the floating IP which is created in this method
+ # Deleting the floating IP which is created in this method
self.client.delete_floating_ip(floating_ip_id_allocated)
@attr(type=['negative', 'gate'])
def test_allocate_floating_ip_from_nonexistent_pool(self):
# Positive test:Allocation of a new floating IP from a nonexistent_pool
- #to a project should fail
+ # to a project should fail
self.assertRaises(exceptions.NotFound,
self.client.create_floating_ip,
"non_exist_pool")
@@ -85,12 +85,12 @@
def test_delete_floating_ip(self):
# Positive test:Deletion of valid floating IP from project
# should be successful
- #Creating the floating IP that is to be deleted in this method
+ # Creating the floating IP that is to be deleted in this method
resp, floating_ip_body = self.client.create_floating_ip()
- #Storing the details of floating IP before deleting it
+ # Storing the details of floating IP before deleting it
cli_resp = self.client.get_floating_ip_details(floating_ip_body['id'])
resp, floating_ip_details = cli_resp
- #Deleting the floating IP from the project
+ # Deleting the floating IP from the project
resp, body = self.client.delete_floating_ip(floating_ip_body['id'])
self.assertEqual(202, resp.status)
# Check it was really deleted.
@@ -101,12 +101,12 @@
# Positive test:Associate and disassociate the provided floating IP
# to a specific server should be successful
- #Association of floating IP to fixed IP address
+ # Association of floating IP to fixed IP address
resp, body = self.client.associate_floating_ip_to_server(
self.floating_ip,
self.server_id)
self.assertEqual(202, resp.status)
- #Disassociation of floating IP that was associated in this method
+ # Disassociation of floating IP that was associated in this method
resp, body = self.client.disassociate_floating_ip_from_server(
self.floating_ip,
self.server_id)
@@ -142,18 +142,18 @@
def test_associate_already_associated_floating_ip(self):
# positive test:Association of an already associated floating IP
# to specific server should change the association of the Floating IP
- #Create server so as to use for Multiple association
+ # Create server so as to use for Multiple association
resp, body = self.servers_client.create_server('floating-server2',
self.image_ref,
self.flavor_ref)
self.servers_client.wait_for_server_status(body['id'], 'ACTIVE')
self.new_server_id = body['id']
- #Associating floating IP for the first time
+ # Associating floating IP for the first time
resp, _ = self.client.associate_floating_ip_to_server(
self.floating_ip,
self.server_id)
- #Associating floating IP for the second time
+ # Associating floating IP for the second time
resp, body = self.client.associate_floating_ip_to_server(
self.floating_ip,
self.new_server_id)
diff --git a/tempest/api/compute/floating_ips/test_list_floating_ips.py b/tempest/api/compute/floating_ips/test_list_floating_ips.py
index 3e1aa82..e380334 100644
--- a/tempest/api/compute/floating_ips/test_list_floating_ips.py
+++ b/tempest/api/compute/floating_ips/test_list_floating_ips.py
@@ -51,12 +51,12 @@
self.assertNotEqual(0, len(floating_ips),
"Expected floating IPs. Got zero.")
for i in range(3):
- self.assertTrue(self.floating_ip[i] in floating_ips)
+ self.assertIn(self.floating_ip[i], floating_ips)
@attr(type='gate')
def test_get_floating_ip_details(self):
# Positive test:Should be able to GET the details of floatingIP
- #Creating a floating IP for which details are to be checked
+ # Creating a floating IP for which details are to be checked
try:
resp, body = self.client.create_floating_ip()
floating_ip_instance_id = body['instance_id']
@@ -66,14 +66,14 @@
resp, body = \
self.client.get_floating_ip_details(floating_ip_id)
self.assertEqual(200, resp.status)
- #Comparing the details of floating IP
+ # Comparing the details of floating IP
self.assertEqual(floating_ip_instance_id,
body['instance_id'])
self.assertEqual(floating_ip_ip, body['ip'])
self.assertEqual(floating_ip_fixed_ip,
body['fixed_ip'])
self.assertEqual(floating_ip_id, body['id'])
- #Deleting the floating IP created in this method
+ # Deleting the floating IP created in this method
finally:
self.client.delete_floating_ip(floating_ip_id)
@@ -85,7 +85,7 @@
resp, body = self.client.list_floating_ips()
for i in range(len(body)):
floating_ip_id.append(body[i]['id'])
+        # Creating a nonexistent floatingIP id
+ # Creating a nonexistant floatingIP id
while True:
non_exist_id = rand_name('999')
if non_exist_id not in floating_ip_id:
diff --git a/tempest/api/compute/images/test_image_metadata.py b/tempest/api/compute/images/test_image_metadata.py
index 7b8e1cc..52239cd 100644
--- a/tempest/api/compute/images/test_image_metadata.py
+++ b/tempest/api/compute/images/test_image_metadata.py
@@ -27,6 +27,10 @@
@classmethod
def setUpClass(cls):
super(ImagesMetadataTestJSON, cls).setUpClass()
+ if not cls.config.service_available.glance:
+ skip_msg = ("%s skipped as glance is not available" % cls.__name__)
+ raise cls.skipException(skip_msg)
+
cls.servers_client = cls.servers_client
cls.client = cls.images_client
diff --git a/tempest/api/compute/images/test_images.py b/tempest/api/compute/images/test_images.py
index a74bb68..2f0ed6b 100644
--- a/tempest/api/compute/images/test_images.py
+++ b/tempest/api/compute/images/test_images.py
@@ -1,7 +1,6 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack, LLC
-# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -30,6 +29,9 @@
@classmethod
def setUpClass(cls):
super(ImagesTestJSON, cls).setUpClass()
+ if not cls.config.service_available.glance:
+ skip_msg = ("%s skipped as glance is not available" % cls.__name__)
+ raise cls.skipException(skip_msg)
cls.client = cls.images_client
cls.servers_client = cls.servers_client
@@ -37,7 +39,7 @@
if compute.MULTI_USER:
if cls.config.compute.allow_tenant_isolation:
- creds = cls._get_isolated_creds()
+ creds = cls.isolated_creds.get_alt_creds()
username, tenant_name, password = creds
cls.alt_manager = clients.Manager(username=username,
password=password,
@@ -89,24 +91,29 @@
'!@#$%^&*()', name, meta)
@attr(type=['negative', 'gate'])
- def test_create_image_when_server_is_terminating(self):
- # Return an error when creating image of server that is terminating
+ def test_create_image_from_stopped_server(self):
resp, server = self.create_server(wait_until='ACTIVE')
- self.servers_client.delete_server(server['id'])
-
+ self.servers_client.stop(server['id'])
+ self.servers_client.wait_for_server_status(server['id'],
+ 'SHUTOFF')
+ self.addCleanup(self.servers_client.delete_server, server['id'])
snapshot_name = rand_name('test-snap-')
- self.assertRaises(exceptions.Duplicate, self.client.create_image,
- server['id'], snapshot_name)
+ resp, image = self.create_image_from_server(server['id'],
+ name=snapshot_name,
+ wait_until='ACTIVE')
+ self.addCleanup(self.client.delete_image, image['id'])
+ self.assertEqual(snapshot_name, image['name'])
- @attr(type=['negative', 'gate'])
- def test_create_image_when_server_is_rebooting(self):
- # Return error when creating an image of server that is rebooting
+ @attr(type='gate')
+ def test_delete_saving_image(self):
+ snapshot_name = rand_name('test-snap-')
resp, server = self.create_server(wait_until='ACTIVE')
- self.servers_client.reboot(server['id'], 'HARD')
-
- snapshot_name = rand_name('test-snap-')
- self.assertRaises(exceptions.Duplicate, self.client.create_image,
- server['id'], snapshot_name)
+ self.addCleanup(self.servers_client.delete_server, server['id'])
+ resp, image = self.create_image_from_server(server['id'],
+ name=snapshot_name,
+ wait_until='SAVING')
+ resp, body = self.client.delete_image(image['id'])
+ self.assertEqual('204', resp['status'])
@attr(type=['negative', 'gate'])
def test_create_image_specify_uuid_35_characters_or_less(self):
diff --git a/tempest/api/compute/images/test_images_oneserver.py b/tempest/api/compute/images/test_images_oneserver.py
index 7740cfc..0052a30 100644
--- a/tempest/api/compute/images/test_images_oneserver.py
+++ b/tempest/api/compute/images/test_images_oneserver.py
@@ -40,6 +40,9 @@
def setUpClass(cls):
super(ImagesOneServerTestJSON, cls).setUpClass()
cls.client = cls.images_client
+ if not cls.config.service_available.glance:
+ skip_msg = ("%s skipped as glance is not available" % cls.__name__)
+ raise cls.skipException(skip_msg)
try:
resp, cls.server = cls.create_server(wait_until='ACTIVE')
@@ -51,7 +54,7 @@
if compute.MULTI_USER:
if cls.config.compute.allow_tenant_isolation:
- creds = cls._get_isolated_creds()
+ creds = cls.isolated_creds.get_alt_creds()
username, tenant_name, password = creds
cls.alt_manager = clients.Manager(username=username,
password=password,
@@ -86,23 +89,6 @@
self.assertRaises(exceptions.BadRequest, self.client.create_image,
self.server['id'], snapshot_name, meta)
- @testtools.skipUnless(compute.MULTI_USER,
- 'Need multiple users for this test.')
- @attr(type=['negative', 'gate'])
- def test_delete_image_of_another_tenant(self):
- # Return an error while trying to delete another tenant's image
- self.servers_client.wait_for_server_status(self.server['id'], 'ACTIVE')
- snapshot_name = rand_name('test-snap-')
- resp, body = self.client.create_image(self.server['id'], snapshot_name)
- image_id = parse_image_id(resp['location'])
- self.image_ids.append(image_id)
- self.client.wait_for_image_resp_code(image_id, 200)
- self.client.wait_for_image_status(image_id, 'ACTIVE')
-
- # Delete image
- self.assertRaises(exceptions.NotFound,
- self.alt_client.delete_image, image_id)
-
def _get_default_flavor_disk_size(self, flavor_id):
resp, flavor = self.flavors_client.get_flavor_details(flavor_id)
return flavor['disk']
@@ -141,16 +127,6 @@
self.assertEqual('204', resp['status'])
self.client.wait_for_resource_deletion(image_id)
- @testtools.skipUnless(compute.MULTI_USER,
- 'Need multiple users for this test.')
- @attr(type=['negative', 'gate'])
- def test_create_image_for_server_in_another_tenant(self):
- # Creating image of another tenant's server should be return error
-
- snapshot_name = rand_name('test-snap-')
- self.assertRaises(exceptions.NotFound, self.alt_client.create_image,
- self.server['id'], snapshot_name)
-
@attr(type=['negative', 'gate'])
def test_create_second_image_when_first_image_is_being_saved(self):
# Disallow creating another image when first image is being saved
diff --git a/tempest/api/compute/images/test_list_image_filters.py b/tempest/api/compute/images/test_list_image_filters.py
index 5c6b630..a80f456 100644
--- a/tempest/api/compute/images/test_list_image_filters.py
+++ b/tempest/api/compute/images/test_list_image_filters.py
@@ -16,9 +16,9 @@
# under the License.
from tempest.api.compute import base
-from tempest.common import log as logging
from tempest.common.utils.data_utils import parse_image_id
from tempest import exceptions
+from tempest.openstack.common import log as logging
from tempest.test import attr
@@ -31,6 +31,9 @@
@classmethod
def setUpClass(cls):
super(ListImageFiltersTestJSON, cls).setUpClass()
+ if not cls.config.service_available.glance:
+ skip_msg = ("%s skipped as glance is not available" % cls.__name__)
+ raise cls.skipException(skip_msg)
cls.client = cls.images_client
cls.image_ids = []
@@ -141,16 +144,16 @@
# Verify only the expected number of results are returned
params = {'limit': '1'}
resp, images = self.client.list_images(params)
- #when _interface='xml', one element for images_links in images
- #ref: Question #224349
+ # when _interface='xml', one element for images_links in images
+ # ref: Question #224349
self.assertEqual(1, len([x for x in images if 'id' in x]))
@attr(type='gate')
def test_list_images_filter_by_changes_since(self):
# Verify only updated images are returned in the detailed list
- #Becoming ACTIVE will modify the updated time
- #Filter by the image's created time
+ # Becoming ACTIVE will modify the updated time
+ # Filter by the image's created time
params = {'changes-since': self.image3['created']}
resp, images = self.client.list_images(params)
found = any([i for i in images if i['id'] == self.image3_id])
@@ -219,8 +222,8 @@
def test_list_images_with_detail_filter_by_changes_since(self):
# Verify an update image is returned
- #Becoming ACTIVE will modify the updated time
- #Filter by the image's created time
+ # Becoming ACTIVE will modify the updated time
+ # Filter by the image's created time
params = {'changes-since': self.image1['created']}
resp, images = self.client.list_images_with_detail(params)
self.assertTrue(any([i for i in images if i['id'] == self.image1_id]))
diff --git a/tempest/api/compute/images/test_list_images.py b/tempest/api/compute/images/test_list_images.py
index fddad14..c7e23b1 100644
--- a/tempest/api/compute/images/test_list_images.py
+++ b/tempest/api/compute/images/test_list_images.py
@@ -25,6 +25,9 @@
@classmethod
def setUpClass(cls):
super(ListImagesTestJSON, cls).setUpClass()
+ if not cls.config.service_available.glance:
+ skip_msg = ("%s skipped as glance is not available" % cls.__name__)
+ raise cls.skipException(skip_msg)
cls.client = cls.images_client
@attr(type='smoke')
diff --git a/tempest/api/compute/keypairs/test_keypairs.py b/tempest/api/compute/keypairs/test_keypairs.py
index 6abca3f..e4e87c0 100644
--- a/tempest/api/compute/keypairs/test_keypairs.py
+++ b/tempest/api/compute/keypairs/test_keypairs.py
@@ -32,33 +32,33 @@
@attr(type='gate')
def test_keypairs_create_list_delete(self):
# Keypairs created should be available in the response list
- #Create 3 keypairs
+ # Create 3 keypairs
key_list = list()
for i in range(3):
k_name = rand_name('keypair-')
resp, keypair = self.client.create_keypair(k_name)
- #Need to pop these keys so that our compare doesn't fail later,
- #as the keypair dicts from list API doesn't have them.
+ # Need to pop these keys so that our compare doesn't fail later,
+ # as the keypair dicts from list API doesn't have them.
keypair.pop('private_key')
keypair.pop('user_id')
self.assertEqual(200, resp.status)
key_list.append(keypair)
- #Fetch all keypairs and verify the list
- #has all created keypairs
+ # Fetch all keypairs and verify the list
+ # has all created keypairs
resp, fetched_list = self.client.list_keypairs()
self.assertEqual(200, resp.status)
- #We need to remove the extra 'keypair' element in the
- #returned dict. See comment in keypairs_client.list_keypairs()
+ # We need to remove the extra 'keypair' element in the
+ # returned dict. See comment in keypairs_client.list_keypairs()
new_list = list()
for keypair in fetched_list:
new_list.append(keypair['keypair'])
fetched_list = new_list
- #Now check if all the created keypairs are in the fetched list
+ # Now check if all the created keypairs are in the fetched list
missing_kps = [kp for kp in key_list if kp not in fetched_list]
self.assertFalse(missing_kps,
"Failed to find keypairs %s in fetched list"
% ', '.join(m_key['name'] for m_key in missing_kps))
- #Delete all the keypairs created
+ # Delete all the keypairs created
for keypair in key_list:
resp, _ = self.client.delete_keypair(keypair['name'])
self.assertEqual(202, resp.status)
@@ -87,8 +87,8 @@
try:
resp, keypair_detail = self.client.get_keypair(k_name)
self.assertEqual(200, resp.status)
- self.assertTrue('name' in keypair_detail)
- self.assertTrue('public_key' in keypair_detail)
+ self.assertIn('name', keypair_detail)
+ self.assertIn('public_key', keypair_detail)
self.assertEqual(keypair_detail['name'], k_name,
"The created keypair name is not equal "
"to requested name")
@@ -163,7 +163,7 @@
k_name = rand_name('keypair-')
resp, _ = self.client.create_keypair(k_name)
self.assertEqual(200, resp.status)
- #Now try the same keyname to ceate another key
+        # Now try the same keyname to create another key
self.assertRaises(exceptions.Duplicate, self.client.create_keypair,
k_name)
resp, _ = self.client.delete_keypair(k_name)
diff --git a/tempest/api/compute/limits/test_absolute_limits.py b/tempest/api/compute/limits/test_absolute_limits.py
index beae122..972e4a8 100644
--- a/tempest/api/compute/limits/test_absolute_limits.py
+++ b/tempest/api/compute/limits/test_absolute_limits.py
@@ -51,11 +51,11 @@
@attr(type=['negative', 'gate'])
def test_max_image_meta_exceed_limit(self):
- #We should not create vm with image meta over maxImageMeta limit
+ # We should not create vm with image meta over maxImageMeta limit
# Get max limit value
max_meta = self.client.get_specific_absolute_limit('maxImageMeta')
- #Create server should fail, since we are passing > metadata Limit!
+ # Create server should fail, since we are passing > metadata Limit!
max_meta_data = int(max_meta) + 1
meta_data = {}
diff --git a/tempest/api/compute/security_groups/test_security_group_rules.py b/tempest/api/compute/security_groups/test_security_group_rules.py
index 6a32b64..472b8b4 100644
--- a/tempest/api/compute/security_groups/test_security_group_rules.py
+++ b/tempest/api/compute/security_groups/test_security_group_rules.py
@@ -15,8 +15,11 @@
# License for the specific language governing permissions and limitations
# under the License.
+import testtools
+
from tempest.api.compute import base
from tempest.common.utils.data_utils import rand_name
+from tempest import config
from tempest import exceptions
from tempest.test import attr
@@ -33,14 +36,14 @@
def test_security_group_rules_create(self):
# Positive test: Creation of Security Group rule
# should be successfull
- #Creating a Security Group to add rules to it
+ # Creating a Security Group to add rules to it
s_name = rand_name('securitygroup-')
s_description = rand_name('description-')
resp, securitygroup = \
self.client.create_security_group(s_name, s_description)
securitygroup_id = securitygroup['id']
self.addCleanup(self.client.delete_security_group, securitygroup_id)
- #Adding rules to the created Security Group
+ # Adding rules to the created Security Group
ip_protocol = 'tcp'
from_port = 22
to_port = 22
@@ -60,21 +63,21 @@
secgroup1 = None
secgroup2 = None
- #Creating a Security Group to add rules to it
+ # Creating a Security Group to add rules to it
s_name = rand_name('securitygroup-')
s_description = rand_name('description-')
resp, securitygroup = \
self.client.create_security_group(s_name, s_description)
secgroup1 = securitygroup['id']
self.addCleanup(self.client.delete_security_group, secgroup1)
- #Creating a Security Group so as to assign group_id to the rule
+ # Creating a Security Group so as to assign group_id to the rule
s_name2 = rand_name('securitygroup-')
s_description2 = rand_name('description-')
resp, securitygroup = \
self.client.create_security_group(s_name2, s_description2)
secgroup2 = securitygroup['id']
self.addCleanup(self.client.delete_security_group, secgroup2)
- #Adding rules to the created Security Group with optional arguments
+ # Adding rules to the created Security Group with optional arguments
parent_group_id = secgroup1
ip_protocol = 'tcp'
from_port = 22
@@ -91,6 +94,8 @@
self.addCleanup(self.client.delete_security_group_rule, rule['id'])
self.assertEqual(200, resp.status)
+ @testtools.skipIf(config.TempestConfig().service_available.neutron,
+ "Skipped until the Bug #1182384 is resolved")
@attr(type=['negative', 'gate'])
def test_security_group_rules_create_with_invalid_id(self):
# Negative test: Creation of Security Group rule should FAIL
@@ -108,12 +113,12 @@
def test_security_group_rules_create_with_invalid_ip_protocol(self):
# Negative test: Creation of Security Group rule should FAIL
# with invalid ip_protocol
- #Creating a Security Group to add rule to it
+ # Creating a Security Group to add rule to it
s_name = rand_name('securitygroup-')
s_description = rand_name('description-')
resp, securitygroup = self.client.create_security_group(s_name,
s_description)
- #Adding rules to the created Security Group
+ # Adding rules to the created Security Group
parent_group_id = securitygroup['id']
ip_protocol = rand_name('999')
from_port = 22
@@ -128,12 +133,12 @@
def test_security_group_rules_create_with_invalid_from_port(self):
# Negative test: Creation of Security Group rule should FAIL
# with invalid from_port
- #Creating a Security Group to add rule to it
+ # Creating a Security Group to add rule to it
s_name = rand_name('securitygroup-')
s_description = rand_name('description-')
resp, securitygroup = self.client.create_security_group(s_name,
s_description)
- #Adding rules to the created Security Group
+ # Adding rules to the created Security Group
parent_group_id = securitygroup['id']
ip_protocol = 'tcp'
from_port = rand_name('999')
@@ -147,12 +152,12 @@
def test_security_group_rules_create_with_invalid_to_port(self):
# Negative test: Creation of Security Group rule should FAIL
# with invalid from_port
- #Creating a Security Group to add rule to it
+ # Creating a Security Group to add rule to it
s_name = rand_name('securitygroup-')
s_description = rand_name('description-')
resp, securitygroup = self.client.create_security_group(s_name,
s_description)
- #Adding rules to the created Security Group
+ # Adding rules to the created Security Group
parent_group_id = securitygroup['id']
ip_protocol = 'tcp'
from_port = 22
@@ -181,6 +186,8 @@
self.client.create_security_group_rule,
secgroup_id, ip_protocol, from_port, to_port)
+ @testtools.skipIf(config.TempestConfig().service_available.neutron,
+ "Skipped until the Bug #1182384 is resolved")
@attr(type=['negative', 'gate'])
def test_security_group_rules_delete_with_invalid_id(self):
# Negative test: Deletion of Security Group rule should be FAIL
diff --git a/tempest/api/compute/security_groups/test_security_groups.py b/tempest/api/compute/security_groups/test_security_groups.py
index e105121..30db206 100644
--- a/tempest/api/compute/security_groups/test_security_groups.py
+++ b/tempest/api/compute/security_groups/test_security_groups.py
@@ -39,7 +39,7 @@
@attr(type='gate')
def test_security_groups_create_list_delete(self):
# Positive test:Should return the list of Security Groups
- #Create 3 Security Groups
+ # Create 3 Security Groups
security_group_list = list()
for i in range(3):
s_name = rand_name('securitygroup-')
@@ -50,11 +50,11 @@
self.addCleanup(self._delete_security_group,
securitygroup['id'])
security_group_list.append(securitygroup)
- #Fetch all Security Groups and verify the list
- #has all created Security Groups
+ # Fetch all Security Groups and verify the list
+ # has all created Security Groups
resp, fetched_list = self.client.list_security_groups()
self.assertEqual(200, resp.status)
- #Now check if all the created Security Groups are in fetched list
+ # Now check if all the created Security Groups are in fetched list
missing_sgs = \
[sg for sg in security_group_list if sg not in fetched_list]
self.assertFalse(missing_sgs,
@@ -62,8 +62,8 @@
"list" % ', '.join(m_group['name']
for m_group in missing_sgs))
- #TODO(afazekas): scheduled for delete,
- #test_security_group_create_get_delete covers it
+ # TODO(afazekas): scheduled for delete,
+ # test_security_group_create_get_delete covers it
@attr(type='gate')
def test_security_group_create_delete(self):
# Security Group should be created, verified and deleted
@@ -71,13 +71,13 @@
s_description = rand_name('description-')
resp, securitygroup = \
self.client.create_security_group(s_name, s_description)
- self.assertTrue('id' in securitygroup)
+ self.assertIn('id', securitygroup)
securitygroup_id = securitygroup['id']
self.addCleanup(self._delete_security_group,
securitygroup_id)
self.assertEqual(200, resp.status)
self.assertFalse(securitygroup_id is None)
- self.assertTrue('name' in securitygroup)
+ self.assertIn('name', securitygroup)
securitygroup_name = securitygroup['name']
self.assertEqual(securitygroup_name, s_name,
"The created Security Group name is "
@@ -94,12 +94,12 @@
securitygroup['id'])
self.assertEqual(200, resp.status)
- self.assertTrue('name' in securitygroup)
+ self.assertIn('name', securitygroup)
securitygroup_name = securitygroup['name']
self.assertEqual(securitygroup_name, s_name,
"The created Security Group name is "
"not equal to the requested name")
- #Now fetch the created Security Group by its 'id'
+ # Now fetch the created Security Group by its 'id'
resp, fetched_group = \
self.client.get_security_group(securitygroup['id'])
self.assertEqual(200, resp.status)
@@ -107,6 +107,8 @@
"The fetched Security Group is different "
"from the created Group")
+ @testtools.skipIf(config.TempestConfig().service_available.neutron,
+ "Skipped until the Bug #1182384 is resolved")
@attr(type=['negative', 'gate'])
def test_security_group_get_nonexistant_group(self):
# Negative test:Should not be able to GET the details
@@ -115,7 +117,7 @@
resp, body = self.client.list_security_groups()
for i in range(len(body)):
security_group_id.append(body[i]['id'])
- #Creating a nonexistant Security Group id
+ # Creating a nonexistant Security Group id
while True:
non_exist_id = rand_name('999')
if non_exist_id not in security_group_id:
@@ -123,6 +125,8 @@
self.assertRaises(exceptions.NotFound, self.client.get_security_group,
non_exist_id)
+ @testtools.skipIf(config.TempestConfig().service_available.neutron,
+ "Skipped until the Bug #1161411 is resolved")
@attr(type=['negative', 'gate'])
def test_security_group_create_with_invalid_group_name(self):
# Negative test: Security Group should not be created with group name
@@ -141,6 +145,8 @@
self.client.create_security_group, s_name,
s_description)
+ @testtools.skipIf(config.TempestConfig().service_available.neutron,
+ "Skipped until the Bug #1161411 is resolved")
@attr(type=['negative', 'gate'])
def test_security_group_create_with_invalid_group_description(self):
# Negative test:Security Group should not be created with description
@@ -158,7 +164,7 @@
self.client.create_security_group, s_name,
s_description)
- @testtools.skipIf(config.TempestConfig().network.neutron_available,
+ @testtools.skipIf(config.TempestConfig().service_available.neutron,
"Neutron allows duplicate names for security groups")
@attr(type=['negative', 'gate'])
def test_security_group_create_with_duplicate_name(self):
@@ -186,11 +192,13 @@
if body[i]['name'] == 'default':
default_security_group_id = body[i]['id']
break
- #Deleting the "default" Security Group
+ # Deleting the "default" Security Group
self.assertRaises(exceptions.BadRequest,
self.client.delete_security_group,
default_security_group_id)
+ @testtools.skipIf(config.TempestConfig().service_available.neutron,
+ "Skipped until the Bug #1182384 is resolved")
@attr(type=['negative', 'gate'])
def test_delete_nonexistant_security_group(self):
# Negative test:Deletion of a nonexistant Security Group should Fail
@@ -198,7 +206,7 @@
resp, body = self.client.list_security_groups()
for i in range(len(body)):
security_group_id.append(body[i]['id'])
- #Creating Non Existant Security Group
+ # Creating Non Existant Security Group
while True:
non_exist_id = rand_name('999')
if non_exist_id not in security_group_id:
diff --git a/tempest/api/compute/servers/test_attach_interfaces.py b/tempest/api/compute/servers/test_attach_interfaces.py
index de095c5..9f66a6c 100644
--- a/tempest/api/compute/servers/test_attach_interfaces.py
+++ b/tempest/api/compute/servers/test_attach_interfaces.py
@@ -24,7 +24,7 @@
@classmethod
def setUpClass(cls):
- if not cls.config.network.neutron_available:
+ if not cls.config.service_available.neutron:
raise cls.skipException("Neutron is required")
super(AttachInterfacesTestJSON, cls).setUpClass()
cls.client = cls.os.interfaces_client
diff --git a/tempest/api/compute/servers/test_disk_config.py b/tempest/api/compute/servers/test_disk_config.py
index e9385b5..efb01af 100644
--- a/tempest/api/compute/servers/test_disk_config.py
+++ b/tempest/api/compute/servers/test_disk_config.py
@@ -39,7 +39,7 @@
resp, server = self.create_server(disk_config='AUTO',
wait_until='ACTIVE')
- #Verify the specified attributes are set correctly
+ # Verify the specified attributes are set correctly
resp, server = self.client.get_server(server['id'])
self.assertEqual('AUTO', server['OS-DCF:diskConfig'])
@@ -47,14 +47,14 @@
self.image_ref_alt,
disk_config='MANUAL')
- #Wait for the server to become active
+ # Wait for the server to become active
self.client.wait_for_server_status(server['id'], 'ACTIVE')
- #Verify the specified attributes are set correctly
+ # Verify the specified attributes are set correctly
resp, server = self.client.get_server(server['id'])
self.assertEqual('MANUAL', server['OS-DCF:diskConfig'])
- #Delete the server
+ # Delete the server
resp, body = self.client.delete_server(server['id'])
@attr(type='gate')
@@ -63,7 +63,7 @@
resp, server = self.create_server(disk_config='MANUAL',
wait_until='ACTIVE')
- #Verify the specified attributes are set correctly
+ # Verify the specified attributes are set correctly
resp, server = self.client.get_server(server['id'])
self.assertEqual('MANUAL', server['OS-DCF:diskConfig'])
@@ -71,14 +71,14 @@
self.image_ref_alt,
disk_config='AUTO')
- #Wait for the server to become active
+ # Wait for the server to become active
self.client.wait_for_server_status(server['id'], 'ACTIVE')
- #Verify the specified attributes are set correctly
+ # Verify the specified attributes are set correctly
resp, server = self.client.get_server(server['id'])
self.assertEqual('AUTO', server['OS-DCF:diskConfig'])
- #Delete the server
+ # Delete the server
resp, body = self.client.delete_server(server['id'])
@testtools.skipUnless(compute.RESIZE_AVAILABLE, 'Resize not available.')
@@ -88,7 +88,7 @@
resp, server = self.create_server(disk_config='MANUAL',
wait_until='ACTIVE')
- #Resize with auto option
+ # Resize with auto option
self.client.resize(server['id'], self.flavor_ref_alt,
disk_config='AUTO')
self.client.wait_for_server_status(server['id'], 'VERIFY_RESIZE')
@@ -98,7 +98,7 @@
resp, server = self.client.get_server(server['id'])
self.assertEqual('AUTO', server['OS-DCF:diskConfig'])
- #Delete the server
+ # Delete the server
resp, body = self.client.delete_server(server['id'])
@testtools.skipUnless(compute.RESIZE_AVAILABLE, 'Resize not available.')
@@ -108,7 +108,7 @@
resp, server = self.create_server(disk_config='AUTO',
wait_until='ACTIVE')
- #Resize with manual option
+ # Resize with manual option
self.client.resize(server['id'], self.flavor_ref_alt,
disk_config='MANUAL')
self.client.wait_for_server_status(server['id'], 'VERIFY_RESIZE')
@@ -118,7 +118,7 @@
resp, server = self.client.get_server(server['id'])
self.assertEqual('MANUAL', server['OS-DCF:diskConfig'])
- #Delete the server
+ # Delete the server
resp, body = self.client.delete_server(server['id'])
diff --git a/tempest/api/compute/servers/test_list_server_filters.py b/tempest/api/compute/servers/test_list_server_filters.py
index 31b44f7..b8f965c 100644
--- a/tempest/api/compute/servers/test_list_server_filters.py
+++ b/tempest/api/compute/servers/test_list_server_filters.py
@@ -133,7 +133,7 @@
# Verify only the expected number of servers are returned
params = {'limit': 1}
resp, servers = self.client.list_servers(params)
- #when _interface='xml', one element for servers_links in servers
+ # when _interface='xml', one element for servers_links in servers
self.assertEqual(1, len([x for x in servers['servers'] if 'id' in x]))
@utils.skip_unless_attr('multiple_images', 'Only one image found')
diff --git a/tempest/api/compute/servers/test_list_servers_negative.py b/tempest/api/compute/servers/test_list_servers_negative.py
index db9bdc1..14ea174 100644
--- a/tempest/api/compute/servers/test_list_servers_negative.py
+++ b/tempest/api/compute/servers/test_list_servers_negative.py
@@ -35,7 +35,7 @@
if compute.MULTI_USER:
if cls.config.compute.allow_tenant_isolation:
- creds = cls._get_isolated_creds()
+ creds = cls.isolated_creds.get_alt_creds()
username, tenant_name, password = creds
cls.alt_manager = clients.Manager(username=username,
password=password,
@@ -59,8 +59,9 @@
if num_servers > 0:
username = cls.os.username
tenant_name = cls.os.tenant_name
- msg = ("User/tenant %(username)s/%(tenant_name)s already have "
- "existing server instances. Skipping test.") % locals()
+ msg = ("User/tenant %(u)s/%(t)s already have "
+ "existing server instances. Skipping test." %
+ {'u': username, 't': tenant_name})
raise cls.skipException(msg)
resp, body = cls.alt_client.list_servers()
@@ -69,8 +70,9 @@
if num_servers > 0:
username = cls.alt_manager.username
tenant_name = cls.alt_manager.tenant_name
- msg = ("Alt User/tenant %(username)s/%(tenant_name)s already have "
- "existing server instances. Skipping test.") % locals()
+ msg = ("Alt User/tenant %(u)s/%(t)s already have "
+ "existing server instances. Skipping test." %
+ {'u': username, 't': tenant_name})
raise cls.skipException(msg)
# The following servers are created for use
@@ -93,7 +95,7 @@
ignore_error=True)
cls.deleted_fixtures.append(srv)
- @attr(type='gate')
+ @attr(type=['negative', 'gate'])
def test_list_servers_with_a_deleted_server(self):
# Verify deleted servers do not show by default in list servers
# List servers and verify server not returned
@@ -105,7 +107,7 @@
self.assertEqual('200', resp['status'])
self.assertEqual([], actual)
- @attr(type='gate')
+ @attr(type=['negative', 'gate'])
def test_list_servers_by_non_existing_image(self):
# Listing servers for a non existing image returns empty list
non_existing_image = '1234abcd-zzz0-aaa9-ppp3-0987654abcde'
@@ -114,7 +116,7 @@
self.assertEqual('200', resp['status'])
self.assertEqual([], servers)
- @attr(type='gate')
+ @attr(type=['negative', 'gate'])
def test_list_servers_by_non_existing_flavor(self):
# Listing servers by non existing flavor returns empty list
non_existing_flavor = 1234
@@ -123,7 +125,7 @@
self.assertEqual('200', resp['status'])
self.assertEqual([], servers)
- @attr(type='gate')
+ @attr(type=['negative', 'gate'])
def test_list_servers_by_non_existing_server_name(self):
# Listing servers for a non existent server name returns empty list
non_existing_name = 'junk_server_1234'
@@ -132,7 +134,7 @@
self.assertEqual('200', resp['status'])
self.assertEqual([], servers)
- @attr(type='gate')
+ @attr(type=['negative', 'gate'])
def test_list_servers_status_non_existing(self):
# Return an empty list when invalid status is specified
non_existing_status = 'BALONEY'
@@ -146,23 +148,23 @@
# List servers by specifying limits
resp, body = self.client.list_servers({'limit': 1})
self.assertEqual('200', resp['status'])
- #when _interface='xml', one element for servers_links in servers
+ # when _interface='xml', one element for servers_links in servers
self.assertEqual(1, len([x for x in body['servers'] if 'id' in x]))
- @attr(type='gate')
+ @attr(type=['negative', 'gate'])
def test_list_servers_by_limits_greater_than_actual_count(self):
# List servers by specifying a greater value for limit
resp, body = self.client.list_servers({'limit': 100})
self.assertEqual('200', resp['status'])
self.assertEqual(len(self.existing_fixtures), len(body['servers']))
- @attr(type='gate')
+ @attr(type=['negative', 'gate'])
def test_list_servers_by_limits_pass_string(self):
# Return an error if a string value is passed for limit
self.assertRaises(exceptions.BadRequest, self.client.list_servers,
{'limit': 'testing'})
- @attr(type='gate')
+ @attr(type=['negative', 'gate'])
def test_list_servers_by_limits_pass_negative_value(self):
# Return an error if a negative value for limit is passed
self.assertRaises(exceptions.BadRequest, self.client.list_servers,
@@ -180,13 +182,13 @@
len(self.deleted_fixtures))
self.assertEqual(num_expected, len(body['servers']))
- @attr(type='gate')
+ @attr(type=['negative', 'gate'])
def test_list_servers_by_changes_since_invalid_date(self):
# Return an error when invalid date format is passed
self.assertRaises(exceptions.BadRequest, self.client.list_servers,
{'changes-since': '2011/01/01'})
- @attr(type='gate')
+ @attr(type=['negative', 'gate'])
def test_list_servers_by_changes_since_future_date(self):
# Return an empty list when a date in the future is passed
changes_since = {'changes-since': '2051-01-01T12:34:00Z'}
@@ -194,7 +196,7 @@
self.assertEqual('200', resp['status'])
self.assertEqual(0, len(body['servers']))
- @attr(type='gate')
+ @attr(type=['negative', 'gate'])
def test_list_servers_detail_server_is_deleted(self):
# Server details are not listed for a deleted server
deleted_ids = [s['id'] for s in self.deleted_fixtures]
diff --git a/tempest/api/compute/servers/test_multiple_create.py b/tempest/api/compute/servers/test_multiple_create.py
index 9fde618..edfafec 100644
--- a/tempest/api/compute/servers/test_multiple_create.py
+++ b/tempest/api/compute/servers/test_multiple_create.py
@@ -47,7 +47,7 @@
# reservation_id is not in the response body when the request send
# contains return_reservation_id=False
self.assertEqual('202', resp['status'])
- self.assertFalse('reservation_id' in body)
+ self.assertNotIn('reservation_id', body)
@attr(type=['negative', 'gate'])
def test_min_count_less_than_one(self):
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index 304a512..893d9e0 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -35,7 +35,7 @@
run_ssh = tempest.config.TempestConfig().compute.run_ssh
def setUp(self):
- #NOTE(afazekas): Normally we use the same server with all test cases,
+ # NOTE(afazekas): Normally we use the same server with all test cases,
# but if it has an issue, we build a new one
super(ServerActionsTestJSON, self).setUp()
# Check if the server is in a clean state after test
@@ -112,7 +112,7 @@
meta = {'rebuild': 'server'}
new_name = rand_name('server')
file_contents = 'Test server rebuild.'
- personality = [{'path': '/etc/rebuild.txt',
+ personality = [{'path': 'rebuild.txt',
'contents': base64.b64encode(file_contents)}]
password = 'rebuildPassw0rd'
resp, rebuilt_server = self.client.rebuild(self.server_id,
@@ -121,13 +121,13 @@
personality=personality,
adminPass=password)
- #Verify the properties in the initial response are correct
+ # Verify the properties in the initial response are correct
self.assertEqual(self.server_id, rebuilt_server['id'])
rebuilt_image_id = rebuilt_server['image']['id']
self.assertTrue(self.image_ref_alt.endswith(rebuilt_image_id))
self.assertEqual(self.flavor_ref, int(rebuilt_server['flavor']['id']))
- #Verify the server properties after the rebuild completes
+ # Verify the server properties after the rebuild completes
self.client.wait_for_server_status(rebuilt_server['id'], 'ACTIVE')
resp, server = self.client.get_server(rebuilt_server['id'])
rebuilt_image_id = rebuilt_server['image']['id']
@@ -279,6 +279,15 @@
cls.server_id = server['id']
cls.password = server['adminPass']
+ @attr(type='gate')
+ def test_stop_start_server(self):
+ resp, server = self.servers_client.stop(self.server_id)
+ self.assertEqual(202, resp.status)
+ self.servers_client.wait_for_server_status(self.server_id, 'SHUTOFF')
+ resp, server = self.servers_client.start(self.server_id)
+ self.assertEqual(202, resp.status)
+ self.servers_client.wait_for_server_status(self.server_id, 'ACTIVE')
+
class ServerActionsTestXML(ServerActionsTestJSON):
_interface = 'xml'
diff --git a/tempest/api/compute/servers/test_server_metadata.py b/tempest/api/compute/servers/test_server_metadata.py
index 442d30c..45de0d6 100644
--- a/tempest/api/compute/servers/test_server_metadata.py
+++ b/tempest/api/compute/servers/test_server_metadata.py
@@ -47,7 +47,7 @@
# All metadata key/value pairs for a server should be returned
resp, resp_metadata = self.client.list_server_metadata(self.server_id)
- #Verify the expected metadata items are in the list
+ # Verify the expected metadata items are in the list
self.assertEqual(200, resp.status)
expected = {'key1': 'value1', 'key2': 'value2'}
self.assertEqual(expected, resp_metadata)
@@ -55,14 +55,14 @@
@attr(type='gate')
def test_set_server_metadata(self):
# The server's metadata should be replaced with the provided values
- #Create a new set of metadata for the server
+ # Create a new set of metadata for the server
req_metadata = {'meta2': 'data2', 'meta3': 'data3'}
resp, metadata = self.client.set_server_metadata(self.server_id,
req_metadata)
self.assertEqual(200, resp.status)
- #Verify the expected values are correct, and that the
- #previous values have been removed
+ # Verify the expected values are correct, and that the
+ # previous values have been removed
resp, resp_metadata = self.client.list_server_metadata(self.server_id)
self.assertEqual(resp_metadata, req_metadata)
@@ -98,7 +98,7 @@
meta)
self.assertEqual(200, resp.status)
- #Verify the values have been updated to the proper values
+ # Verify the values have been updated to the proper values
resp, resp_metadata = self.client.list_server_metadata(self.server_id)
expected = {'key1': 'alt1', 'key2': 'value2', 'key3': 'value3'}
self.assertEqual(expected, resp_metadata)
@@ -123,13 +123,13 @@
@attr(type='gate')
def test_set_server_metadata_item(self):
# The item's value should be updated to the provided value
- #Update the metadata value
+ # Update the metadata value
meta = {'nova': 'alt'}
resp, body = self.client.set_server_metadata_item(self.server_id,
'nova', meta)
self.assertEqual(200, resp.status)
- #Verify the meta item's value has been updated
+ # Verify the meta item's value has been updated
resp, resp_metadata = self.client.list_server_metadata(self.server_id)
expected = {'key1': 'value1', 'key2': 'value2', 'nova': 'alt'}
self.assertEqual(expected, resp_metadata)
@@ -141,7 +141,7 @@
'key1')
self.assertEqual(204, resp.status)
- #Verify the metadata item has been removed
+ # Verify the metadata item has been removed
resp, resp_metadata = self.client.list_server_metadata(self.server_id)
expected = {'key2': 'value2'}
self.assertEqual(expected, resp_metadata)
@@ -197,7 +197,7 @@
# Negative test: Should not be able to delete metadata item from a
# nonexistant server
- #Delete the metadata item
+ # Delete the metadata item
self.assertRaises(exceptions.NotFound,
self.client.delete_server_metadata_item, 999, 'd')
diff --git a/tempest/api/compute/servers/test_server_rescue.py b/tempest/api/compute/servers/test_server_rescue.py
index 8225a4c..82559d5 100644
--- a/tempest/api/compute/servers/test_server_rescue.py
+++ b/tempest/api/compute/servers/test_server_rescue.py
@@ -32,12 +32,12 @@
super(ServerRescueTestJSON, cls).setUpClass()
cls.device = 'vdf'
- #Floating IP creation
+ # Floating IP creation
resp, body = cls.floating_ips_client.create_floating_ip()
cls.floating_ip_id = str(body['id']).strip()
cls.floating_ip = str(body['ip']).strip()
- #Security group creation
+ # Security group creation
cls.sg_name = rand_name('sg')
cls.sg_desc = rand_name('sg-desc')
resp, cls.sg = \
@@ -85,7 +85,7 @@
@classmethod
def tearDownClass(cls):
- #Deleting the floating IP which is created in this method
+ # Deleting the floating IP which is created in this method
cls.floating_ips_client.delete_floating_ip(cls.floating_ip_id)
client = cls.volumes_extensions_client
client.delete_volume(str(cls.volume_to_attach['id']).strip())
@@ -110,6 +110,11 @@
self.assertEqual(202, resp.status)
self.servers_client.wait_for_server_status(server_id, 'ACTIVE')
+ def _unpause(self, server_id):
+ resp, body = self.servers_client.unpause_server(server_id)
+ self.assertEqual(202, resp.status)
+ self.servers_client.wait_for_server_status(server_id, 'ACTIVE')
+
@attr(type='smoke')
def test_rescue_unrescue_instance(self):
resp, body = self.servers_client.rescue_server(
@@ -121,11 +126,30 @@
self.servers_client.wait_for_server_status(self.server_id, 'ACTIVE')
@attr(type=['negative', 'gate'])
+ def test_rescue_paused_instance(self):
+ # Rescue a paused server
+ resp, body = self.servers_client.pause_server(
+ self.server_id)
+ self.addCleanup(self._unpause, self.server_id)
+ self.assertEqual(202, resp.status)
+ self.servers_client.wait_for_server_status(self.server_id, 'PAUSED')
+ self.assertRaises(exceptions.Duplicate,
+ self.servers_client.rescue_server,
+ self.server_id)
+
+ @attr(type=['negative', 'gate'])
def test_rescued_vm_reboot(self):
self.assertRaises(exceptions.Duplicate, self.servers_client.reboot,
self.rescue_id, 'HARD')
@attr(type=['negative', 'gate'])
+ def test_rescue_non_existent_server(self):
+ # Rescue a non-existing server
+ self.assertRaises(exceptions.NotFound,
+ self.servers_client.rescue_server,
+ '999erra43')
+
+ @attr(type=['negative', 'gate'])
def test_rescued_vm_rebuild(self):
self.assertRaises(exceptions.Duplicate,
self.servers_client.rebuild,
@@ -158,7 +182,7 @@
# Rescue the server
self.servers_client.rescue_server(self.server_id, self.password)
self.servers_client.wait_for_server_status(self.server_id, 'RESCUE')
- #addCleanup is a LIFO queue
+ # addCleanup is a LIFO queue
self.addCleanup(self._detach, self.server_id,
self.volume_to_detach['id'])
self.addCleanup(self._unrescue, self.server_id)
@@ -177,13 +201,13 @@
self.servers_client.wait_for_server_status(self.server_id, 'RESCUE')
self.addCleanup(self._unrescue, self.server_id)
- #Association of floating IP to a rescued vm
+ # Association of floating IP to a rescued vm
client = self.floating_ips_client
resp, body = client.associate_floating_ip_to_server(self.floating_ip,
self.server_id)
self.assertEqual(202, resp.status)
- #Disassociation of floating IP that was associated in this method
+ # Disassociation of floating IP that was associated in this method
resp, body = \
client.disassociate_floating_ip_from_server(self.floating_ip,
self.server_id)
@@ -196,12 +220,12 @@
self.server_id, self.password)
self.servers_client.wait_for_server_status(self.server_id, 'RESCUE')
- #Add Security group
+ # Add Security group
resp, body = self.servers_client.add_security_group(self.server_id,
self.sg_name)
self.assertEqual(202, resp.status)
- #Delete Security group
+ # Delete Security group
resp, body = self.servers_client.remove_security_group(self.server_id,
self.sg_name)
self.assertEqual(202, resp.status)
diff --git a/tempest/api/compute/servers/test_servers_negative.py b/tempest/api/compute/servers/test_servers_negative.py
index 5cc8dc6..e09a23f 100644
--- a/tempest/api/compute/servers/test_servers_negative.py
+++ b/tempest/api/compute/servers/test_servers_negative.py
@@ -99,6 +99,17 @@
self.server_id, 'SOFT')
@attr(type=['negative', 'gate'])
+ def test_pause_paused_server(self):
+ # Pause a paused server.
+ resp, server = self.create_server(wait_until='ACTIVE')
+ self.server_id = server['id']
+ self.client.pause_server(self.server_id)
+ self.client.wait_for_server_status(self.server_id, 'PAUSED')
+ self.assertRaises(exceptions.Duplicate,
+ self.client.pause_server,
+ self.server_id)
+
+ @attr(type=['negative', 'gate'])
def test_rebuild_deleted_server(self):
# Rebuild a deleted server
@@ -210,13 +221,10 @@
@attr(type=['negative', 'gate'])
def test_delete_a_server_of_another_tenant(self):
# Delete a server that belongs to another tenant
- try:
- resp, server = self.create_server(wait_until='ACTIVE')
- self.assertRaises(exceptions.NotFound,
- self.alt_client.delete_server,
- server['id'])
- finally:
- self.client.delete_server(server['id'])
+ resp, server = self.create_server(wait_until='ACTIVE')
+ self.assertRaises(exceptions.NotFound,
+ self.alt_client.delete_server,
+ server['id'])
@attr(type=['negative', 'gate'])
def test_delete_server_pass_negative_id(self):
@@ -247,6 +255,20 @@
self.assertRaises(exceptions.NotFound, self.client.get_server,
'999erra43')
+ @attr(type=['negative', 'gate'])
+ def test_stop_non_existent_server(self):
+ # Stop a non-existent server
+ non_exist_id = rand_name('non-existent-server')
+ self.assertRaises(exceptions.NotFound, self.servers_client.stop,
+ non_exist_id)
+
+ @attr(type=['negative', 'gate'])
+ def test_pause_non_existent_server(self):
+ # Pause a non-existent server
+ non_exist_id = rand_name('non-existent-server')
+ self.assertRaises(exceptions.NotFound, self.client.pause_server,
+ non_exist_id)
+
class ServersNegativeTestXML(ServersNegativeTestJSON):
_interface = 'xml'
diff --git a/tempest/api/compute/servers/test_virtual_interfaces.py b/tempest/api/compute/servers/test_virtual_interfaces.py
index 35f0fc0..2a5be8c 100644
--- a/tempest/api/compute/servers/test_virtual_interfaces.py
+++ b/tempest/api/compute/servers/test_virtual_interfaces.py
@@ -37,7 +37,7 @@
resp, server = cls.create_server(wait_until='ACTIVE')
cls.server_id = server['id']
- @testtools.skipIf(CONF.network.neutron_available, "This feature is not " +
+ @testtools.skipIf(CONF.service_available.neutron, "This feature is not " +
"implemented by Neutron. See bug: #1183436")
@attr(type='gate')
def test_list_virtual_interfaces(self):
diff --git a/tempest/api/compute/test_authorization.py b/tempest/api/compute/test_authorization.py
index 1a65a20..60297a9 100644
--- a/tempest/api/compute/test_authorization.py
+++ b/tempest/api/compute/test_authorization.py
@@ -41,7 +41,7 @@
cls.security_client = cls.os.security_groups_client
if cls.config.compute.allow_tenant_isolation:
- creds = cls._get_isolated_creds()
+ creds = cls.isolated_creds.get_alt_creds()
username, tenant_name, password = creds
cls.alt_manager = clients.Manager(username=username,
password=password,
@@ -126,7 +126,7 @@
def test_list_servers_with_alternate_tenant(self):
# A list on servers from one tenant should not
# show on alternate tenant
- #Listing servers from alternate tenant
+ # Listing servers from alternate tenant
alt_server_ids = []
resp, body = self.alt_client.list_servers()
alt_server_ids = [s['id'] for s in body['servers']]
@@ -188,7 +188,7 @@
def test_create_keypair_in_analt_user_tenant(self):
# A create keypair request should fail if the tenant id does not match
# the current user
- #POST keypair with other user tenant
+ # POST keypair with other user tenant
k_name = rand_name('keypair-')
self.alt_keypairs_client._set_auth()
self.saved_base_url = self.alt_keypairs_client.base_url
@@ -238,7 +238,7 @@
def test_create_security_group_in_analt_user_tenant(self):
# A create security group request should fail if the tenant id does not
# match the current user
- #POST security group with other user tenant
+ # POST security group with other user tenant
s_name = rand_name('security-')
s_description = rand_name('security')
self.saved_base_url = self.alt_security_client.base_url
@@ -276,7 +276,7 @@
def test_create_security_group_rule_in_analt_user_tenant(self):
# A create security group rule request should fail if the tenant id
# does not match the current user
- #POST security group rule with other user tenant
+ # POST security group rule with other user tenant
parent_group_id = self.security_group['id']
ip_protocol = 'icmp'
from_port = -1
diff --git a/tempest/api/compute/test_extensions.py b/tempest/api/compute/test_extensions.py
index 291c8e4..4359c49 100644
--- a/tempest/api/compute/test_extensions.py
+++ b/tempest/api/compute/test_extensions.py
@@ -27,7 +27,7 @@
def test_list_extensions(self):
# List of all extensions
resp, extensions = self.extensions_client.list_extensions()
- self.assertTrue("extensions" in extensions)
+ self.assertIn("extensions", extensions)
self.assertEqual(200, resp.status)
diff --git a/tempest/api/compute/test_quotas.py b/tempest/api/compute/test_quotas.py
index 1a8a40b..8014fca 100644
--- a/tempest/api/compute/test_quotas.py
+++ b/tempest/api/compute/test_quotas.py
@@ -30,31 +30,33 @@
resp, tenants = cls.admin_client.list_tenants()
cls.tenant_id = [tnt['id'] for tnt in tenants if tnt['name'] ==
cls.client.tenant_name][0]
- cls.default_quota_set = {'injected_file_content_bytes': 10240,
- 'metadata_items': 128, 'injected_files': 5,
- 'ram': 51200, 'floating_ips': 10,
- 'fixed_ips': -1, 'key_pairs': 100,
- 'injected_file_path_bytes': 255,
- 'instances': 10, 'security_group_rules': 20,
- 'cores': 20, 'security_groups': 10}
+ cls.default_quota_set = set(('injected_file_content_bytes',
+ 'metadata_items', 'injected_files',
+ 'ram', 'floating_ips',
+ 'fixed_ips', 'key_pairs',
+ 'injected_file_path_bytes',
+ 'instances', 'security_group_rules',
+ 'cores', 'security_groups'))
@attr(type='smoke')
def test_get_quotas(self):
# User can get the quota set for it's tenant
- expected_quota_set = self.default_quota_set.copy()
- expected_quota_set['id'] = self.tenant_id
+ expected_quota_set = self.default_quota_set | set(['id'])
resp, quota_set = self.client.get_quota_set(self.tenant_id)
self.assertEqual(200, resp.status)
- self.assertEqual(expected_quota_set, quota_set)
+ self.assertEqual(sorted(expected_quota_set),
+ sorted(quota_set.keys()))
+ self.assertEqual(quota_set['id'], self.tenant_id)
@attr(type='smoke')
def test_get_default_quotas(self):
# User can get the default quota set for it's tenant
- expected_quota_set = self.default_quota_set.copy()
- expected_quota_set['id'] = self.tenant_id
+ expected_quota_set = self.default_quota_set | set(['id'])
resp, quota_set = self.client.get_default_quota_set(self.tenant_id)
self.assertEqual(200, resp.status)
- self.assertEqual(expected_quota_set, quota_set)
+ self.assertEqual(sorted(expected_quota_set),
+ sorted(quota_set.keys()))
+ self.assertEqual(quota_set['id'], self.tenant_id)
class QuotasTestXML(QuotasTestJSON):
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index b507e03..a3b051e 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -1,6 +1,6 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright 2012 IBM Corp.
+# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -37,12 +37,16 @@
def setUpClass(cls):
super(AttachVolumeTestJSON, cls).setUpClass()
cls.device = 'vdb'
+ if not cls.config.service_available.cinder:
+ skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
+ raise cls.skipException(skip_msg)
def _detach(self, server_id, volume_id):
- self.servers_client.detach_volume(server_id, volume_id)
- self.volumes_client.wait_for_volume_status(volume_id, 'available')
+ if self.attached:
+ self.servers_client.detach_volume(server_id, volume_id)
+ self.volumes_client.wait_for_volume_status(volume_id, 'available')
- def _delete(self, volume):
+ def _delete_volume(self):
if self.volume:
self.volumes_client.delete_volume(self.volume['id'])
self.volume = None
@@ -60,6 +64,7 @@
resp, volume = self.volumes_client.create_volume(1,
display_name='test')
self.volume = volume
+ self.addCleanup(self._delete_volume)
self.volumes_client.wait_for_volume_status(volume['id'], 'available')
# Attach the volume to the server
@@ -68,49 +73,41 @@
self.volumes_client.wait_for_volume_status(volume['id'], 'in-use')
self.attached = True
+ self.addCleanup(self._detach, server['id'], volume['id'])
@testtools.skipIf(not run_ssh, 'SSH required for this test')
@attr(type='gate')
def test_attach_detach_volume(self):
# Stop and Start a server with an attached volume, ensuring that
# the volume remains attached.
- try:
- self._create_and_attach()
- server = self.server
- volume = self.volume
+ self._create_and_attach()
+ server = self.server
+ volume = self.volume
- self.servers_client.stop(server['id'])
- self.servers_client.wait_for_server_status(server['id'], 'SHUTOFF')
+ self.servers_client.stop(server['id'])
+ self.servers_client.wait_for_server_status(server['id'], 'SHUTOFF')
- self.servers_client.start(server['id'])
- self.servers_client.wait_for_server_status(server['id'], 'ACTIVE')
+ self.servers_client.start(server['id'])
+ self.servers_client.wait_for_server_status(server['id'], 'ACTIVE')
- linux_client = RemoteClient(server,
- self.ssh_user, server['adminPass'])
- partitions = linux_client.get_partitions()
- self.assertTrue(self.device in partitions)
+ linux_client = RemoteClient(server,
+ self.ssh_user, server['adminPass'])
+ partitions = linux_client.get_partitions()
+ self.assertIn(self.device, partitions)
- self._detach(server['id'], volume['id'])
- self.attached = False
+ self._detach(server['id'], volume['id'])
+ self.attached = False
- self.servers_client.stop(server['id'])
- self.servers_client.wait_for_server_status(server['id'], 'SHUTOFF')
+ self.servers_client.stop(server['id'])
+ self.servers_client.wait_for_server_status(server['id'], 'SHUTOFF')
- self.servers_client.start(server['id'])
- self.servers_client.wait_for_server_status(server['id'], 'ACTIVE')
+ self.servers_client.start(server['id'])
+ self.servers_client.wait_for_server_status(server['id'], 'ACTIVE')
- linux_client = RemoteClient(server,
- self.ssh_user, server['adminPass'])
- partitions = linux_client.get_partitions()
- self.assertFalse(self.device in partitions)
- except Exception:
- self.fail("The test_attach_detach_volume is faild!")
- finally:
- if self.attached:
- self._detach(server['id'], volume['id'])
- # NOTE(maurosr): here we do the cleanup for volume, servers are
- # dealt on BaseComputeTest.tearDownClass
- self._delete(self.volume)
+ linux_client = RemoteClient(server,
+ self.ssh_user, server['adminPass'])
+ partitions = linux_client.get_partitions()
+ self.assertNotIn(self.device, partitions)
class AttachVolumeTestXML(AttachVolumeTestJSON):
diff --git a/tempest/api/compute/volumes/test_volumes_get.py b/tempest/api/compute/volumes/test_volumes_get.py
index 1acc57d..4f0f17e 100644
--- a/tempest/api/compute/volumes/test_volumes_get.py
+++ b/tempest/api/compute/volumes/test_volumes_get.py
@@ -28,6 +28,9 @@
def setUpClass(cls):
super(VolumesGetTestJSON, cls).setUpClass()
cls.client = cls.volumes_extensions_client
+ if not cls.config.service_available.cinder:
+ skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
+ raise cls.skipException(skip_msg)
@attr(type='smoke')
def test_volume_create_get_delete(self):
@@ -35,25 +38,25 @@
volume = None
v_name = rand_name('Volume-%s-') % self._interface
metadata = {'Type': 'work'}
- #Create volume
+ # Create volume
resp, volume = self.client.create_volume(size=1,
display_name=v_name,
metadata=metadata)
self.addCleanup(self._delete_volume, volume)
self.assertEqual(200, resp.status)
- self.assertTrue('id' in volume)
- self.assertTrue('displayName' in volume)
+ self.assertIn('id', volume)
+ self.assertIn('displayName', volume)
self.assertEqual(volume['displayName'], v_name,
"The created volume name is not equal "
"to the requested name")
self.assertTrue(volume['id'] is not None,
"Field volume id is empty or not found.")
- #Wait for Volume status to become ACTIVE
+ # Wait for Volume status to become ACTIVE
self.client.wait_for_volume_status(volume['id'], 'available')
- #GET Volume
+ # GET Volume
resp, fetched_volume = self.client.get_volume(volume['id'])
self.assertEqual(200, resp.status)
- #Verfication of details of fetched Volume
+ # Verification of details of fetched Volume
self.assertEqual(v_name,
fetched_volume['displayName'],
'The fetched Volume is different '
@@ -71,27 +74,27 @@
def test_volume_get_metadata_none(self):
# CREATE, GET empty metadata dict
v_name = rand_name('Volume-')
- #Create volume
+ # Create volume
resp, volume = self.client.create_volume(size=1,
display_name=v_name,
metadata={})
self.addCleanup(self._delete_volume, volume)
self.assertEqual(200, resp.status)
- self.assertTrue('id' in volume)
- self.assertTrue('displayName' in volume)
- #Wait for Volume status to become ACTIVE
+ self.assertIn('id', volume)
+ self.assertIn('displayName', volume)
+ # Wait for Volume status to become ACTIVE
self.client.wait_for_volume_status(volume['id'], 'available')
- #GET Volume
+ # GET Volume
resp, fetched_volume = self.client.get_volume(volume['id'])
self.assertEqual(200, resp.status)
self.assertEqual(fetched_volume['metadata'], {})
def _delete_volume(self, volume):
- #Delete the Volume created in this method
+ # Delete the Volume created in this method
try:
resp, _ = self.client.delete_volume(volume['id'])
self.assertEqual(202, resp.status)
- #Checking if the deleted Volume still exists
+ # Checking if the deleted Volume still exists
self.client.wait_for_resource_deletion(volume['id'])
except KeyError:
return
diff --git a/tempest/api/compute/volumes/test_volumes_list.py b/tempest/api/compute/volumes/test_volumes_list.py
index d52349e..0e475cf 100644
--- a/tempest/api/compute/volumes/test_volumes_list.py
+++ b/tempest/api/compute/volumes/test_volumes_list.py
@@ -36,6 +36,9 @@
def setUpClass(cls):
super(VolumesTestJSON, cls).setUpClass()
cls.client = cls.volumes_extensions_client
+ if not cls.config.service_available.cinder:
+ skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
+ raise cls.skipException(skip_msg)
# Create 3 Volumes
cls.volume_list = []
cls.volume_id_list = []
@@ -94,10 +97,10 @@
@attr(type='gate')
def test_volume_list_with_details(self):
# Should return the list of Volumes with details
- #Fetch all Volumes
+ # Fetch all Volumes
resp, fetched_list = self.client.list_volumes_with_detail()
self.assertEqual(200, resp.status)
- #Now check if all the Volumes created in setup are in fetched list
+ # Now check if all the Volumes created in setup are in fetched list
missing_volumes = [
v for v in self.volume_list if v not in fetched_list
]
diff --git a/tempest/api/compute/volumes/test_volumes_negative.py b/tempest/api/compute/volumes/test_volumes_negative.py
index de214fc..c91e95b 100644
--- a/tempest/api/compute/volumes/test_volumes_negative.py
+++ b/tempest/api/compute/volumes/test_volumes_negative.py
@@ -28,11 +28,14 @@
def setUpClass(cls):
super(VolumesNegativeTest, cls).setUpClass()
cls.client = cls.volumes_extensions_client
+ if not cls.config.service_available.cinder:
+ skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
+ raise cls.skipException(skip_msg)
- @attr(type='gate')
+ @attr(type=['negative', 'gate'])
def test_volume_get_nonexistant_volume_id(self):
# Negative: Should not be able to get details of nonexistant volume
- #Creating a nonexistant volume id
+ # Creating a nonexistent volume id
volume_id_list = list()
resp, body = self.client.list_volumes()
for i in range(len(body)):
@@ -45,7 +48,7 @@
self.assertRaises(exceptions.NotFound, self.client.get_volume,
non_exist_id)
- @attr(type='gate')
+ @attr(type=['negative', 'gate'])
def test_volume_delete_nonexistant_volume_id(self):
# Negative: Should not be able to delete nonexistant Volume
# Creating nonexistant volume id
@@ -61,7 +64,7 @@
self.assertRaises(exceptions.NotFound, self.client.delete_volume,
non_exist_id)
- @attr(type='gate')
+ @attr(type=['negative', 'gate'])
def test_create_volume_with_invalid_size(self):
# Negative: Should not be able to create volume with invalid size
# in request
@@ -70,7 +73,7 @@
self.assertRaises(exceptions.BadRequest, self.client.create_volume,
size='#$%', display_name=v_name, metadata=metadata)
- @attr(type='gate')
+ @attr(type=['negative', 'gate'])
def test_create_volume_with_out_passing_size(self):
# Negative: Should not be able to create volume without passing size
# in request
@@ -79,7 +82,7 @@
self.assertRaises(exceptions.BadRequest, self.client.create_volume,
size='', display_name=v_name, metadata=metadata)
- @attr(type='gate')
+ @attr(type=['negative', 'gate'])
def test_create_volume_with_size_zero(self):
# Negative: Should not be able to create volume with size zero
v_name = rand_name('Volume-')
@@ -87,25 +90,25 @@
self.assertRaises(exceptions.BadRequest, self.client.create_volume,
size='0', display_name=v_name, metadata=metadata)
- @attr(type='gate')
+ @attr(type=['negative', 'gate'])
def test_get_invalid_volume_id(self):
# Negative: Should not be able to get volume with invalid id
self.assertRaises(exceptions.NotFound,
self.client.get_volume, '#$%%&^&^')
- @attr(type='gate')
+ @attr(type=['negative', 'gate'])
def test_get_volume_without_passing_volume_id(self):
# Negative: Should not be able to get volume when empty ID is passed
self.assertRaises(exceptions.NotFound, self.client.get_volume, '')
- @attr(type='gate')
+ @attr(type=['negative', 'gate'])
def test_delete_invalid_volume_id(self):
# Negative: Should not be able to delete volume when invalid ID is
# passed
self.assertRaises(exceptions.NotFound,
self.client.delete_volume, '!@#$%^&*()')
- @attr(type='gate')
+ @attr(type=['negative', 'gate'])
def test_delete_volume_without_passing_volume_id(self):
# Negative: Should not be able to delete volume when empty ID is passed
self.assertRaises(exceptions.NotFound, self.client.delete_volume, '')
diff --git a/tempest/api/identity/__init__.py b/tempest/api/identity/__init__.py
index 718aa15..0ed47f5 100644
--- a/tempest/api/identity/__init__.py
+++ b/tempest/api/identity/__init__.py
@@ -15,7 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest.common import log as logging
+from tempest.openstack.common import log as logging
LOG = logging.getLogger(__name__)
diff --git a/tempest/api/identity/admin/test_roles.py b/tempest/api/identity/admin/test_roles.py
index 08b86ca..cc112cc 100644
--- a/tempest/api/identity/admin/test_roles.py
+++ b/tempest/api/identity/admin/test_roles.py
@@ -73,7 +73,7 @@
# Role should be created, verified, and deleted
role_name = rand_name('role-test-')
resp, body = self.client.create_role(role_name)
- self.assertTrue('status' in resp)
+ self.assertIn('status', resp)
self.assertTrue(resp['status'].startswith('2'))
self.assertEqual(role_name, body['name'])
@@ -82,7 +82,7 @@
self.assertTrue(any(found))
resp, body = self.client.delete_role(found[0]['id'])
- self.assertTrue('status' in resp)
+ self.assertIn('status', resp)
self.assertTrue(resp['status'].startswith('2'))
resp, body = self.client.list_roles()
@@ -100,7 +100,7 @@
role_name = rand_name('role-dup-')
resp, body = self.client.create_role(role_name)
role1_id = body.get('id')
- self.assertTrue('status' in resp)
+ self.assertIn('status', resp)
self.assertTrue(resp['status'].startswith('2'))
self.addCleanup(self.client.delete_role, role1_id)
self.assertRaises(exceptions.Duplicate, self.client.create_role,
diff --git a/tempest/api/identity/admin/test_services.py b/tempest/api/identity/admin/test_services.py
index 644853a..2be0c29 100644
--- a/tempest/api/identity/admin/test_services.py
+++ b/tempest/api/identity/admin/test_services.py
@@ -29,33 +29,33 @@
def test_create_get_delete_service(self):
# GET Service
try:
- #Creating a Service
+ # Creating a Service
name = rand_name('service-')
type = rand_name('type--')
description = rand_name('description-')
resp, service_data = self.client.create_service(
name, type, description=description)
self.assertTrue(resp['status'].startswith('2'))
- #Verifying response body of create service
- self.assertTrue('id' in service_data)
+ # Verifying response body of create service
+ self.assertIn('id', service_data)
self.assertFalse(service_data['id'] is None)
- self.assertTrue('name' in service_data)
+ self.assertIn('name', service_data)
self.assertEqual(name, service_data['name'])
- self.assertTrue('type' in service_data)
+ self.assertIn('type', service_data)
self.assertEqual(type, service_data['type'])
- self.assertTrue('description' in service_data)
+ self.assertIn('description', service_data)
self.assertEqual(description, service_data['description'])
- #Get service
+ # Get service
resp, fetched_service = self.client.get_service(service_data['id'])
self.assertTrue(resp['status'].startswith('2'))
- #verifying the existence of service created
- self.assertTrue('id' in fetched_service)
+ # verifying the existence of service created
+ self.assertIn('id', fetched_service)
self.assertEquals(fetched_service['id'], service_data['id'])
- self.assertTrue('name' in fetched_service)
+ self.assertIn('name', fetched_service)
self.assertEqual(fetched_service['name'], service_data['name'])
- self.assertTrue('type' in fetched_service)
+ self.assertIn('type', fetched_service)
self.assertEqual(fetched_service['type'], service_data['type'])
- self.assertTrue('description' in fetched_service)
+ self.assertIn('description', fetched_service)
self.assertEqual(fetched_service['description'],
service_data['description'])
finally:
diff --git a/tempest/api/identity/admin/test_users.py b/tempest/api/identity/admin/test_users.py
index 0bba250..6f90b04 100644
--- a/tempest/api/identity/admin/test_users.py
+++ b/tempest/api/identity/admin/test_users.py
@@ -240,12 +240,12 @@
self.assertEquals('200', resp['status'])
user_ids.append(user2['id'])
self.data.users.append(user2)
- #List of users for the respective tenant ID
+ # List of users for the respective tenant ID
resp, body = self.client.list_users_for_tenant(self.data.tenant['id'])
- self.assertTrue(resp['status'] in ('200', '203'))
+ self.assertIn(resp['status'], ('200', '203'))
for i in body:
fetched_user_ids.append(i['id'])
- #verifying the user Id in the list
+ # verifying the user Id in the list
missing_users =\
[user for user in user_ids if user not in fetched_user_ids]
self.assertEqual(0, len(missing_users),
@@ -260,7 +260,7 @@
user = self.get_user_by_name(self.data.test_user)
tenant = self.get_tenant_by_name(self.data.test_tenant)
role = self.get_role_by_name(self.data.test_role)
- #Assigning roles to two users
+ # Assigning roles to two users
user_ids = list()
fetched_user_ids = list()
user_ids.append(user['id'])
@@ -277,12 +277,12 @@
second_user['id'],
role['id'])
self.assertEquals('200', resp['status'])
- #List of users with roles for the respective tenant ID
+ # List of users with roles for the respective tenant ID
resp, body = self.client.list_users_for_tenant(self.data.tenant['id'])
self.assertEquals('200', resp['status'])
for i in body:
fetched_user_ids.append(i['id'])
- #verifying the user Id in the list
+ # verifying the user Id in the list
missing_users = [missing_user for missing_user in user_ids
if missing_user not in fetched_user_ids]
self.assertEqual(0, len(missing_users),
@@ -293,13 +293,13 @@
def test_list_users_with_invalid_tenant(self):
# Should not be able to return a list of all
# users for a nonexistant tenant
- #Assign invalid tenant ids
+ # Assign invalid tenant ids
invalid_id = list()
invalid_id.append(rand_name('999'))
invalid_id.append('alpha')
invalid_id.append(rand_name("dddd@#%%^$"))
invalid_id.append('!@#()$%^&*?<>{}[]')
- #List the users with invalid tenant id
+ # List the users with invalid tenant id
for invalid in invalid_id:
self.assertRaises(exceptions.NotFound,
self.client.list_users_for_tenant, invalid)
diff --git a/tempest/api/identity/admin/v3/test_domains.py b/tempest/api/identity/admin/v3/test_domains.py
index 3d40eb3..9136934 100644
--- a/tempest/api/identity/admin/v3/test_domains.py
+++ b/tempest/api/identity/admin/v3/test_domains.py
@@ -33,7 +33,7 @@
@attr(type='smoke')
def test_list_domains(self):
- #Test to list domains
+ # Test to list domains
domain_ids = list()
fetched_ids = list()
for _ in range(3):
diff --git a/tempest/api/identity/admin/v3/test_endpoints.py b/tempest/api/identity/admin/v3/test_endpoints.py
index f01cc64..d98fb71 100644
--- a/tempest/api/identity/admin/v3/test_endpoints.py
+++ b/tempest/api/identity/admin/v3/test_endpoints.py
@@ -37,7 +37,7 @@
description=s_description)
cls.service_id = cls.service_data['id']
cls.service_ids.append(cls.service_id)
- #Create endpoints so as to use for LIST and GET test cases
+ # Create endpoints so as to use for LIST and GET test cases
cls.setup_endpoints = list()
for i in range(2):
region = rand_name('region')
@@ -53,12 +53,13 @@
cls.client.delete_endpoint(e['id'])
for s in cls.service_ids:
cls.identity_client.delete_service(s)
+ super(EndPointsTestJSON, cls).tearDownClass()
@attr(type='gate')
def test_list_endpoints(self):
# Get a list of endpoints
resp, fetched_endpoints = self.client.list_endpoints()
- #Asserting LIST Endpoint
+ # Asserting LIST Endpoint
self.assertEqual(resp['status'], '200')
missing_endpoints =\
[e for e in self.setup_endpoints if e not in fetched_endpoints]
@@ -78,11 +79,11 @@
self.client.create_endpoint(self.service_id, interface, url,
region=region, enabled=True)
create_flag = True
- #Asserting Create Endpoint response body
+ # Asserting Create Endpoint response body
self.assertEqual(resp['status'], '201')
self.assertEqual(region, endpoint['region'])
self.assertEqual(url, endpoint['url'])
- #Checking if created endpoint is present in the list of endpoints
+ # Checking if created endpoint is present in the list of endpoints
resp, fetched_endpoints = self.client.list_endpoints()
for e in fetched_endpoints:
if endpoint['id'] == e['id']:
@@ -93,12 +94,12 @@
finally:
if create_flag:
matched = False
- #Deleting the endpoint created in this method
+ # Deleting the endpoint created in this method
resp_header, resp_body =\
self.client.delete_endpoint(endpoint['id'])
self.assertEqual(resp_header['status'], '204')
self.assertEqual(resp_body, '')
- #Checking whether endpoint is deleted successfully
+ # Checking whether endpoint is deleted successfully
resp, fetched_endpoints = self.client.list_endpoints()
for e in fetched_endpoints:
if endpoint['id'] == e['id']:
@@ -108,8 +109,8 @@
@attr(type='smoke')
def test_update_endpoint(self):
- #Creating an endpoint so as to check update endpoint
- #with new values
+ # Creating an endpoint so as to check update endpoint
+ # with new values
region1 = rand_name('region')
url1 = rand_name('url')
interface1 = 'public'
@@ -117,7 +118,7 @@
self.client.create_endpoint(self.service_id, interface1,
url1, region=region1,
enabled=True)
- #Creating service so as update endpoint with new service ID
+ # Creating service so as update endpoint with new service ID
s_name = rand_name('service-')
s_type = rand_name('type--')
s_description = rand_name('description-')
@@ -125,7 +126,7 @@
self.identity_client.create_service(s_name, s_type,
description=s_description)
self.service_ids.append(self.service2['id'])
- #Updating endpoint with new values
+ # Updating endpoint with new values
region2 = rand_name('region')
url2 = rand_name('url')
interface2 = 'internal'
@@ -135,7 +136,7 @@
interface=interface2, url=url2,
region=region2, enabled=False)
self.assertEqual(resp['status'], '200')
- #Asserting if the attributes of endpoint are updated
+ # Asserting if the attributes of endpoint are updated
self.assertEqual(self.service2['id'], endpoint['service_id'])
self.assertEqual(interface2, endpoint['interface'])
self.assertEqual(url2, endpoint['url'])
diff --git a/tempest/api/identity/admin/v3/test_policies.py b/tempest/api/identity/admin/v3/test_policies.py
index 799b081..737a0e0 100644
--- a/tempest/api/identity/admin/v3/test_policies.py
+++ b/tempest/api/identity/admin/v3/test_policies.py
@@ -29,7 +29,7 @@
@attr(type='smoke')
def test_list_policies(self):
- #Test to list policies
+ # Test to list policies
policy_ids = list()
fetched_ids = list()
for _ in range(3):
@@ -50,7 +50,7 @@
@attr(type='smoke')
def test_create_update_delete_policy(self):
- #Test to update policy
+ # Test to update policy
blob = rand_name('BlobName-')
policy_type = rand_name('PolicyType-')
resp, policy = self.policy_client.create_policy(blob, policy_type)
@@ -63,12 +63,12 @@
self.assertEqual(policy_type, policy['type'])
resp, fetched_policy = self.policy_client.get_policy(policy['id'])
self.assertEqual(resp['status'], '200')
- #Update policy
+ # Update policy
update_type = rand_name('UpdatedPolicyType-')
resp, data = self.policy_client.update_policy(
policy['id'], type=update_type)
- self.assertTrue('type' in data)
- #Assertion for updated value with fetched value
+ self.assertIn('type', data)
+ # Assertion for updated value with fetched value
resp, fetched_policy = self.policy_client.get_policy(policy['id'])
self.assertIn('id', fetched_policy)
self.assertIn('blob', fetched_policy)
diff --git a/tempest/api/identity/admin/v3/test_roles.py b/tempest/api/identity/admin/v3/test_roles.py
new file mode 100644
index 0000000..cab84c0
--- /dev/null
+++ b/tempest/api/identity/admin/v3/test_roles.py
@@ -0,0 +1,170 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.identity import base
+from tempest.common.utils.data_utils import rand_name
+from tempest.test import attr
+
+
+class RolesV3TestJSON(base.BaseIdentityAdminTest):
+ _interface = 'json'
+
+ @classmethod
+ def setUpClass(cls):
+ super(RolesV3TestJSON, cls).setUpClass()
+ cls.fetched_role_ids = list()
+ u_name = rand_name('user-')
+ u_desc = '%s description' % u_name
+ u_email = '%s@testmail.tm' % u_name
+ u_password = rand_name('pass-')
+ resp = [None] * 5
+ resp[0], cls.project = cls.v3_client.create_project(
+ rand_name('project-'), description=rand_name('project-desc-'))
+ resp[1], cls.domain = cls.v3_client.create_domain(
+ rand_name('domain-'), description=rand_name('domain-desc-'))
+ resp[2], cls.group_body = cls.v3_client.create_group(
+ rand_name('Group-'), project_id=cls.project['id'],
+ domain_id=cls.domain['id'])
+ resp[3], cls.user_body = cls.v3_client.create_user(
+ u_name, description=u_desc, password=u_password,
+ email=u_email, project_id=cls.project['id'],
+ domain_id=cls.domain['id'])
+ resp[4], cls.role = cls.v3_client.create_role(rand_name('Role-'))
+ for r in resp:
+ assert r['status'] == '201', "Expected: %s" % r['status']
+
+ @classmethod
+ def tearDownClass(cls):
+ resp = [None] * 5
+ resp[0], _ = cls.v3_client.delete_role(cls.role['id'])
+ resp[1], _ = cls.v3_client.delete_group(cls.group_body['id'])
+ resp[2], _ = cls.v3_client.delete_user(cls.user_body['id'])
+ resp[3], _ = cls.v3_client.delete_project(cls.project['id'])
+ #NOTE(harika-vakadi): It is necessary to disable the domain
+ # before deleting, or else it would result in unauthorized error
+ cls.v3_client.update_domain(cls.domain['id'], enabled=False)
+ resp[4], _ = cls.v3_client.delete_domain(cls.domain['id'])
+ for r in resp:
+ assert r['status'] == '204', "Expected: %s" % r['status']
+ super(RolesV3TestJSON, cls).tearDownClass()
+
+ def _list_assertions(self, resp, body, fetched_role_ids, role_id):
+ self.assertEqual(resp['status'], '200')
+ self.assertEqual(len(body), 1)
+ self.assertIn(role_id, fetched_role_ids)
+
+ @attr(type='smoke')
+ def test_role_create_update_get(self):
+ r_name = rand_name('Role-')
+ resp, role = self.v3_client.create_role(r_name)
+ self.addCleanup(self.v3_client.delete_role, role['id'])
+ self.assertEqual(resp['status'], '201')
+ self.assertIn('name', role)
+ self.assertEqual(role['name'], r_name)
+
+ new_name = rand_name('NewRole-')
+ resp, updated_role = self.v3_client.update_role(new_name, role['id'])
+ self.assertEqual(resp['status'], '200')
+ self.assertIn('name', updated_role)
+ self.assertIn('id', updated_role)
+ self.assertIn('links', updated_role)
+ self.assertNotEqual(r_name, updated_role['name'])
+
+ resp, new_role = self.v3_client.get_role(role['id'])
+ self.assertEqual(resp['status'], '200')
+ self.assertEqual(new_name, new_role['name'])
+ self.assertEqual(updated_role['id'], new_role['id'])
+
+ @attr(type='smoke')
+ def test_grant_list_revoke_role_to_user_on_project(self):
+ resp, _ = self.v3_client.assign_user_role_on_project(
+ self.project['id'], self.user_body['id'], self.role['id'])
+ self.assertEqual(resp['status'], '204')
+
+ resp, roles = self.v3_client.list_user_roles_on_project(
+ self.project['id'], self.user_body['id'])
+
+ for i in roles:
+ self.fetched_role_ids.append(i['id'])
+
+ self._list_assertions(resp, roles, self.fetched_role_ids,
+ self.role['id'])
+
+ resp, _ = self.v3_client.revoke_role_from_user_on_project(
+ self.project['id'], self.user_body['id'], self.role['id'])
+ self.assertEqual(resp['status'], '204')
+
+ @attr(type='smoke')
+ def test_grant_list_revoke_role_to_user_on_domain(self):
+ resp, _ = self.v3_client.assign_user_role_on_domain(
+ self.domain['id'], self.user_body['id'], self.role['id'])
+ self.assertEqual(resp['status'], '204')
+
+ resp, roles = self.v3_client.list_user_roles_on_domain(
+ self.domain['id'], self.user_body['id'])
+
+ for i in roles:
+ self.fetched_role_ids.append(i['id'])
+
+ self._list_assertions(resp, roles, self.fetched_role_ids,
+ self.role['id'])
+
+ resp, _ = self.v3_client.revoke_role_from_user_on_domain(
+ self.domain['id'], self.user_body['id'], self.role['id'])
+ self.assertEqual(resp['status'], '204')
+
+ @attr(type='smoke')
+ def test_grant_list_revoke_role_to_group_on_project(self):
+ resp, _ = self.v3_client.assign_group_role_on_project(
+ self.project['id'], self.group_body['id'], self.role['id'])
+ self.assertEqual(resp['status'], '204')
+
+ resp, roles = self.v3_client.list_group_roles_on_project(
+ self.project['id'], self.group_body['id'])
+
+ for i in roles:
+ self.fetched_role_ids.append(i['id'])
+
+ self._list_assertions(resp, roles, self.fetched_role_ids,
+ self.role['id'])
+
+ resp, _ = self.v3_client.revoke_role_from_group_on_project(
+ self.project['id'], self.group_body['id'], self.role['id'])
+ self.assertEqual(resp['status'], '204')
+
+ @attr(type='smoke')
+ def test_grant_list_revoke_role_to_group_on_domain(self):
+ resp, _ = self.v3_client.assign_group_role_on_domain(
+ self.domain['id'], self.group_body['id'], self.role['id'])
+ self.assertEqual(resp['status'], '204')
+
+ resp, roles = self.v3_client.list_group_roles_on_domain(
+ self.domain['id'], self.group_body['id'])
+
+ for i in roles:
+ self.fetched_role_ids.append(i['id'])
+
+ self._list_assertions(resp, roles, self.fetched_role_ids,
+ self.role['id'])
+
+ resp, _ = self.v3_client.revoke_role_from_group_on_domain(
+ self.domain['id'], self.group_body['id'], self.role['id'])
+ self.assertEqual(resp['status'], '204')
+
+
+class RolesV3TestXML(RolesV3TestJSON):
+ _interface = 'xml'
diff --git a/tempest/api/identity/admin/v3/test_services.py b/tempest/api/identity/admin/v3/test_services.py
index b35b93a..bfa0d84 100644
--- a/tempest/api/identity/admin/v3/test_services.py
+++ b/tempest/api/identity/admin/v3/test_services.py
@@ -1,4 +1,4 @@
-#vim: tabstop=4 shiftwidth=4 softtabstop=4
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
@@ -33,7 +33,7 @@
resp, body = self.client.create_service(
name, type, description=description)
self.assertEqual('200', resp['status'])
- #Deleting the service created in this method
+ # Deleting the service created in this method
self.addCleanup(self.client.delete_service, body['id'])
s_id = body['id']
@@ -46,7 +46,7 @@
self.assertEqual('200', resp['status'])
self.assertNotEqual(resp1_desc, resp2_desc)
- #Get service
+ # Get service
resp, body = self.client.get_service(s_id)
resp3_desc = body['description']
diff --git a/tempest/api/identity/admin/v3/test_tokens.py b/tempest/api/identity/admin/v3/test_tokens.py
new file mode 100644
index 0000000..2a20493
--- /dev/null
+++ b/tempest/api/identity/admin/v3/test_tokens.py
@@ -0,0 +1,57 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 OpenStack, LLC
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.identity import base
+from tempest.common.utils.data_utils import rand_name
+from tempest import exceptions
+from tempest.test import attr
+
+
+class UsersTestJSON(base.BaseIdentityAdminTest):
+ _interface = 'json'
+
+ @attr(type='smoke')
+ def test_tokens(self):
+ # Valid user's token is authenticated
+ # Create a User
+ u_name = rand_name('user-')
+ u_desc = '%s-description' % u_name
+ u_email = '%s@testmail.tm' % u_name
+ u_password = rand_name('pass-')
+ resp, user = self.v3_client.create_user(
+ u_name, description=u_desc, password=u_password,
+ email=u_email)
+ self.assertTrue(resp['status'].startswith('2'))
+ self.addCleanup(self.v3_client.delete_user, user['id'])
+ # Perform Authentication
+ resp, body = self.v3_token.auth(user['id'], u_password)
+ self.assertEqual(resp['status'], '201')
+ subject_token = resp['x-subject-token']
+ # Perform GET Token
+ resp, token_details = self.v3_client.get_token(subject_token)
+ self.assertEqual(resp['status'], '200')
+ self.assertEqual(resp['x-subject-token'], subject_token)
+ self.assertEqual(token_details['user']['id'], user['id'])
+ self.assertEqual(token_details['user']['name'], u_name)
+ # Perform Delete Token
+ resp, _ = self.v3_client.delete_token(subject_token)
+ self.assertRaises(exceptions.Unauthorized, self.v3_client.get_token,
+ subject_token)
+
+
+class UsersTestXML(UsersTestJSON):
+ _interface = 'xml'
diff --git a/tempest/api/identity/admin/v3/test_users.py b/tempest/api/identity/admin/v3/test_users.py
index 04e993d..bf7a554 100644
--- a/tempest/api/identity/admin/v3/test_users.py
+++ b/tempest/api/identity/admin/v3/test_users.py
@@ -26,7 +26,7 @@
@attr(type='gate')
def test_user_update(self):
# Test case to check if updating of user attributes is successful.
- #Creating first user
+ # Creating first user
u_name = rand_name('user-')
u_desc = u_name + 'description'
u_email = u_name + '@testmail.tm'
@@ -36,12 +36,12 @@
email=u_email, enabled=False)
# Delete the User at the end of this method
self.addCleanup(self.v3_client.delete_user, user['id'])
- #Creating second project for updation
+        # Creating a second project for the update
resp, project = self.v3_client.create_project(
rand_name('project-'), description=rand_name('project-desc-'))
# Delete the Project at the end of this method
self.addCleanup(self.v3_client.delete_project, project['id'])
- #Updating user details with new values
+ # Updating user details with new values
u_name2 = rand_name('user2-')
u_email2 = u_name2 + '@testmail.tm'
u_description2 = u_name2 + ' description'
@@ -49,7 +49,7 @@
user['id'], name=u_name2, description=u_description2,
project_id=project['id'],
email=u_email2, enabled=False)
- #Assert response body of update user.
+ # Assert response body of update user.
self.assertEqual(200, resp.status)
self.assertEqual(u_name2, update_user['name'])
self.assertEqual(u_description2, update_user['description'])
@@ -57,9 +57,9 @@
update_user['project_id'])
self.assertEqual(u_email2, update_user['email'])
self.assertEqual('false', str(update_user['enabled']).lower())
- #GET by id after updation
+        # GET by id after the update
resp, new_user_get = self.v3_client.get_user(user['id'])
- #Assert response body of GET after updation
+        # Assert response body of GET after the update
self.assertEqual(u_name2, new_user_get['name'])
self.assertEqual(u_description2, new_user_get['description'])
self.assertEqual(project['id'],
@@ -69,14 +69,14 @@
@attr(type='gate')
def test_list_user_projects(self):
- #List the projects that a user has access upon
+ # List the projects that a user has access upon
assigned_project_ids = list()
fetched_project_ids = list()
_, u_project = self.v3_client.create_project(
rand_name('project-'), description=rand_name('project-desc-'))
# Delete the Project at the end of this method
self.addCleanup(self.v3_client.delete_project, u_project['id'])
- #Create a user.
+ # Create a user.
u_name = rand_name('user-')
u_desc = u_name + 'description'
u_email = u_name + '@testmail.tm'
@@ -100,7 +100,7 @@
_, project = self.v3_client.get_project(project_body['id'])
# Delete the Project at the end of this method
self.addCleanup(self.v3_client.delete_project, project_body['id'])
- #Assigning roles to user on project
+ # Assigning roles to user on project
self.v3_client.assign_user_role(project['id'],
user['id'],
role['id'])
@@ -109,7 +109,7 @@
self.assertEqual(200, resp.status)
for i in body:
fetched_project_ids.append(i['id'])
- #verifying the project ids in list
+ # verifying the project ids in list
missing_projects =\
[p for p in assigned_project_ids
if p not in fetched_project_ids]
diff --git a/tempest/api/identity/base.py b/tempest/api/identity/base.py
index db55509..bfb5372 100644
--- a/tempest/api/identity/base.py
+++ b/tempest/api/identity/base.py
@@ -25,6 +25,7 @@
@classmethod
def setUpClass(cls):
+ super(BaseIdentityAdminTest, cls).setUpClass()
os = clients.AdminManager(interface=cls._interface)
cls.client = os.identity_client
cls.token_client = os.token_client
@@ -32,6 +33,7 @@
cls.v3_client = os.identity_v3_client
cls.service_client = os.service_client
cls.policy_client = os.policy_client
+ cls.v3_token = os.token_v3_client
if not cls.client.has_admin_extensions():
raise cls.skipException("Admin extensions disabled")
@@ -44,6 +46,7 @@
@classmethod
def tearDownClass(cls):
cls.data.teardown_all()
+ super(BaseIdentityAdminTest, cls).tearDownClass()
def disable_user(self, user_name):
user = self.get_user_by_name(user_name)
diff --git a/tempest/api/image/base.py b/tempest/api/image/base.py
index e62d84b..4f54a15 100644
--- a/tempest/api/image/base.py
+++ b/tempest/api/image/base.py
@@ -15,9 +15,10 @@
# under the License.
from tempest import clients
-from tempest.common import log as logging
+from tempest.common import isolated_creds
from tempest.common.utils.data_utils import rand_name
from tempest import exceptions
+from tempest.openstack.common import log as logging
import tempest.test
LOG = logging.getLogger(__name__)
@@ -28,8 +29,21 @@
@classmethod
def setUpClass(cls):
- cls.os = clients.Manager()
+ super(BaseImageTest, cls).setUpClass()
cls.created_images = []
+ cls._interface = 'json'
+ cls.isolated_creds = isolated_creds.IsolatedCreds(cls.__name__)
+ if not cls.config.service_available.glance:
+ skip_msg = ("%s skipped as glance is not available" % cls.__name__)
+ raise cls.skipException(skip_msg)
+ if cls.config.compute.allow_tenant_isolation:
+ creds = cls.isolated_creds.get_primary_creds()
+ username, tenant_name, password = creds
+ cls.os = clients.Manager(username=username,
+ password=password,
+ tenant_name=tenant_name)
+ else:
+ cls.os = clients.Manager()
@classmethod
def tearDownClass(cls):
@@ -41,6 +55,8 @@
for image_id in cls.created_images:
cls.client.wait_for_resource_deletion(image_id)
+ cls.isolated_creds.clear_isolated_creds()
+ super(BaseImageTest, cls).tearDownClass()
@classmethod
def create_image(cls, **kwargs):
diff --git a/tempest/api/image/v1/test_images.py b/tempest/api/image/v1/test_images.py
index 640daa5..327df0f 100644
--- a/tempest/api/image/v1/test_images.py
+++ b/tempest/api/image/v1/test_images.py
@@ -45,7 +45,7 @@
disk_format='raw',
is_public=True,
properties=properties)
- self.assertTrue('id' in body)
+ self.assertIn('id', body)
image_id = body.get('id')
self.assertEqual('New Name', body.get('name'))
self.assertTrue(body.get('is_public'))
@@ -56,7 +56,7 @@
# Now try uploading an image file
image_file = StringIO.StringIO(('*' * 1024))
resp, body = self.client.update_image(image_id, data=image_file)
- self.assertTrue('size' in body)
+ self.assertIn('size', body)
self.assertEqual(1024, body.get('size'))
@attr(type='gate')
@@ -69,7 +69,7 @@
'/someimage.iso',
properties={'key1': 'value1',
'key2': 'value2'})
- self.assertTrue('id' in body)
+ self.assertIn('id', body)
self.assertEqual('New Remote Image', body.get('name'))
self.assertTrue(body.get('is_public'))
self.assertEqual('active', body.get('status'))
@@ -83,7 +83,7 @@
container_format='bare',
disk_format='raw', is_public=True,
copy_from=self.config.images.http_image)
- self.assertTrue('id' in body)
+ self.assertIn('id', body)
image_id = body.get('id')
self.assertEqual('New Http Image', body.get('name'))
self.assertTrue(body.get('is_public'))
@@ -101,7 +101,7 @@
is_public=True,
min_ram=40,
properties=properties)
- self.assertTrue('id' in body)
+ self.assertIn('id', body)
self.assertEqual('New_image_with_min_ram', body.get('name'))
self.assertTrue(body.get('is_public'))
self.assertEqual('queued', body.get('status'))
@@ -184,7 +184,7 @@
self.assertEqual(resp['status'], '200')
image_list = map(lambda x: x['id'], images_list)
for image_id in self.created_images:
- self.assertTrue(image_id in image_list)
+ self.assertIn(image_id, image_list)
@attr(type='gate')
def test_index_disk_format(self):
diff --git a/tempest/api/image/v2/test_images.py b/tempest/api/image/v2/test_images.py
index 34db6e3..ad7be39 100644
--- a/tempest/api/image/v2/test_images.py
+++ b/tempest/api/image/v2/test_images.py
@@ -48,13 +48,13 @@
container_format='bare',
disk_format='raw',
visibility='public')
- self.assertTrue('id' in body)
+ self.assertIn('id', body)
image_id = body.get('id')
- self.assertTrue('name' in body)
+ self.assertIn('name', body)
self.assertEqual('New Name', body.get('name'))
- self.assertTrue('visibility' in body)
+ self.assertIn('visibility', body)
self.assertTrue(body.get('visibility') == 'public')
- self.assertTrue('status' in body)
+ self.assertIn('status', body)
self.assertEqual('queued', body.get('status'))
# Now try uploading an image file
@@ -62,7 +62,7 @@
resp, body = self.client.store_image(image_id, image_file)
self.assertEqual(resp.status, 204)
resp, body = self.client.get_image_metadata(image_id)
- self.assertTrue('size' in body)
+ self.assertIn('size', body)
self.assertEqual(1024, body.get('size'))
@@ -104,4 +104,9 @@
self.assertEqual(resp['status'], '200')
image_list = map(lambda x: x['id'], images_list)
for image in self.created_images:
- self.assertTrue(image in image_list)
+ self.assertIn(image, image_list)
+
+ @attr(type=['negative', 'gate'])
+ def test_get_image_meta_by_null_id(self):
+ self.assertRaises(exceptions.NotFound,
+ self.client.get_image_metadata, '')
diff --git a/tempest/api/network/base.py b/tempest/api/network/base.py
index 3b7f9dd..2a3b3f7 100644
--- a/tempest/api/network/base.py
+++ b/tempest/api/network/base.py
@@ -38,24 +38,34 @@
tenant_network_mask_bits with the mask bits to be used to partition the
block defined by tenant-network_cidr
+
+ Finally, it is assumed that the following option is defined in the
+ [service_available] section of etc/tempest.conf
+
+ neutron as True
"""
@classmethod
def setUpClass(cls):
+ super(BaseNetworkTest, cls).setUpClass()
os = clients.Manager()
cls.network_cfg = os.config.network
- if not cls.network_cfg.neutron_available:
+ if not cls.config.service_available.neutron:
raise cls.skipException("Neutron support is required")
cls.client = os.network_client
cls.networks = []
cls.subnets = []
+ cls.ports = []
@classmethod
def tearDownClass(cls):
+ for port in cls.ports:
+ cls.client.delete_port(port['id'])
for subnet in cls.subnets:
cls.client.delete_subnet(subnet['id'])
for network in cls.networks:
cls.client.delete_network(network['id'])
+ super(BaseNetworkTest, cls).tearDownClass()
@classmethod
def create_network(cls, network_name=None):
@@ -93,3 +103,11 @@
subnet = body['subnet']
cls.subnets.append(subnet)
return subnet
+
+ @classmethod
+ def create_port(cls, network):
+ """Wrapper utility that returns a test port."""
+ resp, body = cls.client.create_port(network['id'])
+ port = body['port']
+ cls.ports.append(port)
+ return port
diff --git a/tempest/api/network/test_networks.py b/tempest/api/network/test_networks.py
index 4481853..00a8ef7 100644
--- a/tempest/api/network/test_networks.py
+++ b/tempest/api/network/test_networks.py
@@ -35,6 +35,13 @@
create a subnet for a tenant
list tenant's subnets
show a tenant subnet details
+ port create
+ port delete
+ port list
+ port show
+ port update
+ network update
+ subnet update
v2.0 of the Neutron API is assumed. It is also assumed that the following
options are defined in the [network] section of etc/tempest.conf:
@@ -53,21 +60,28 @@
cls.name = cls.network['name']
cls.subnet = cls.create_subnet(cls.network)
cls.cidr = cls.subnet['cidr']
+ cls.port = cls.create_port(cls.network)
@attr(type='gate')
- def test_create_delete_network_subnet(self):
+ def test_create_update_delete_network_subnet(self):
# Creates a network
name = rand_name('network-')
resp, body = self.client.create_network(name)
self.assertEqual('201', resp['status'])
network = body['network']
- self.assertTrue(network['id'] is not None)
+ net_id = network['id']
+ # Verification of network update
+ new_name = "New_network"
+ resp, body = self.client.update_network(net_id, new_name)
+ self.assertEqual('200', resp['status'])
+ updated_net = body['network']
+ self.assertEqual(updated_net['name'], new_name)
# Find a cidr that is not in use yet and create a subnet with it
cidr = netaddr.IPNetwork(self.network_cfg.tenant_network_cidr)
mask_bits = self.network_cfg.tenant_network_mask_bits
for subnet_cidr in cidr.subnet(mask_bits):
try:
- resp, body = self.client.create_subnet(network['id'],
+ resp, body = self.client.create_subnet(net_id,
str(subnet_cidr))
break
except exceptions.BadRequest as e:
@@ -76,11 +90,17 @@
raise
self.assertEqual('201', resp['status'])
subnet = body['subnet']
- self.assertTrue(subnet['id'] is not None)
- #Deletes subnet and network
- resp, body = self.client.delete_subnet(subnet['id'])
+ subnet_id = subnet['id']
+ # Verification of subnet update
+ new_subnet = "New_subnet"
+ resp, body = self.client.update_subnet(subnet_id, new_subnet)
+ self.assertEqual('200', resp['status'])
+ updated_subnet = body['subnet']
+ self.assertEqual(updated_subnet['name'], new_subnet)
+ # Deletes subnet and network
+ resp, body = self.client.delete_subnet(subnet_id)
self.assertEqual('204', resp['status'])
- resp, body = self.client.delete_network(network['id'])
+ resp, body = self.client.delete_network(net_id)
self.assertEqual('204', resp['status'])
@attr(type='gate')
@@ -97,8 +117,12 @@
# Verify the network exists in the list of all networks
resp, body = self.client.list_networks()
networks = body['networks']
- found = any(n for n in networks if n['id'] == self.network['id'])
- self.assertTrue(found)
+ found = None
+ for n in networks:
+ if (n['id'] == self.network['id']):
+ found = n['id']
+ msg = "Network list doesn't contain created network"
+ self.assertIsNotNone(found, msg)
@attr(type='gate')
def test_show_subnet(self):
@@ -114,5 +138,57 @@
# Verify the subnet exists in the list of all subnets
resp, body = self.client.list_subnets()
subnets = body['subnets']
- found = any(n for n in subnets if n['id'] == self.subnet['id'])
- self.assertTrue(found)
+ found = None
+ for n in subnets:
+ if (n['id'] == self.subnet['id']):
+ found = n['id']
+ msg = "Subnet list doesn't contain created subnet"
+ self.assertIsNotNone(found, msg)
+
+ @attr(type='gate')
+ def test_create_update_delete_port(self):
+        # Verify successful creation, update and deletion of a port
+ resp, body = self.client.create_port(self.network['id'])
+ self.assertEqual('201', resp['status'])
+ port = body['port']
+ # Verification of port update
+ new_port = "New_Port"
+ resp, body = self.client.update_port(port['id'], new_port)
+ self.assertEqual('200', resp['status'])
+ updated_port = body['port']
+ self.assertEqual(updated_port['name'], new_port)
+ # Verification of port delete
+ resp, body = self.client.delete_port(port['id'])
+ self.assertEqual('204', resp['status'])
+
+ @attr(type='gate')
+ def test_show_ports(self):
+ # Verify the details of port
+ resp, body = self.client.show_port(self.port['id'])
+ self.assertEqual('200', resp['status'])
+ port = body['port']
+ self.assertEqual(self.port['id'], port['id'])
+
+ @attr(type='gate')
+ def test_list_ports(self):
+ # Verify the port exists in the list of all ports
+ resp, body = self.client.list_ports()
+ self.assertEqual('200', resp['status'])
+ ports_list = body['ports']
+ found = None
+ for n in ports_list:
+ if (n['id'] == self.port['id']):
+ found = n['id']
+ self.assertIsNotNone(found, "Port list doesn't contain created port")
+
+ @attr(type=['negative', 'gate'])
+ def test_show_non_existent_network(self):
+ non_exist_id = rand_name('network')
+ self.assertRaises(exceptions.NotFound, self.client.show_network,
+ non_exist_id)
+
+ @attr(type=['negative', 'gate'])
+ def test_show_non_existent_subnet(self):
+ non_exist_id = rand_name('subnet')
+ self.assertRaises(exceptions.NotFound, self.client.show_subnet,
+ non_exist_id)
diff --git a/tempest/api/network/test_quotas.py b/tempest/api/network/test_quotas.py
new file mode 100644
index 0000000..ba70f34
--- /dev/null
+++ b/tempest/api/network/test_quotas.py
@@ -0,0 +1,91 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 OpenStack, LLC
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from tempest.api.network import base
+from tempest import clients
+from tempest.common.utils.data_utils import rand_name
+from tempest.test import attr
+
+
+class QuotasTest(base.BaseNetworkTest):
+
+ """
+ Tests the following operations in the Neutron API using the REST client for
+ Neutron:
+
+ list quotas for tenants who have non-default quota values
+ show quotas for a specified tenant
+ update quotas for a specified tenant
+ reset quotas to default values for a specified tenant
+
+ v2.0 of the API is assumed. It is also assumed that the following
+ option is defined in the [service_available] section of etc/tempest.conf:
+
+ neutron as True
+
+ Finally, it is assumed that the per-tenant quota extension API is
+ configured in /etc/neutron/neutron.conf as follows:
+
+ quota_driver = neutron.db.quota_db.DbQuotaDriver
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ super(QuotasTest, cls).setUpClass()
+ admin_manager = clients.AdminManager()
+ cls.admin_client = admin_manager.network_client
+ cls.identity_admin_client = admin_manager.identity_client
+
+ @attr(type='gate')
+ def test_quotas(self):
+ # Add a tenant to conduct the test
+ test_tenant = rand_name('test_tenant_')
+ test_description = rand_name('desc_')
+ _, tenant = self.identity_admin_client.create_tenant(
+ name=test_tenant,
+ description=test_description)
+ tenant_id = tenant['id']
+ self.addCleanup(self.identity_admin_client.delete_tenant, tenant_id)
+ # Change quotas for tenant
+ new_quotas = {'network': 0, 'security_group': 0}
+ resp, quota_set = self.admin_client.update_quotas(tenant_id,
+ **new_quotas)
+ self.assertEqual('200', resp['status'])
+ self.addCleanup(self.admin_client.reset_quotas, tenant_id)
+ self.assertEqual(0, quota_set['network'])
+ self.assertEqual(0, quota_set['security_group'])
+        # Confirm our tenant is listed among tenants with non-default quotas
+ resp, non_default_quotas = self.admin_client.list_quotas()
+ self.assertEqual('200', resp['status'])
+ found = False
+ for qs in non_default_quotas:
+ if qs['tenant_id'] == tenant_id:
+ found = True
+ self.assertTrue(found)
+        # Confirm via the API that quotas were changed as requested for tenant
+ resp, quota_set = self.admin_client.show_quotas(tenant_id)
+ self.assertEqual('200', resp['status'])
+ self.assertEqual(0, quota_set['network'])
+ self.assertEqual(0, quota_set['security_group'])
+ # Reset quotas to default and confirm
+ resp, body = self.admin_client.reset_quotas(tenant_id)
+ self.assertEqual('204', resp['status'])
+ resp, non_default_quotas = self.admin_client.list_quotas()
+ self.assertEqual('200', resp['status'])
+ for q in non_default_quotas:
+ self.assertNotEqual(tenant_id, q['tenant_id'])
diff --git a/tempest/api/object_storage/base.py b/tempest/api/object_storage/base.py
index bf013ec..820328c 100644
--- a/tempest/api/object_storage/base.py
+++ b/tempest/api/object_storage/base.py
@@ -26,6 +26,10 @@
@classmethod
def setUpClass(cls):
+ super(BaseObjectTest, cls).setUpClass()
+ if not cls.config.service_available.swift:
+ skip_msg = ("%s skipped as swift is not available" % cls.__name__)
+ raise cls.skipException(skip_msg)
cls.os = clients.Manager()
cls.object_client = cls.os.object_client
cls.container_client = cls.os.container_client
@@ -42,12 +46,6 @@
cls.data = DataGenerator(cls.identity_admin_client)
- try:
- cls.account_client.list_account_containers()
- except exceptions.EndpointNotFound:
- skip_msg = "No OpenStack Object Storage API endpoint"
- raise cls.skipException(skip_msg)
-
@classmethod
def delete_containers(cls, containers, container_client=None,
object_client=None):
diff --git a/tempest/api/object_storage/test_account_quotas.py b/tempest/api/object_storage/test_account_quotas.py
new file mode 100644
index 0000000..bc050dc
--- /dev/null
+++ b/tempest/api/object_storage/test_account_quotas.py
@@ -0,0 +1,115 @@
+# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
+#
+# Author: Joe H. Rahme <joe.hakim.rahme@enovance.com>
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import testtools
+
+from tempest.api.object_storage import base
+from tempest import clients
+from tempest.common.utils.data_utils import arbitrary_string
+from tempest.common.utils.data_utils import rand_name
+import tempest.config
+from tempest import exceptions
+from tempest.test import attr
+
+
+class AccountQuotasTest(base.BaseObjectTest):
+ accounts_quotas_available = \
+ tempest.config.TempestConfig().object_storage.accounts_quotas_available
+
+ @classmethod
+ def setUpClass(cls):
+ super(AccountQuotasTest, cls).setUpClass()
+ cls.container_name = rand_name(name="TestContainer")
+ cls.container_client.create_container(cls.container_name)
+
+ cls.data.setup_test_user()
+
+ cls.os_reselleradmin = clients.Manager(
+ cls.data.test_user,
+ cls.data.test_password,
+ cls.data.test_tenant)
+
+ # Retrieve the ResellerAdmin role id
+ reseller_role_id = None
+ try:
+ _, roles = cls.os_admin.identity_client.list_roles()
+ reseller_role_id = next(r['id'] for r in roles if r['name']
+ == 'ResellerAdmin')
+ except StopIteration:
+ msg = "No ResellerAdmin role found"
+ raise exceptions.NotFound(msg)
+
+        # Retrieve the ResellerAdmin user id
+ _, users = cls.os_admin.identity_client.get_users()
+ reseller_user_id = next(usr['id'] for usr in users if usr['name']
+ == cls.data.test_user)
+
+ # Retrieve the ResellerAdmin tenant id
+ _, tenants = cls.os_admin.identity_client.list_tenants()
+ reseller_tenant_id = next(tnt['id'] for tnt in tenants if tnt['name']
+ == cls.data.test_tenant)
+
+ # Assign the newly created user the appropriate ResellerAdmin role
+ cls.os_admin.identity_client.assign_user_role(
+ reseller_tenant_id,
+ reseller_user_id,
+ reseller_role_id)
+
+ # Retrieve a ResellerAdmin auth token and use it to set a quota
+ # on the client's account
+ cls.reselleradmin_token = cls.token_client.get_token(
+ cls.data.test_user,
+ cls.data.test_password,
+ cls.data.test_tenant)
+
+ headers = {"X-Auth-Token": cls.reselleradmin_token,
+ "X-Account-Meta-Quota-Bytes": "20"}
+
+ cls.os.custom_account_client.request("POST", "", headers, "")
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.delete_containers([cls.container_name])
+ cls.data.teardown_all()
+
+ # remove the quota from the container
+ headers = {"X-Auth-Token": cls.reselleradmin_token,
+ "X-Remove-Account-Meta-Quota-Bytes": "x"}
+
+ cls.os.custom_account_client.request("POST", "", headers, "")
+
+ super(AccountQuotasTest, cls).tearDownClass()
+
+ @testtools.skipIf(not accounts_quotas_available,
+ "Account Quotas middleware not available")
+ @attr(type="smoke")
+ def test_upload_valid_object(self):
+ object_name = rand_name(name="TestObject")
+ data = arbitrary_string()
+ resp, _ = self.object_client.create_object(self.container_name,
+ object_name, data)
+
+ self.assertEqual(resp["status"], "201")
+
+ @testtools.skipIf(not accounts_quotas_available,
+ "Account Quotas middleware not available")
+ @attr(type=["negative", "smoke"])
+ def test_upload_large_object(self):
+ object_name = rand_name(name="TestObject")
+ data = arbitrary_string(30)
+ self.assertRaises(exceptions.OverLimit,
+ self.object_client.create_object,
+ self.container_name, object_name, data)
diff --git a/tempest/api/object_storage/test_account_services.py b/tempest/api/object_storage/test_account_services.py
index 029f2d5..b443933 100644
--- a/tempest/api/object_storage/test_account_services.py
+++ b/tempest/api/object_storage/test_account_services.py
@@ -15,6 +15,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import random
+
from tempest.api.object_storage import base
from tempest.common.utils.data_utils import rand_name
from tempest import exceptions
@@ -26,12 +28,17 @@
@classmethod
def setUpClass(cls):
super(AccountTest, cls).setUpClass()
- cls.container_name = rand_name(name='TestContainer')
- cls.container_client.create_container(cls.container_name)
+ cls.containers = []
+ for i in xrange(ord('a'), ord('f') + 1):
+ name = rand_name(name='%s-' % chr(i))
+ cls.container_client.create_container(name)
+ cls.containers.append(name)
+ cls.containers_count = len(cls.containers)
@classmethod
def tearDownClass(cls):
- cls.container_client.delete_container(cls.container_name)
+ cls.delete_containers(cls.containers)
+ super(AccountTest, cls).tearDownClass()
@attr(type='smoke')
def test_list_containers(self):
@@ -42,7 +49,59 @@
self.assertIsNotNone(container_list)
container_names = [c['name'] for c in container_list]
- self.assertTrue(self.container_name in container_names)
+ for container_name in self.containers:
+ self.assertIn(container_name, container_names)
+
+ @attr(type='smoke')
+ def test_list_containers_with_limit(self):
+        # list one container, then half of them, then all of them
+ for limit in (1, self.containers_count / 2, self.containers_count):
+ params = {'limit': limit}
+ resp, container_list = \
+ self.account_client.list_account_containers(params=params)
+ self.assertEquals(len(container_list), limit)
+
+ @attr(type='smoke')
+ def test_list_containers_with_marker(self):
+ # list containers using marker param
+        # first expect to get 0 containers, as we specified the last
+        # container as the marker
+ # second expect to get the bottom half of the containers
+ params = {'marker': self.containers[-1]}
+ resp, container_list = \
+ self.account_client.list_account_containers(params=params)
+ self.assertEquals(len(container_list), 0)
+ params = {'marker': self.containers[self.containers_count / 2]}
+ resp, container_list = \
+ self.account_client.list_account_containers(params=params)
+ self.assertEquals(len(container_list), self.containers_count / 2 - 1)
+
+ @attr(type='smoke')
+ def test_list_containers_with_end_marker(self):
+ # list containers using end_marker param
+        # first expect to get 0 containers, as we specified the first
+        # container as the end_marker
+ # second expect to get the top half of the containers
+ params = {'end_marker': self.containers[0]}
+ resp, container_list = \
+ self.account_client.list_account_containers(params=params)
+ self.assertEquals(len(container_list), 0)
+ params = {'end_marker': self.containers[self.containers_count / 2]}
+ resp, container_list = \
+ self.account_client.list_account_containers(params=params)
+ self.assertEquals(len(container_list), self.containers_count / 2)
+
+ @attr(type='smoke')
+ def test_list_containers_with_limit_and_marker(self):
+ # list containers combining marker and limit param
+        # results are always limited by the limit regardless of the marker
+ for marker in random.choice(self.containers):
+ limit = random.randint(0, self.containers_count - 1)
+ params = {'marker': marker,
+ 'limit': limit}
+ resp, container_list = \
+ self.account_client.list_account_containers(params=params)
+ self.assertLessEqual(len(container_list), limit)
@attr(type='smoke')
def test_list_account_metadata(self):
diff --git a/tempest/api/object_storage/test_container_acl.py b/tempest/api/object_storage/test_container_acl.py
new file mode 100644
index 0000000..1a31b91
--- /dev/null
+++ b/tempest/api/object_storage/test_container_acl.py
@@ -0,0 +1,225 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 OpenStack, LLC
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.object_storage import base
+from tempest.common.utils.data_utils import rand_name
+from tempest import exceptions
+from tempest.test import attr
+from tempest.test import HTTP_SUCCESS
+
+
+class ObjectTestACLs(base.BaseObjectTest):
+ @classmethod
+ def setUpClass(cls):
+ super(ObjectTestACLs, cls).setUpClass()
+ cls.data.setup_test_user()
+ cls.new_token = cls.token_client.get_token(cls.data.test_user,
+ cls.data.test_password,
+ cls.data.test_tenant)
+ cls.custom_headers = {'X-Auth-Token': cls.new_token}
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.data.teardown_all()
+ super(ObjectTestACLs, cls).tearDownClass()
+
+ def setUp(self):
+ super(ObjectTestACLs, self).setUp()
+ self.container_name = rand_name(name='TestContainer')
+ self.container_client.create_container(self.container_name)
+
+ def tearDown(self):
+ self.delete_containers([self.container_name])
+ super(ObjectTestACLs, self).tearDown()
+
+ @attr(type=['negative', 'gate'])
+ def test_write_object_without_using_creds(self):
+ # trying to create object with empty headers
+ # X-Auth-Token is not provided
+ object_name = rand_name(name='Object')
+ self.assertRaises(exceptions.Unauthorized,
+ self.custom_object_client.create_object,
+ self.container_name, object_name, 'data')
+
+ @attr(type=['negative', 'gate'])
+ def test_delete_object_without_using_creds(self):
+ # create object
+ object_name = rand_name(name='Object')
+ resp, _ = self.object_client.create_object(self.container_name,
+ object_name, 'data')
+ # trying to delete object with empty headers
+ # X-Auth-Token is not provided
+ self.assertRaises(exceptions.Unauthorized,
+ self.custom_object_client.delete_object,
+ self.container_name, object_name)
+
+ @attr(type=['negative', 'gate'])
+ def test_write_object_with_non_authorized_user(self):
+ # attempt to upload another file using non-authorized user
+        # User-provided token is forbidden. ACLs are not set
+ object_name = rand_name(name='Object')
+ # trying to create object with non-authorized user
+ self.assertRaises(exceptions.Unauthorized,
+ self.custom_object_client.create_object,
+ self.container_name, object_name, 'data',
+ metadata=self.custom_headers)
+
+ @attr(type=['negative', 'gate'])
+ def test_read_object_with_non_authorized_user(self):
+ # attempt to read object using non-authorized user
+        # User-provided token is forbidden. ACLs are not set
+ object_name = rand_name(name='Object')
+ resp, _ = self.object_client.create_object(
+ self.container_name, object_name, 'data')
+ self.assertEqual(resp['status'], '201')
+ # trying to get object with non authorized user token
+ self.assertRaises(exceptions.Unauthorized,
+ self.custom_object_client.get_object,
+ self.container_name, object_name,
+ metadata=self.custom_headers)
+
+ @attr(type=['negative', 'gate'])
+ def test_delete_object_with_non_authorized_user(self):
+ # attempt to delete object using non-authorized user
+        # User-provided token is forbidden. ACLs are not set
+ object_name = rand_name(name='Object')
+ resp, _ = self.object_client.create_object(
+ self.container_name, object_name, 'data')
+ self.assertEqual(resp['status'], '201')
+ # trying to delete object with non-authorized user token
+ self.assertRaises(exceptions.Unauthorized,
+ self.custom_object_client.delete_object,
+ self.container_name, object_name,
+ metadata=self.custom_headers)
+
+ @attr(type=['negative', 'smoke'])
+ def test_read_object_without_rights(self):
+ # attempt to read object using non-authorized user
+ # update X-Container-Read metadata ACL
+ cont_headers = {'X-Container-Read': 'badtenant:baduser'}
+ resp_meta, body = self.container_client.update_container_metadata(
+ self.container_name, metadata=cont_headers,
+ metadata_prefix='')
+ self.assertIn(int(resp_meta['status']), HTTP_SUCCESS)
+ # create object
+ object_name = rand_name(name='Object')
+ resp, _ = self.object_client.create_object(self.container_name,
+ object_name, 'data')
+ self.assertEqual(resp['status'], '201')
+ # Trying to read the object without rights
+ self.assertRaises(exceptions.Unauthorized,
+ self.custom_object_client.get_object,
+ self.container_name, object_name,
+ metadata=self.custom_headers)
+
+ @attr(type=['negative', 'smoke'])
+ def test_write_object_without_rights(self):
+ # attempt to write object using non-authorized user
+ # update X-Container-Write metadata ACL
+ cont_headers = {'X-Container-Write': 'badtenant:baduser'}
+ resp_meta, body = self.container_client.update_container_metadata(
+ self.container_name, metadata=cont_headers,
+ metadata_prefix='')
+ self.assertIn(int(resp_meta['status']), HTTP_SUCCESS)
+ # Trying to write the object without rights
+ object_name = rand_name(name='Object')
+ self.assertRaises(exceptions.Unauthorized,
+ self.custom_object_client.create_object,
+ self.container_name,
+ object_name, 'data',
+ metadata=self.custom_headers)
+
+ @attr(type='smoke')
+ def test_read_object_with_rights(self):
+ # attempt to read object using authorized user
+ # update X-Container-Read metadata ACL
+ cont_headers = {'X-Container-Read':
+ self.data.test_tenant + ':' + self.data.test_user}
+ resp_meta, body = self.container_client.update_container_metadata(
+ self.container_name, metadata=cont_headers,
+ metadata_prefix='')
+ self.assertIn(int(resp_meta['status']), HTTP_SUCCESS)
+ # create object
+ object_name = rand_name(name='Object')
+ resp, _ = self.object_client.create_object(self.container_name,
+ object_name, 'data')
+ self.assertEqual(resp['status'], '201')
+ # Trying to read the object with rights
+ resp, _ = self.custom_object_client.get_object(
+ self.container_name, object_name,
+ metadata=self.custom_headers)
+ self.assertIn(int(resp['status']), HTTP_SUCCESS)
+
+ @attr(type='smoke')
+ def test_write_object_with_rights(self):
+ # attempt to write object using authorized user
+ # update X-Container-Write metadata ACL
+ cont_headers = {'X-Container-Write':
+ self.data.test_tenant + ':' + self.data.test_user}
+ resp_meta, body = self.container_client.update_container_metadata(
+ self.container_name, metadata=cont_headers,
+ metadata_prefix='')
+ self.assertIn(int(resp_meta['status']), HTTP_SUCCESS)
+ # Trying to write the object with rights
+ object_name = rand_name(name='Object')
+ resp, _ = self.custom_object_client.create_object(
+ self.container_name,
+ object_name, 'data',
+ metadata=self.custom_headers)
+ self.assertIn(int(resp['status']), HTTP_SUCCESS)
+
+ @attr(type=['negative', 'smoke'])
+ def test_write_object_without_write_rights(self):
+ # attempt to write object using non-authorized user
+ # update X-Container-Read and X-Container-Write metadata ACL
+ cont_headers = {'X-Container-Read':
+ self.data.test_tenant + ':' + self.data.test_user,
+ 'X-Container-Write': ''}
+ resp_meta, body = self.container_client.update_container_metadata(
+ self.container_name, metadata=cont_headers,
+ metadata_prefix='')
+ self.assertIn(int(resp_meta['status']), HTTP_SUCCESS)
+ # Trying to write the object without write rights
+ object_name = rand_name(name='Object')
+ self.assertRaises(exceptions.Unauthorized,
+ self.custom_object_client.create_object,
+ self.container_name,
+ object_name, 'data',
+ metadata=self.custom_headers)
+
+ @attr(type=['negative', 'smoke'])
+ def test_delete_object_without_write_rights(self):
+ # attempt to delete object using non-authorized user
+ # update X-Container-Read and X-Container-Write metadata ACL
+ cont_headers = {'X-Container-Read':
+ self.data.test_tenant + ':' + self.data.test_user,
+ 'X-Container-Write': ''}
+ resp_meta, body = self.container_client.update_container_metadata(
+ self.container_name, metadata=cont_headers,
+ metadata_prefix='')
+ self.assertIn(int(resp_meta['status']), HTTP_SUCCESS)
+ # create object
+ object_name = rand_name(name='Object')
+ resp, _ = self.object_client.create_object(self.container_name,
+ object_name, 'data')
+ self.assertEqual(resp['status'], '201')
+ # Trying to delete the object without write rights
+ self.assertRaises(exceptions.Unauthorized,
+ self.custom_object_client.delete_object,
+ self.container_name,
+ object_name,
+ metadata=self.custom_headers)
diff --git a/tempest/api/object_storage/test_container_services.py b/tempest/api/object_storage/test_container_services.py
index 5cb6341..eaaed39 100644
--- a/tempest/api/object_storage/test_container_services.py
+++ b/tempest/api/object_storage/test_container_services.py
@@ -31,13 +31,14 @@
@classmethod
def tearDownClass(cls):
cls.delete_containers(cls.containers)
+ super(ContainerTest, cls).tearDownClass()
@attr(type='smoke')
def test_create_container(self):
container_name = rand_name(name='TestContainer')
resp, body = self.container_client.create_container(container_name)
self.containers.append(container_name)
- self.assertTrue(resp['status'] in ('202', '201'))
+ self.assertIn(resp['status'], ('202', '201'))
@attr(type='smoke')
def test_delete_container(self):
diff --git a/tempest/api/object_storage/test_container_sync.py b/tempest/api/object_storage/test_container_sync.py
index ea8637c..d18c2ad 100644
--- a/tempest/api/object_storage/test_container_sync.py
+++ b/tempest/api/object_storage/test_container_sync.py
@@ -50,6 +50,7 @@
def tearDownClass(cls):
for client in cls.clients.values():
cls.delete_containers(cls.containers, client[0], client[1])
+ super(ContainerSyncTest, cls).tearDownClass()
@testtools.skip('Until Bug #1093743 is resolved.')
@attr(type='gate')
@@ -67,9 +68,9 @@
(cont_client[1].base_url, str(cont[1]))}
resp, body = \
cont_client[0].put(str(cont[0]), body=None, headers=headers)
- self.assertTrue(resp['status'] in ('202', '201'),
- 'Error installing X-Container-Sync-To '
- 'for the container "%s"' % (cont[0]))
+ self.assertIn(resp['status'], ('202', '201'),
+ 'Error installing X-Container-Sync-To '
+ 'for the container "%s"' % (cont[0]))
# create object in container
object_name = rand_name(name='TestSyncObject')
data = object_name[::-1] # arbitrary_string()
diff --git a/tempest/api/object_storage/test_object_expiry.py b/tempest/api/object_storage/test_object_expiry.py
index aaa2c64..8703480 100644
--- a/tempest/api/object_storage/test_object_expiry.py
+++ b/tempest/api/object_storage/test_object_expiry.py
@@ -41,11 +41,12 @@
NotFound exception and also non empty container cannot be deleted.
"""
cls.delete_containers([cls.container_name])
+ super(ObjectExpiryTest, cls).tearDownClass()
@testtools.skip('Until Bug #1069849 is resolved.')
@attr(type='gate')
def test_get_object_after_expiry_time(self):
- #TODO(harika-vakadi): similar test case has to be created for
+ # TODO(harika-vakadi): similar test case has to be created for
# "X-Delete-At", after this test case works.
# create object
diff --git a/tempest/api/object_storage/test_object_services.py b/tempest/api/object_storage/test_object_services.py
index 6136216..c599562 100644
--- a/tempest/api/object_storage/test_object_services.py
+++ b/tempest/api/object_storage/test_object_services.py
@@ -21,7 +21,6 @@
from tempest.api.object_storage import base
from tempest.common.utils.data_utils import arbitrary_string
from tempest.common.utils.data_utils import rand_name
-from tempest import exceptions
from tempest.test import attr
from tempest.test import HTTP_SUCCESS
@@ -48,6 +47,7 @@
cls.delete_containers(cls.containers)
# delete the user setup created
cls.data.teardown_all()
+ super(ObjectTest, cls).tearDownClass()
@attr(type='smoke')
def test_create_object(self):
@@ -230,74 +230,6 @@
self.fail("Got exception :%s ; while copying"
" object across containers" % e)
- @attr(type=['negative', 'gate'])
- def test_write_object_without_using_creds(self):
- # trying to create object with empty headers
- object_name = rand_name(name='Object')
- data = arbitrary_string(size=len(object_name),
- base_text=object_name)
- obj_headers = {'Content-Type': 'application/json',
- 'Accept': 'application/json'}
- self.assertRaises(exceptions.Unauthorized,
- self.custom_object_client.create_object,
- self.container_name, object_name, data,
- metadata=obj_headers)
-
- @attr(type=['negative', 'gate'])
- def test_delete_object_without_using_creds(self):
- # create object
- object_name = rand_name(name='Object')
- data = arbitrary_string(size=len(object_name),
- base_text=object_name)
- resp, _ = self.object_client.create_object(self.container_name,
- object_name, data)
- # trying to delete object with empty headers
- self.assertRaises(exceptions.Unauthorized,
- self.custom_object_client.delete_object,
- self.container_name, object_name)
-
- @attr(type=['negative', 'gate'])
- def test_write_object_with_non_authorized_user(self):
- # attempt to upload another file using non-authorized user
- object_name = rand_name(name='Object')
- data = arbitrary_string(size=len(object_name) * 5,
- base_text=object_name)
-
- # trying to create object with non-authorized user
- self.assertRaises(exceptions.Unauthorized,
- self.custom_object_client.create_object,
- self.container_name, object_name, data,
- metadata=self.custom_headers)
-
- @attr(type=['negative', 'gate'])
- def test_read_object_with_non_authorized_user(self):
- object_name = rand_name(name='Object')
- data = arbitrary_string(size=len(object_name) * 5,
- base_text=object_name)
- resp, body = self.object_client.create_object(
- self.container_name, object_name, data)
- self.assertEqual(resp['status'], '201')
-
- # trying to get object with non authorized user token
- self.assertRaises(exceptions.Unauthorized,
- self.custom_object_client.get_object,
- self.container_name, object_name,
- metadata=self.custom_headers)
-
- @attr(type=['negative', 'gate'])
- def test_delete_object_with_non_authorized_user(self):
- object_name = rand_name(name='Object')
- data = arbitrary_string(size=len(object_name) * 5,
- base_text=object_name)
- resp, body = self.object_client.create_object(
- self.container_name, object_name, data)
- self.assertEqual(resp['status'], '201')
- # trying to delete object with non-authorized user token
- self.assertRaises(exceptions.Unauthorized,
- self.custom_object_client.delete_object,
- self.container_name, object_name,
- metadata=self.custom_headers)
-
@attr(type='gate')
def test_get_object_using_temp_url(self):
# access object using temporary URL within expiration time
diff --git a/tempest/api/object_storage/test_object_version.py b/tempest/api/object_storage/test_object_version.py
index cda3e4f..2b93c32 100644
--- a/tempest/api/object_storage/test_object_version.py
+++ b/tempest/api/object_storage/test_object_version.py
@@ -29,6 +29,7 @@
@classmethod
def tearDownClass(cls):
cls.delete_containers(cls.containers)
+ super(ContainerTest, cls).tearDownClass()
def assertContainer(self, container, count, byte, versioned):
resp, _ = self.container_client.list_container_metadata(container)
diff --git a/tempest/api/orchestration/base.py b/tempest/api/orchestration/base.py
index ffa534a..745dd87 100644
--- a/tempest/api/orchestration/base.py
+++ b/tempest/api/orchestration/base.py
@@ -12,11 +12,11 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest.common import log as logging
import time
from tempest import clients
from tempest.common.utils.data_utils import rand_name
+from tempest.openstack.common import log as logging
import tempest.test
@@ -28,10 +28,10 @@
@classmethod
def setUpClass(cls):
-
+ super(BaseOrchestrationTest, cls).setUpClass()
os = clients.OrchestrationManager()
cls.orchestration_cfg = os.config.orchestration
- if not cls.orchestration_cfg.heat_available:
+ if not os.config.service_available.heat:
raise cls.skipException("Heat support is required")
cls.build_timeout = cls.orchestration_cfg.build_timeout
cls.build_interval = cls.orchestration_cfg.build_interval
@@ -107,6 +107,7 @@
def tearDownClass(cls):
cls.clear_stacks()
cls.clear_keypairs()
+ super(BaseOrchestrationTest, cls).tearDownClass()
def wait_for(self, condition):
"""Repeatedly calls condition() until a timeout."""
diff --git a/tempest/api/orchestration/stacks/test_instance_cfn_init.py b/tempest/api/orchestration/stacks/test_instance_cfn_init.py
index 4f22158..fe55ecf 100644
--- a/tempest/api/orchestration/stacks/test_instance_cfn_init.py
+++ b/tempest/api/orchestration/stacks/test_instance_cfn_init.py
@@ -13,13 +13,13 @@
# under the License.
import json
-from tempest.common import log as logging
import testtools
from tempest.api.orchestration import base
from tempest.common.utils.data_utils import rand_name
from tempest.common.utils.linux.remote_client import RemoteClient
import tempest.config
+from tempest.openstack.common import log as logging
from tempest.test import attr
@@ -145,7 +145,7 @@
'ImageId': cls.orchestration_cfg.image_ref
})
- @attr(type='gate')
+ @attr(type='slow')
@testtools.skipIf(existing_keypair, 'Server ssh tests are disabled.')
def test_can_log_into_created_server(self):
@@ -168,7 +168,7 @@
server, 'ec2-user', pkey=self.keypair['private_key'])
self.assertTrue(linux_client.can_authenticate())
- @attr(type='gate')
+ @attr(type='slow')
def test_stack_wait_condition_data(self):
sid = self.stack_identifier
diff --git a/tempest/api/orchestration/stacks/test_stacks.py b/tempest/api/orchestration/stacks/test_stacks.py
index 15979ed..f1f1f7e 100644
--- a/tempest/api/orchestration/stacks/test_stacks.py
+++ b/tempest/api/orchestration/stacks/test_stacks.py
@@ -12,10 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest.common import log as logging
-
from tempest.api.orchestration import base
from tempest.common.utils.data_utils import rand_name
+from tempest.openstack.common import log as logging
from tempest.test import attr
diff --git a/tempest/api/utils.py b/tempest/api/utils.py
index 0738201..69ab7fb 100644
--- a/tempest/api/utils.py
+++ b/tempest/api/utils.py
@@ -17,7 +17,7 @@
"""Common utilities used in testing."""
-from testtools import TestCase
+from tempest.test import BaseTestCase
class skip_unless_attr(object):
@@ -32,7 +32,7 @@
"""Wrapped skipper function."""
testobj = args[0]
if not getattr(testobj, self.attr, False):
- raise TestCase.skipException(self.message)
+ raise BaseTestCase.skipException(self.message)
func(*args, **kw)
_skipper.__name__ = func.__name__
_skipper.__doc__ = func.__doc__
diff --git a/tempest/api/volume/admin/test_multi_backend.py b/tempest/api/volume/admin/test_multi_backend.py
index 086b981..797aa71 100644
--- a/tempest/api/volume/admin/test_multi_backend.py
+++ b/tempest/api/volume/admin/test_multi_backend.py
@@ -13,8 +13,8 @@
# under the License.
from tempest.api.volume import base
-from tempest.common import log as logging
from tempest.common.utils.data_utils import rand_name
+from tempest.openstack.common import log as logging
from tempest.services.volume.json.admin import volume_types_client
from tempest.services.volume.json import volumes_client
from tempest.test import attr
@@ -81,19 +81,19 @@
cls.volume_id_list.append(cls.volume2['id'])
cls.volume_client.wait_for_volume_status(cls.volume2['id'],
'available')
- except Exception:
- LOG.exception("setup failed")
+ except Exception as e:
+ LOG.exception("setup failed: %s" % e)
cls.tearDownClass()
raise
@classmethod
def tearDownClass(cls):
- ## volumes deletion
+ # volumes deletion
for volume_id in cls.volume_id_list:
cls.volume_client.delete_volume(volume_id)
cls.volume_client.wait_for_resource_deletion(volume_id)
- ## volume types deletion
+ # volume types deletion
for volume_type_id in cls.volume_type_id_list:
cls.type_client.delete_volume_type(volume_type_id)
diff --git a/tempest/api/volume/admin/test_volume_types.py b/tempest/api/volume/admin/test_volume_types.py
index 3c4b5d8..822f691 100644
--- a/tempest/api/volume/admin/test_volume_types.py
+++ b/tempest/api/volume/admin/test_volume_types.py
@@ -38,126 +38,111 @@
auth_url,
adm_tenant)
+ def _delete_volume(self, volume_id):
+ resp, _ = self.volumes_client.delete_volume(volume_id)
+ self.assertEqual(202, resp.status)
+
+ def _delete_volume_type(self, volume_type_id):
+ resp, _ = self.client.delete_volume_type(volume_type_id)
+ self.assertEqual(202, resp.status)
+
@attr(type='smoke')
def test_volume_type_list(self):
# List Volume types.
- try:
- resp, body = self.client.list_volume_types()
- self.assertEqual(200, resp.status)
- self.assertTrue(type(body), list)
- except Exception:
- self.fail("Could not list volume types")
+ resp, body = self.client.list_volume_types()
+ self.assertEqual(200, resp.status)
+ self.assertTrue(type(body), list)
@attr(type='smoke')
def test_create_get_delete_volume_with_volume_type_and_extra_specs(self):
# Create/get/delete volume with volume_type and extra spec.
- try:
- volume = {}
- vol_name = rand_name("volume-")
- vol_type_name = rand_name("volume-type-")
- proto = self.config.volume.storage_protocol
- vendor = self.config.volume.vendor_name
- extra_specs = {"storage_protocol": proto,
- "vendor_name": vendor}
- body = {}
- resp, body = self.client.create_volume_type(
- vol_type_name,
- extra_specs=extra_specs)
- self.assertEqual(200, resp.status)
- self.assertTrue('id' in body)
- self.assertTrue('name' in body)
- resp, volume = self.volumes_client.create_volume(
- size=1, display_name=vol_name,
- volume_type=vol_type_name)
- self.assertEqual(200, resp.status)
- self.assertTrue('id' in volume)
- self.assertTrue('display_name' in volume)
- self.assertEqual(volume['display_name'], vol_name,
- "The created volume name is not equal "
- "to the requested name")
- self.assertTrue(volume['id'] is not None,
- "Field volume id is empty or not found.")
- self.volumes_client.wait_for_volume_status(volume['id'],
- 'available')
- resp, fetched_volume = self.volumes_client.get_volume(volume['id'])
- self.assertEqual(200, resp.status)
- self.assertEqual(vol_name, fetched_volume['display_name'],
- 'The fetched Volume is different '
- 'from the created Volume')
- self.assertEqual(volume['id'], fetched_volume['id'],
- 'The fetched Volume is different '
- 'from the created Volume')
- self.assertEqual(vol_type_name, fetched_volume['volume_type'],
- 'The fetched Volume is different '
- 'from the created Volume')
- except Exception:
- self.fail("Could not create correct volume with volume_type")
- finally:
- if volume:
- # Delete the Volume if it was created
- resp, _ = self.volumes_client.delete_volume(volume['id'])
- self.assertEqual(202, resp.status)
-
- if body:
- resp, _ = self.client.delete_volume_type(body['id'])
- self.assertEqual(202, resp.status)
+ volume = {}
+ vol_name = rand_name("volume-")
+ vol_type_name = rand_name("volume-type-")
+ proto = self.config.volume.storage_protocol
+ vendor = self.config.volume.vendor_name
+ extra_specs = {"storage_protocol": proto,
+ "vendor_name": vendor}
+ body = {}
+ resp, body = self.client.create_volume_type(
+ vol_type_name,
+ extra_specs=extra_specs)
+ self.assertEqual(200, resp.status)
+ self.assertIn('id', body)
+ self.addCleanup(self._delete_volume_type, body['id'])
+ self.assertIn('name', body)
+ resp, volume = self.volumes_client.create_volume(
+ size=1, display_name=vol_name,
+ volume_type=vol_type_name)
+ self.assertEqual(200, resp.status)
+ self.assertIn('id', volume)
+ self.addCleanup(self._delete_volume, volume['id'])
+ self.assertIn('display_name', volume)
+ self.assertEqual(volume['display_name'], vol_name,
+ "The created volume name is not equal "
+ "to the requested name")
+ self.assertTrue(volume['id'] is not None,
+ "Field volume id is empty or not found.")
+ self.volumes_client.wait_for_volume_status(volume['id'],
+ 'available')
+ resp, fetched_volume = self.volumes_client.get_volume(volume['id'])
+ self.assertEqual(200, resp.status)
+ self.assertEqual(vol_name, fetched_volume['display_name'],
+ 'The fetched Volume is different '
+ 'from the created Volume')
+ self.assertEqual(volume['id'], fetched_volume['id'],
+ 'The fetched Volume is different '
+ 'from the created Volume')
+ self.assertEqual(vol_type_name, fetched_volume['volume_type'],
+ 'The fetched Volume is different '
+ 'from the created Volume')
@attr(type='smoke')
def test_volume_type_create_delete(self):
# Create/Delete volume type.
- try:
- name = rand_name("volume-type-")
- extra_specs = {"storage_protocol": "iSCSI",
- "vendor_name": "Open Source"}
- resp, body = self.client.create_volume_type(
- name,
- extra_specs=extra_specs)
- self.assertEqual(200, resp.status)
- self.assertTrue('id' in body)
- self.assertTrue('name' in body)
- self.assertEqual(body['name'], name,
- "The created volume_type name is not equal "
- "to the requested name")
- self.assertTrue(body['id'] is not None,
- "Field volume_type id is empty or not found.")
- resp, _ = self.client.delete_volume_type(body['id'])
- self.assertEqual(202, resp.status)
- except Exception:
- self.fail("Could not create a volume_type")
+ name = rand_name("volume-type-")
+ extra_specs = {"storage_protocol": "iSCSI",
+ "vendor_name": "Open Source"}
+ resp, body = self.client.create_volume_type(
+ name,
+ extra_specs=extra_specs)
+ self.assertEqual(200, resp.status)
+ self.assertIn('id', body)
+ self.addCleanup(self._delete_volume_type, body['id'])
+ self.assertIn('name', body)
+ self.assertEqual(body['name'], name,
+ "The created volume_type name is not equal "
+ "to the requested name")
+ self.assertTrue(body['id'] is not None,
+ "Field volume_type id is empty or not found.")
@attr(type='smoke')
def test_volume_type_create_get(self):
# Create/get volume type.
- try:
- body = {}
- name = rand_name("volume-type-")
- extra_specs = {"storage_protocol": "iSCSI",
- "vendor_name": "Open Source"}
- resp, body = self.client.create_volume_type(
- name,
- extra_specs=extra_specs)
- self.assertEqual(200, resp.status)
- self.assertTrue('id' in body)
- self.assertTrue('name' in body)
- self.assertEqual(body['name'], name,
- "The created volume_type name is not equal "
- "to the requested name")
- self.assertTrue(body['id'] is not None,
- "Field volume_type id is empty or not found.")
- resp, fetched_volume_type = self.client.get_volume_type(body['id'])
- self.assertEqual(200, resp.status)
- self.assertEqual(name, fetched_volume_type['name'],
- 'The fetched Volume_type is different '
- 'from the created Volume_type')
- self.assertEqual(str(body['id']), fetched_volume_type['id'],
- 'The fetched Volume_type is different '
- 'from the created Volume_type')
- self.assertEqual(extra_specs, fetched_volume_type['extra_specs'],
- 'The fetched Volume_type is different '
- 'from the created Volume_type')
- except Exception:
- self.fail("Could not create a volume_type")
- finally:
- if body:
- resp, _ = self.client.delete_volume_type(body['id'])
- self.assertEqual(202, resp.status)
+ body = {}
+ name = rand_name("volume-type-")
+ extra_specs = {"storage_protocol": "iSCSI",
+ "vendor_name": "Open Source"}
+ resp, body = self.client.create_volume_type(
+ name,
+ extra_specs=extra_specs)
+ self.assertEqual(200, resp.status)
+ self.assertIn('id', body)
+ self.addCleanup(self._delete_volume_type, body['id'])
+ self.assertIn('name', body)
+ self.assertEqual(body['name'], name,
+ "The created volume_type name is not equal "
+ "to the requested name")
+ self.assertTrue(body['id'] is not None,
+ "Field volume_type id is empty or not found.")
+ resp, fetched_volume_type = self.client.get_volume_type(body['id'])
+ self.assertEqual(200, resp.status)
+ self.assertEqual(name, fetched_volume_type['name'],
+ 'The fetched Volume_type is different '
+ 'from the created Volume_type')
+ self.assertEqual(str(body['id']), fetched_volume_type['id'],
+ 'The fetched Volume_type is different '
+ 'from the created Volume_type')
+ self.assertEqual(extra_specs, fetched_volume_type['extra_specs'],
+ 'The fetched Volume_type is different '
+ 'from the created Volume_type')
diff --git a/tempest/api/volume/admin/test_volume_types_extra_specs.py b/tempest/api/volume/admin/test_volume_types_extra_specs.py
index 417f296..7d94f58 100644
--- a/tempest/api/volume/admin/test_volume_types_extra_specs.py
+++ b/tempest/api/volume/admin/test_volume_types_extra_specs.py
@@ -37,68 +37,59 @@
@attr(type='smoke')
def test_volume_type_extra_specs_list(self):
# List Volume types extra specs.
- try:
- extra_specs = {"spec1": "val1"}
- resp, body = self.client.create_volume_type_extra_specs(
- self.volume_type['id'], extra_specs)
- self.assertEqual(200, resp.status)
- self.assertEqual(extra_specs, body,
- "Volume type extra spec incorrectly created")
- resp, body = self.client.list_volume_types_extra_specs(
- self.volume_type['id'])
- self.assertEqual(200, resp.status)
- self.assertTrue(type(body), dict)
- self.assertTrue('spec1' in body, "Incorrect volume type extra"
- " spec returned")
- except Exception:
- self.fail("Could not list volume types extra specs")
+ extra_specs = {"spec1": "val1"}
+ resp, body = self.client.create_volume_type_extra_specs(
+ self.volume_type['id'], extra_specs)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(extra_specs, body,
+ "Volume type extra spec incorrectly created")
+ resp, body = self.client.list_volume_types_extra_specs(
+ self.volume_type['id'])
+ self.assertEqual(200, resp.status)
+ self.assertTrue(type(body), dict)
+ self.assertTrue('spec1' in body, "Incorrect volume type extra"
+ " spec returned")
@attr(type='gate')
def test_volume_type_extra_specs_update(self):
# Update volume type extra specs
- try:
- extra_specs = {"spec2": "val1"}
- resp, body = self.client.create_volume_type_extra_specs(
- self.volume_type['id'], extra_specs)
- self.assertEqual(200, resp.status)
- self.assertEqual(extra_specs, body,
- "Volume type extra spec incorrectly created")
+ extra_specs = {"spec2": "val1"}
+ resp, body = self.client.create_volume_type_extra_specs(
+ self.volume_type['id'], extra_specs)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(extra_specs, body,
+ "Volume type extra spec incorrectly created")
- extra_spec = {"spec2": "val2"}
- resp, body = self.client.update_volume_type_extra_specs(
- self.volume_type['id'],
- extra_spec.keys()[0],
- extra_spec)
- self.assertEqual(200, resp.status)
- self.assertTrue('spec2' in body,
- "Volume type extra spec incorrectly updated")
- self.assertEqual(extra_spec['spec2'], body['spec2'],
- "Volume type extra spec incorrectly updated")
- except Exception:
- self.fail("Couldnt update volume type extra spec")
+ extra_spec = {"spec2": "val2"}
+ resp, body = self.client.update_volume_type_extra_specs(
+ self.volume_type['id'],
+ extra_spec.keys()[0],
+ extra_spec)
+ self.assertEqual(200, resp.status)
+ self.assertTrue('spec2' in body,
+ "Volume type extra spec incorrectly updated")
+ self.assertEqual(extra_spec['spec2'], body['spec2'],
+ "Volume type extra spec incorrectly updated")
@attr(type='smoke')
def test_volume_type_extra_spec_create_get_delete(self):
# Create/Get/Delete volume type extra spec.
- try:
- extra_specs = {"spec3": "val1"}
- resp, body = self.client.create_volume_type_extra_specs(
- self.volume_type['id'],
- extra_specs)
- self.assertEqual(200, resp.status)
- self.assertEqual(extra_specs, body,
- "Volume type extra spec incorrectly created")
+ extra_specs = {"spec3": "val1"}
+ resp, body = self.client.create_volume_type_extra_specs(
+ self.volume_type['id'],
+ extra_specs)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(extra_specs, body,
+ "Volume type extra spec incorrectly created")
- resp, _ = self.client.get_volume_type_extra_specs(
- self.volume_type['id'],
- extra_specs.keys()[0])
- self.assertEqual(200, resp.status)
- self.assertEqual(extra_specs, body,
- "Volume type extra spec incorrectly fetched")
+ resp, _ = self.client.get_volume_type_extra_specs(
+ self.volume_type['id'],
+ extra_specs.keys()[0])
+ self.assertEqual(200, resp.status)
+ self.assertEqual(extra_specs, body,
+ "Volume type extra spec incorrectly fetched")
- resp, _ = self.client.delete_volume_type_extra_specs(
- self.volume_type['id'],
- extra_specs.keys()[0])
- self.assertEqual(202, resp.status)
- except Exception:
- self.fail("Could not create a volume_type extra spec")
+ resp, _ = self.client.delete_volume_type_extra_specs(
+ self.volume_type['id'],
+ extra_specs.keys()[0])
+ self.assertEqual(202, resp.status)
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index fc510cb..7781647 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -18,9 +18,8 @@
import time
from tempest import clients
-from tempest.common import log as logging
-from tempest.common.utils.data_utils import rand_name
-from tempest import exceptions
+from tempest.common import isolated_creds
+from tempest.openstack.common import log as logging
import tempest.test
LOG = logging.getLogger(__name__)
@@ -32,10 +31,15 @@
@classmethod
def setUpClass(cls):
- cls.isolated_creds = []
+ super(BaseVolumeTest, cls).setUpClass()
+ cls.isolated_creds = isolated_creds.IsolatedCreds(cls.__name__)
+
+ if not cls.config.service_available.cinder:
+ skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
+ raise cls.skipException(skip_msg)
if cls.config.compute.allow_tenant_isolation:
- creds = cls._get_isolated_creds()
+ creds = cls.isolated_creds.get_primary_creds()
username, tenant_name, password = creds
os = clients.Manager(username=username,
password=password,
@@ -55,72 +59,18 @@
cls.snapshots = []
cls.volumes = []
- skip_msg = ("%s skipped as Cinder endpoint is not available" %
- cls.__name__)
- try:
- cls.volumes_client.keystone_auth(cls.os.username,
- cls.os.password,
- cls.os.auth_url,
- cls.volumes_client.service,
- cls.os.tenant_name)
- except exceptions.EndpointNotFound:
- cls.clear_isolated_creds()
- raise cls.skipException(skip_msg)
-
- @classmethod
- def _get_identity_admin_client(cls):
- """
- Returns an instance of the Identity Admin API client
- """
- os = clients.ComputeAdminManager()
- return os.identity_client
-
- @classmethod
- def _get_isolated_creds(cls):
- """
- Creates a new set of user/tenant/password credentials for a
- **regular** user of the Volume API so that a test case can
- operate in an isolated tenant container.
- """
- admin_client = cls._get_identity_admin_client()
- rand_name_root = rand_name(cls.__name__)
- if cls.isolated_creds:
- # Main user already created. Create the alt one...
- rand_name_root += '-alt'
- username = rand_name_root + "-user"
- email = rand_name_root + "@example.com"
- tenant_name = rand_name_root + "-tenant"
- tenant_desc = tenant_name + "-desc"
- password = "pass"
-
- resp, tenant = admin_client.create_tenant(name=tenant_name,
- description=tenant_desc)
- resp, user = admin_client.create_user(username,
- password,
- tenant['id'],
- email)
- # Store the complete creds (including UUID ids...) for later
- # but return just the username, tenant_name, password tuple
- # that the various clients will use.
- cls.isolated_creds.append((user, tenant))
-
- return username, tenant_name, password
-
- @classmethod
- def clear_isolated_creds(cls):
- if not cls.isolated_creds:
- return
- admin_client = cls._get_identity_admin_client()
-
- for user, tenant in cls.isolated_creds:
- admin_client.delete_user(user['id'])
- admin_client.delete_tenant(tenant['id'])
+ cls.volumes_client.keystone_auth(cls.os.username,
+ cls.os.password,
+ cls.os.auth_url,
+ cls.volumes_client.service,
+ cls.os.tenant_name)
@classmethod
def tearDownClass(cls):
cls.clear_snapshots()
cls.clear_volumes()
- cls.clear_isolated_creds()
+ cls.isolated_creds.clear_isolated_creds()
+ super(BaseVolumeTest, cls).tearDownClass()
@classmethod
def create_snapshot(cls, volume_id=1, **kwargs):
@@ -133,7 +83,7 @@
'available')
return snapshot
- #NOTE(afazekas): these create_* and clean_* could be defined
+ # NOTE(afazekas): these create_* and clean_* could be defined
# only in a single location in the source, and could be more general.
@classmethod
@@ -201,6 +151,13 @@
msg = ("Missing Volume Admin API credentials "
"in configuration.")
raise cls.skipException(msg)
-
- cls.os_adm = clients.AdminManager(interface=cls._interface)
+ if cls.config.compute.allow_tenant_isolation:
+ creds = cls.isolated_creds.get_admin_creds()
+ admin_username, admin_tenant_name, admin_password = creds
+ cls.os_adm = clients.Manager(username=admin_username,
+ password=admin_password,
+ tenant_name=admin_tenant_name,
+ interface=cls._interface)
+ else:
+ cls.os_adm = clients.AdminManager(interface=cls._interface)
cls.client = cls.os_adm.volume_types_client
diff --git a/tempest/api/volume/test_volumes_actions.py b/tempest/api/volume/test_volumes_actions.py
index 56a3006..5861497 100644
--- a/tempest/api/volume/test_volumes_actions.py
+++ b/tempest/api/volume/test_volumes_actions.py
@@ -82,7 +82,7 @@
try:
resp, volume = self.client.get_volume(self.volume['id'])
self.assertEqual(200, resp.status)
- self.assertTrue('attachments' in volume)
+ self.assertIn('attachments', volume)
attachment = volume['attachments'][0]
self.assertEqual(mountpoint, attachment['device'])
self.assertEqual(self.server['id'], attachment['server_id'])
@@ -106,3 +106,4 @@
self.addCleanup(self.image_client.delete_image, image_id)
self.assertEqual(202, resp.status)
self.image_client.wait_for_image_status(image_id, 'active')
+ self.client.wait_for_volume_status(self.volume['id'], 'available')
diff --git a/tempest/api/volume/test_volumes_get.py b/tempest/api/volume/test_volumes_get.py
index eda7153..2e90f16 100644
--- a/tempest/api/volume/test_volumes_get.py
+++ b/tempest/api/volume/test_volumes_get.py
@@ -28,76 +28,65 @@
super(VolumesGetTest, cls).setUpClass()
cls.client = cls.volumes_client
+ def _delete_volume(self, volume_id):
+ resp, _ = self.client.delete_volume(volume_id)
+ self.assertEqual(202, resp.status)
+ self.client.wait_for_resource_deletion(volume_id)
+
def _volume_create_get_delete(self, **kwargs):
# Create a volume, Get it's details and Delete the volume
- try:
- volume = {}
- v_name = rand_name('Volume')
- metadata = {'Type': 'Test'}
- #Create a volume
- resp, volume = self.client.create_volume(size=1,
- display_name=v_name,
- metadata=metadata,
- **kwargs)
- self.assertEqual(200, resp.status)
- self.assertTrue('id' in volume)
- self.assertTrue('display_name' in volume)
- self.assertEqual(volume['display_name'], v_name,
- "The created volume name is not equal "
- "to the requested name")
- self.assertTrue(volume['id'] is not None,
- "Field volume id is empty or not found.")
- self.client.wait_for_volume_status(volume['id'], 'available')
- # Get Volume information
- resp, fetched_volume = self.client.get_volume(volume['id'])
- self.assertEqual(200, resp.status)
- self.assertEqual(v_name,
- fetched_volume['display_name'],
- 'The fetched Volume is different '
- 'from the created Volume')
- self.assertEqual(volume['id'],
- fetched_volume['id'],
- 'The fetched Volume is different '
- 'from the created Volume')
- self.assertEqual(metadata,
- fetched_volume['metadata'],
- 'The fetched Volume is different '
- 'from the created Volume')
- except Exception:
- self.fail("Could not create a volume")
- finally:
- if volume:
- # Delete the Volume if it was created
- resp, _ = self.client.delete_volume(volume['id'])
- self.assertEqual(202, resp.status)
- self.client.wait_for_resource_deletion(volume['id'])
+ volume = {}
+ v_name = rand_name('Volume')
+ metadata = {'Type': 'Test'}
+ # Create a volume
+ resp, volume = self.client.create_volume(size=1,
+ display_name=v_name,
+ metadata=metadata,
+ **kwargs)
+ self.assertEqual(200, resp.status)
+ self.assertIn('id', volume)
+ self.addCleanup(self._delete_volume, volume['id'])
+ self.assertIn('display_name', volume)
+ self.assertEqual(volume['display_name'], v_name,
+ "The created volume name is not equal "
+ "to the requested name")
+ self.assertTrue(volume['id'] is not None,
+ "Field volume id is empty or not found.")
+ self.client.wait_for_volume_status(volume['id'], 'available')
+ # Get Volume information
+ resp, fetched_volume = self.client.get_volume(volume['id'])
+ self.assertEqual(200, resp.status)
+ self.assertEqual(v_name,
+ fetched_volume['display_name'],
+ 'The fetched Volume is different '
+ 'from the created Volume')
+ self.assertEqual(volume['id'],
+ fetched_volume['id'],
+ 'The fetched Volume is different '
+ 'from the created Volume')
+ self.assertEqual(metadata,
+ fetched_volume['metadata'],
+ 'The fetched Volume is different '
+ 'from the created Volume')
@attr(type='gate')
def test_volume_get_metadata_none(self):
# Create a volume without passing metadata, get details, and delete
- try:
- volume = {}
- v_name = rand_name('Volume-')
- # Create a volume without metadata
- resp, volume = self.client.create_volume(size=1,
- display_name=v_name,
- metadata={})
- self.assertEqual(200, resp.status)
- self.assertTrue('id' in volume)
- self.assertTrue('display_name' in volume)
- self.client.wait_for_volume_status(volume['id'], 'available')
- #GET Volume
- resp, fetched_volume = self.client.get_volume(volume['id'])
- self.assertEqual(200, resp.status)
- self.assertEqual(fetched_volume['metadata'], {})
- except Exception:
- self.fail("Could not get volume metadata")
- finally:
- if volume:
- # Delete the Volume if it was created
- resp, _ = self.client.delete_volume(volume['id'])
- self.assertEqual(202, resp.status)
- self.client.wait_for_resource_deletion(volume['id'])
+ volume = {}
+ v_name = rand_name('Volume-')
+ # Create a volume without metadata
+ resp, volume = self.client.create_volume(size=1,
+ display_name=v_name,
+ metadata={})
+ self.assertEqual(200, resp.status)
+ self.assertIn('id', volume)
+ self.addCleanup(self._delete_volume, volume['id'])
+ self.assertIn('display_name', volume)
+ self.client.wait_for_volume_status(volume['id'], 'available')
+ # GET Volume
+ resp, fetched_volume = self.client.get_volume(volume['id'])
+ self.assertEqual(200, resp.status)
+ self.assertEqual(fetched_volume['metadata'], {})
@attr(type='smoke')
def test_volume_create_get_delete(self):
diff --git a/tempest/api/volume/test_volumes_negative.py b/tempest/api/volume/test_volumes_negative.py
index eea37e0..e2b15a4 100644
--- a/tempest/api/volume/test_volumes_negative.py
+++ b/tempest/api/volume/test_volumes_negative.py
@@ -32,7 +32,7 @@
@attr(type='gate')
def test_volume_get_nonexistant_volume_id(self):
# Should not be able to get a nonexistant volume
- #Creating a nonexistant volume id
+        # Creating a nonexistent volume id
volume_id_list = []
resp, volumes = self.client.list_volumes()
for i in range(len(volumes)):
@@ -41,7 +41,7 @@
non_exist_id = rand_name('999')
if non_exist_id not in volume_id_list:
break
- #Trying to Get a non existant volume
+        # Trying to Get a nonexistent volume
self.assertRaises(exceptions.NotFound, self.client.get_volume,
non_exist_id)
diff --git a/tempest/api/volume/test_volumes_snapshots.py b/tempest/api/volume/test_volumes_snapshots.py
index 602209a..0328b44 100644
--- a/tempest/api/volume/test_volumes_snapshots.py
+++ b/tempest/api/volume/test_volumes_snapshots.py
@@ -13,8 +13,8 @@
# under the License.
from tempest.api.volume import base
-from tempest.common import log as logging
from tempest.common.utils.data_utils import rand_name
+from tempest.openstack.common import log as logging
from tempest.test import attr
LOG = logging.getLogger(__name__)
diff --git a/tempest/cli/README.rst b/tempest/cli/README.rst
index 3eae492..f86adf3 100644
--- a/tempest/cli/README.rst
+++ b/tempest/cli/README.rst
@@ -12,7 +12,7 @@
Why are these tests in tempest?
-------------------------------
These tests exist here because it is extremely difficult to build a
-functional enough environment in the python-*client unit tests to
+functional enough environment in the python-\*client unit tests to
provide this kind of testing. Because we already put up a cloud in the
gate with devstack + tempest it was decided it was better to have
these as a side tree in tempest instead of another QA effort which
diff --git a/tempest/cli/__init__.py b/tempest/cli/__init__.py
index 0e1d6db..f04d23f 100644
--- a/tempest/cli/__init__.py
+++ b/tempest/cli/__init__.py
@@ -22,7 +22,7 @@
from oslo.config import cfg
import tempest.cli.output_parser
-from tempest.common import log as logging
+from tempest.openstack.common import log as logging
import tempest.test
@@ -35,6 +35,9 @@
cfg.StrOpt('cli_dir',
default='/usr/local/bin/',
help="directory where python client binaries are located"),
+ cfg.IntOpt('timeout',
+ default=15,
+ help="Number of seconds to wait on a CLI timeout"),
]
CONF = cfg.CONF
@@ -82,6 +85,11 @@
return self.cmd_with_auth(
'cinder', action, flags, params, admin, fail_ok)
+ def neutron(self, action, flags='', params='', admin=True, fail_ok=False):
+ """Executes neutron command for the given action."""
+ return self.cmd_with_auth(
+ 'neutron', action, flags, params, admin, fail_ok)
+
def cmd_with_auth(self, cmd, action, flags='', params='',
admin=True, fail_ok=False):
"""Executes given command with auth attributes appended."""
diff --git a/tempest/cli/output_parser.py b/tempest/cli/output_parser.py
index 3ee3098..bfd7f9e 100644
--- a/tempest/cli/output_parser.py
+++ b/tempest/cli/output_parser.py
@@ -17,11 +17,10 @@
"""Collection of utilities for parsing CLI clients output."""
-
-from tempest.common import log as logging
-
import re
+from tempest.openstack.common import log as logging
+
LOG = logging.getLogger(__name__)
diff --git a/tempest/cli/simple_read_only/test_compute.py b/tempest/cli/simple_read_only/test_compute.py
index 561fd00..4c7f604 100644
--- a/tempest/cli/simple_read_only/test_compute.py
+++ b/tempest/cli/simple_read_only/test_compute.py
@@ -21,8 +21,7 @@
import testtools
import tempest.cli
-from tempest.common import log as logging
-
+from tempest.openstack.common import log as logging
CONF = cfg.CONF
@@ -69,6 +68,8 @@
def test_admin_credentials(self):
self.nova('credentials')
+ @testtools.skipIf(CONF.service_available.neutron,
+ "Neutron does not provide this feature")
def test_admin_dns_domains(self):
self.nova('dns-domains')
@@ -175,7 +176,7 @@
self.nova('list', flags='--debug')
def test_admin_timeout(self):
- self.nova('list', flags='--timeout 2')
+ self.nova('list', flags='--timeout %d' % CONF.cli.timeout)
def test_admin_timing(self):
self.nova('list', flags='--timing')
diff --git a/tempest/cli/simple_read_only/test_compute_manage.py b/tempest/cli/simple_read_only/test_compute_manage.py
index 802a206..1848827 100644
--- a/tempest/cli/simple_read_only/test_compute_manage.py
+++ b/tempest/cli/simple_read_only/test_compute_manage.py
@@ -18,7 +18,7 @@
import subprocess
import tempest.cli
-from tempest.common import log as logging
+from tempest.openstack.common import log as logging
LOG = logging.getLogger(__name__)
diff --git a/tempest/cli/simple_read_only/test_glance.py b/tempest/cli/simple_read_only/test_glance.py
index fa77e8a..3d58451 100644
--- a/tempest/cli/simple_read_only/test_glance.py
+++ b/tempest/cli/simple_read_only/test_glance.py
@@ -19,7 +19,7 @@
import subprocess
import tempest.cli
-from tempest.common import log as logging
+from tempest.openstack.common import log as logging
LOG = logging.getLogger(__name__)
diff --git a/tempest/cli/simple_read_only/test_keystone.py b/tempest/cli/simple_read_only/test_keystone.py
index 3bc8b3e..4c7982b 100644
--- a/tempest/cli/simple_read_only/test_keystone.py
+++ b/tempest/cli/simple_read_only/test_keystone.py
@@ -18,8 +18,12 @@
import re
import subprocess
+from oslo.config import cfg
+
import tempest.cli
-from tempest.common import log as logging
+from tempest.openstack.common import log as logging
+
+CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@@ -117,4 +121,4 @@
self.keystone('catalog', flags='--debug')
def test_admin_timeout(self):
- self.keystone('catalog', flags='--timeout 15')
+ self.keystone('catalog', flags='--timeout %d' % CONF.cli.timeout)
diff --git a/tempest/cli/simple_read_only/test_neutron.py b/tempest/cli/simple_read_only/test_neutron.py
new file mode 100644
index 0000000..7b8340d
--- /dev/null
+++ b/tempest/cli/simple_read_only/test_neutron.py
@@ -0,0 +1,118 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import re
+import subprocess
+
+from oslo.config import cfg
+
+import tempest.cli
+from tempest.openstack.common import log as logging
+
+CONF = cfg.CONF
+
+LOG = logging.getLogger(__name__)
+
+
+class SimpleReadOnlyNeutronClientTest(tempest.cli.ClientTestBase):
+ """Basic, read-only tests for Neutron CLI client.
+
+ Checks return values and output of read-only commands.
+ These tests do not presume any content, nor do they create
+ their own. They only verify the structure of output if present.
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ if (not CONF.service_available.neutron):
+            msg = "Skipping all Neutron cli tests because it is not available"
+ raise cls.skipException(msg)
+ super(SimpleReadOnlyNeutronClientTest, cls).setUpClass()
+
+ def test_neutron_fake_action(self):
+ self.assertRaises(subprocess.CalledProcessError,
+ self.neutron,
+ 'this-does-not-exist')
+
+ def test_neutron_net_list(self):
+ self.neutron('net-list')
+
+ def test_neutron_ext_list(self):
+ ext = self.parser.listing(self.neutron('ext-list'))
+ self.assertTableStruct(ext, ['alias', 'name'])
+
+ def test_neutron_dhcp_agent_list_hosting_net(self):
+ self.neutron('dhcp-agent-list-hosting-net',
+ params=CONF.compute.fixed_network_name)
+
+ def test_neutron_agent_list(self):
+ agents = self.parser.listing(self.neutron('agent-list'))
+ field_names = ['id', 'agent_type', 'host', 'alive', 'admin_state_up']
+ self.assertTableStruct(agents, field_names)
+
+ def test_neutron_floatingip_list(self):
+ self.neutron('floatingip-list')
+
+ def test_neutron_net_external_list(self):
+ self.neutron('net-external-list')
+
+ def test_neutron_port_list(self):
+ self.neutron('port-list')
+
+ def test_neutron_quota_list(self):
+ self.neutron('quota-list')
+
+ def test_neutron_router_list(self):
+ self.neutron('router-list')
+
+ def test_neutron_security_group_list(self):
+ security_grp = self.parser.listing(self.neutron('security-group-list'))
+ self.assertTableStruct(security_grp, ['id', 'name', 'description'])
+
+ def test_neutron_security_group_rule_list(self):
+ self.neutron('security-group-rule-list')
+
+ def test_neutron_subnet_list(self):
+ self.neutron('subnet-list')
+
+ def test_neutron_help(self):
+ help_text = self.neutron('help')
+ lines = help_text.split('\n')
+ self.assertTrue(lines[0].startswith('usage: neutron'))
+
+ commands = []
+ cmds_start = lines.index('Commands for API v2.0:')
+ command_pattern = re.compile('^ {2}([a-z0-9\-\_]+)')
+ for line in lines[cmds_start:]:
+ match = command_pattern.match(line)
+ if match:
+ commands.append(match.group(1))
+ commands = set(commands)
+ wanted_commands = set(('net-create', 'subnet-list', 'port-delete',
+ 'router-show', 'agent-update', 'help'))
+ self.assertFalse(wanted_commands - commands)
+
+ # Optional arguments:
+
+ def test_neutron_version(self):
+ self.neutron('', flags='--version')
+
+ def test_neutron_debug_net_list(self):
+ self.neutron('net-list', flags='--debug')
+
+ def test_neutron_quiet_net_list(self):
+ self.neutron('net-list', flags='--quiet')
diff --git a/tempest/clients.py b/tempest/clients.py
index d7a740a..195cb89 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -15,9 +15,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest.common import log as logging
from tempest import config
from tempest import exceptions
+from tempest.openstack.common import log as logging
from tempest.services import botoclients
from tempest.services.compute.json.aggregates_client import \
AggregatesClientJSON
@@ -75,12 +75,14 @@
EndPointClientJSON
from tempest.services.identity.v3.json.identity_client import \
IdentityV3ClientJSON
+from tempest.services.identity.v3.json.identity_client import V3TokenClientJSON
from tempest.services.identity.v3.json.policy_client import PolicyClientJSON
from tempest.services.identity.v3.json.service_client import \
ServiceClientJSON
from tempest.services.identity.v3.xml.endpoints_client import EndPointClientXML
from tempest.services.identity.v3.xml.identity_client import \
IdentityV3ClientXML
+from tempest.services.identity.v3.xml.identity_client import V3TokenClientXML
from tempest.services.identity.v3.xml.policy_client import PolicyClientXML
from tempest.services.identity.v3.xml.service_client import \
ServiceClientXML
@@ -239,6 +241,11 @@
"xml": HypervisorClientXML,
}
+V3_TOKEN_CLIENT = {
+ "json": V3TokenClientJSON,
+ "xml": V3TokenClientXML,
+}
+
class Manager(object):
@@ -267,8 +274,9 @@
if None in (self.username, self.password, self.tenant_name):
msg = ("Missing required credentials. "
- "username: %(username)s, password: %(password)s, "
- "tenant_name: %(tenant_name)s") % locals()
+ "username: %(u)s, password: %(p)s, "
+ "tenant_name: %(t)s" %
+ {'u': username, 'p': password, 't': tenant_name})
raise exceptions.InvalidConfiguration(msg)
self.auth_url = self.config.identity.uri
@@ -288,7 +296,8 @@
try:
self.servers_client = SERVERS_CLIENTS[interface](*client_args)
self.limits_client = LIMITS_CLIENTS[interface](*client_args)
- self.images_client = IMAGES_CLIENTS[interface](*client_args)
+ if self.config.service_available.glance:
+ self.images_client = IMAGES_CLIENTS[interface](*client_args)
self.keypairs_client = KEYPAIRS_CLIENTS[interface](*client_args)
self.quotas_client = QUOTAS_CLIENTS[interface](*client_args)
self.flavors_client = FLAVORS_CLIENTS[interface](*client_args)
@@ -319,6 +328,7 @@
TENANT_USAGES_CLIENT[interface](*client_args)
self.policy_client = POLICY_CLIENT[interface](*client_args)
self.hypervisor_client = HYPERVISOR_CLIENT[interface](*client_args)
+ self.token_v3_client = V3_TOKEN_CLIENT[interface](*client_args)
if client_args_v3_auth:
self.servers_client_v3_auth = SERVERS_CLIENTS[interface](
@@ -332,8 +342,9 @@
self.network_client = NetworkClient(*client_args)
self.hosts_client = HostsClientJSON(*client_args)
self.account_client = AccountClient(*client_args)
- self.image_client = ImageClientJSON(*client_args)
- self.image_client_v2 = ImageClientV2JSON(*client_args)
+ if self.config.service_available.glance:
+ self.image_client = ImageClientJSON(*client_args)
+ self.image_client_v2 = ImageClientV2JSON(*client_args)
self.container_client = ContainerClient(*client_args)
self.object_client = ObjectClient(*client_args)
self.orchestration_client = OrchestrationClient(*client_args)
diff --git a/tempest/common/glance_http.py b/tempest/common/glance_http.py
index cd33a22..831874d 100644
--- a/tempest/common/glance_http.py
+++ b/tempest/common/glance_http.py
@@ -34,9 +34,8 @@
import OpenSSL
-from tempest.common import log as logging
from tempest import exceptions as exc
-
+from tempest.openstack.common import log as logging
LOG = logging.getLogger(__name__)
USER_AGENT = 'tempest'
@@ -125,11 +124,12 @@
conn.request(method, conn_url, **kwargs)
resp = conn.getresponse()
except socket.gaierror as e:
- message = "Error finding address for %(url)s: %(e)s" % locals()
+ message = ("Error finding address for %(url)s: %(e)s" %
+ {'url': url, 'e': e})
raise exc.EndpointNotFound(message)
except (socket.error, socket.timeout) as e:
- endpoint = self.endpoint
- message = "Error communicating with %(endpoint)s %(e)s" % locals()
+ message = ("Error communicating with %(endpoint)s %(e)s" %
+ {'endpoint': self.endpoint, 'e': e})
raise exc.TimeoutException(message)
body_iter = ResponseBodyIterator(resp)
diff --git a/tempest/common/isolated_creds.py b/tempest/common/isolated_creds.py
new file mode 100644
index 0000000..22e1bd2
--- /dev/null
+++ b/tempest/common/isolated_creds.py
@@ -0,0 +1,248 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import keystoneclient.v2_0.client
+
+from tempest import clients
+from tempest.common.utils.data_utils import rand_name
+from tempest import config
+from tempest import exceptions
+from tempest.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+class IsolatedCreds(object):
+
+ def __init__(self, name, tempest_client=True, interface='json',
+ password='pass'):
+ self.isolated_creds = {}
+ self.name = name
+ self.config = config.TempestConfig()
+ self.tempest_client = tempest_client
+ self.interface = interface
+ self.password = password
+ self.admin_client = self._get_identity_admin_client()
+
+ def _get_keystone_client(self):
+ username = self.config.identity.admin_username
+ password = self.config.identity.admin_password
+ tenant_name = self.config.identity.admin_tenant_name
+ auth_url = self.config.identity.uri
+ dscv = self.config.identity.disable_ssl_certificate_validation
+ return keystoneclient.v2_0.client.Client(username=username,
+ password=password,
+ tenant_name=tenant_name,
+ auth_url=auth_url,
+ insecure=dscv)
+
+ def _get_identity_admin_client(self):
+ """
+ Returns an instance of the Identity Admin API client
+ """
+ if self.tempest_client:
+ os = clients.AdminManager(interface=self.interface)
+ admin_client = os.identity_client
+ else:
+ admin_client = self._get_keystone_client()
+ return admin_client
+
+ def _create_tenant(self, name, description):
+ if self.tempest_client:
+ resp, tenant = self.admin_client.create_tenant(
+ name=name, description=description)
+ else:
+ tenant = self.admin_client.tenants.create(name,
+ description=description)
+ return tenant
+
+ def _get_tenant_by_name(self, name):
+ if self.tempest_client:
+ resp, tenant = self.admin_client.get_tenant_by_name(name)
+ else:
+ tenants = self.admin_client.tenants.list()
+ for ten in tenants:
+ if ten['name'] == name:
+ tenant = ten
+ raise exceptions.NotFound('No such tenant')
+ return tenant
+
+ def _create_user(self, username, password, tenant, email):
+ if self.tempest_client:
+ resp, user = self.admin_client.create_user(username, password,
+ tenant['id'], email)
+ else:
+ user = self.admin_client.users.create(username, password, email,
+ tenant_id=tenant.id)
+ return user
+
+ def _get_user(self, tenant, username):
+ if self.tempest_client:
+ resp, user = self.admin_client.get_user_by_username(tenant['id'],
+ username)
+ else:
+ user = self.admin_client.users.get(username)
+ return user
+
+ def _list_roles(self):
+ if self.tempest_client:
+ resp, roles = self.admin_client.list_roles()
+ else:
+ roles = self.admin_client.roles.list()
+ return roles
+
+ def _assign_user_role(self, tenant, user, role):
+ if self.tempest_client:
+ self.admin_client.assign_user_role(tenant, user, role)
+ else:
+ self.admin_client.roles.add_user_role(user, role, tenant=tenant)
+
+ def _delete_user(self, user):
+ if self.tempest_client:
+ self.admin_client.delete_user(user)
+ else:
+ self.admin_client.users.delete(user)
+
+ def _delete_tenant(self, tenant):
+ if self.tempest_client:
+ self.admin_client.delete_tenant(tenant)
+ else:
+ self.admin_client.tenants.delete(tenant)
+
+ def _create_creds(self, suffix=None, admin=False):
+ rand_name_root = rand_name(self.name)
+ if suffix:
+ rand_name_root += suffix
+ tenant_name = rand_name_root + "-tenant"
+ tenant_desc = tenant_name + "-desc"
+ rand_name_root = rand_name(self.name)
+ tenant = self._create_tenant(name=tenant_name,
+ description=tenant_desc)
+ if suffix:
+ rand_name_root += suffix
+ username = rand_name_root + "-user"
+ email = rand_name_root + "@example.com"
+ user = self._create_user(username, self.password,
+ tenant, email)
+ if admin:
+ role = None
+ try:
+ roles = self._list_roles()
+ if self.tempest_client:
+ role = next(r for r in roles if r['name'] == 'admin')
+ else:
+ role = next(r for r in roles if r.name == 'admin')
+ except StopIteration:
+ msg = "No admin role found"
+ raise exceptions.NotFound(msg)
+ if self.tempest_client:
+ self._assign_user_role(tenant['id'], user['id'], role['id'])
+ else:
+ self._assign_user_role(tenant.id, user.id, role.id)
+ return user, tenant
+
+ def _get_cred_names(self, user, tenant):
+ if self.tempest_client:
+ username = user.get('name')
+ tenant_name = tenant.get('name')
+ else:
+ username = user.name
+ tenant_name = tenant.name
+ return username, tenant_name
+
+ def get_primary_tenant(self):
+ return self.isolated_creds.get('primary')[1]
+
+ def get_primary_user(self):
+ return self.isolated_creds.get('primary')[0]
+
+ def get_alt_tenant(self):
+ return self.isolated_creds.get('alt')[1]
+
+ def get_alt_user(self):
+ return self.isolated_creds.get('alt')[0]
+
+ def get_admin_tenant(self):
+ return self.isolated_creds.get('admin')[1]
+
+ def get_admin_user(self):
+ return self.isolated_creds.get('admin')[0]
+
+ def get_primary_creds(self):
+ if self.isolated_creds.get('primary'):
+ user, tenant = self.isolated_creds['primary']
+ username, tenant_name = self._get_cred_names(user, tenant)
+ else:
+ user, tenant = self._create_creds()
+ username, tenant_name = self._get_cred_names(user, tenant)
+ self.isolated_creds['primary'] = (user, tenant)
+            LOG.info("Acquired isolated creds:\n user: %s, tenant: %s"
+ % (username, tenant_name))
+ return username, tenant_name, self.password
+
+ def get_admin_creds(self):
+ if self.isolated_creds.get('admin'):
+ user, tenant = self.isolated_creds['admin']
+ username, tenant_name = self._get_cred_names(user, tenant)
+ else:
+ user, tenant = self._create_creds(admin=True)
+ username, tenant_name = self._get_cred_names(user, tenant)
+ self.isolated_creds['admin'] = (user, tenant)
+            LOG.info("Acquired admin isolated creds:\n user: %s, tenant: %s"
+ % (username, tenant_name))
+ return username, tenant_name, self.password
+
+ def get_alt_creds(self):
+ if self.isolated_creds.get('alt'):
+ user, tenant = self.isolated_creds['alt']
+ username, tenant_name = self._get_cred_names(user, tenant)
+ else:
+ user, tenant = self._create_creds()
+ username, tenant_name = self._get_cred_names(user, tenant)
+ self.isolated_creds['alt'] = (user, tenant)
+            LOG.info("Acquired alt isolated creds:\n user: %s, tenant: %s"
+ % (username, tenant_name))
+ return username, tenant_name, self.password
+
+ def clear_isolated_creds(self):
+ if not self.isolated_creds:
+ return
+ for cred in self.isolated_creds:
+ user, tenant = self.isolated_creds.get(cred)
+ try:
+ if self.tempest_client:
+ self._delete_user(user['id'])
+ else:
+ self._delete_user(user.id)
+ except exceptions.NotFound:
+ if self.tempest_client:
+ name = user['name']
+ else:
+ name = user.name
+ LOG.warn("user with name: %s not found for delete" % name)
+ pass
+ try:
+ if self.tempest_client:
+ self._delete_tenant(tenant['id'])
+ else:
+ self._delete_tenant(tenant.id)
+ except exceptions.NotFound:
+ if self.tempest_client:
+ name = tenant['name']
+ else:
+ name = tenant.name
+ LOG.warn("tenant with name: %s not found for delete" % name)
+ pass
diff --git a/tempest/common/log.py b/tempest/common/log.py
deleted file mode 100644
index 2159bfe..0000000
--- a/tempest/common/log.py
+++ /dev/null
@@ -1,116 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2013 NEC Corporation.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import ConfigParser
-import inspect
-import logging
-import logging.config
-import os
-import re
-
-from oslo.config import cfg
-
-
-_DEFAULT_LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s"
-_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
-
-_loggers = {}
-
-
-def getLogger(name='unknown'):
- if len(_loggers) == 0:
- loaded = _load_log_config()
- getLogger.adapter = TestsAdapter if loaded else None
-
- if name not in _loggers:
- logger = logging.getLogger(name)
- if getLogger.adapter:
- _loggers[name] = getLogger.adapter(logger, name)
- else:
- _loggers[name] = logger
-
- return _loggers[name]
-
-
-def _load_log_config():
- conf_dir = os.environ.get('TEMPEST_LOG_CONFIG_DIR', None)
- conf_file = os.environ.get('TEMPEST_LOG_CONFIG', None)
- if not conf_dir or not conf_file:
- return False
-
- log_config = os.path.join(conf_dir, conf_file)
- try:
- logging.config.fileConfig(log_config)
- except ConfigParser.Error as exc:
- raise cfg.ConfigFileParseError(log_config, str(exc))
- return True
-
-
-class TestsAdapter(logging.LoggerAdapter):
-
- def __init__(self, logger, project_name):
- self.logger = logger
- self.project = project_name
- self.regexp = re.compile(r"test_\w+\.py")
-
- def __getattr__(self, key):
- return getattr(self.logger, key)
-
- def _get_test_name(self):
- frames = inspect.stack()
- for frame in frames:
- binary_name = frame[1]
- if self.regexp.search(binary_name) and 'self' in frame[0].f_locals:
- return frame[0].f_locals.get('self').id()
- elif frame[3] == '_run_cleanups':
- #NOTE(myamazaki): method calling addCleanup
- return frame[0].f_locals.get('self').case.id()
- elif frame[3] in ['setUpClass', 'tearDownClass']:
- #NOTE(myamazaki): setUpClass or tearDownClass
- return "%s.%s.%s" % (frame[0].f_locals['cls'].__module__,
- frame[0].f_locals['cls'].__name__,
- frame[3])
- return None
-
- def process(self, msg, kwargs):
- if 'extra' not in kwargs:
- kwargs['extra'] = {}
- extra = kwargs['extra']
-
- test_name = self._get_test_name()
- if test_name:
- extra.update({'testname': test_name})
- extra['extra'] = extra.copy()
-
- return msg, kwargs
-
-
-class TestsFormatter(logging.Formatter):
- def __init__(self, fmt=None, datefmt=None):
- super(TestsFormatter, self).__init__()
- self.default_format = _DEFAULT_LOG_FORMAT
- self.testname_format =\
- "%(asctime)s %(levelname)8s [%(testname)s] %(message)s"
- self.datefmt = _DEFAULT_LOG_DATE_FORMAT
-
- def format(self, record):
- extra = record.__dict__.get('extra', None)
- if extra and 'testname' in extra:
- self._fmt = self.testname_format
- else:
- self._fmt = self.default_format
- return logging.Formatter.format(self, record)
diff --git a/tempest/common/rest_client.py b/tempest/common/rest_client.py
index e94455d..759ab81 100644
--- a/tempest/common/rest_client.py
+++ b/tempest/common/rest_client.py
@@ -24,8 +24,8 @@
import re
import time
-from tempest.common import log as logging
from tempest import exceptions
+from tempest.openstack.common import log as logging
from tempest.services.compute.xml.common import xml_to_json
# redrive rate limited calls at most twice
@@ -166,7 +166,8 @@
elif resp.status == 401:
raise exceptions.AuthenticationFailure(user=user,
- password=password)
+ password=password,
+ tenant=tenant_name)
raise exceptions.IdentityError('Unexpected status code {0}'.format(
resp.status))
diff --git a/tempest/common/ssh.py b/tempest/common/ssh.py
index 04cc851..be350c8 100644
--- a/tempest/common/ssh.py
+++ b/tempest/common/ssh.py
@@ -112,10 +112,10 @@
channel.shutdown_write()
out_data = []
err_data = []
-
- select_params = [channel], [], [], self.channel_timeout
+ poll = select.poll()
+ poll.register(channel, select.POLLIN)
while True:
- ready = select.select(*select_params)
+ ready = poll.poll(self.channel_timeout)
if not any(ready):
raise exceptions.TimeoutException(
"Command: '{0}' executed on host '{1}'.".format(
diff --git a/tempest/common/utils/linux/remote_client.py b/tempest/common/utils/linux/remote_client.py
index fd5d3d0..de2bf43 100644
--- a/tempest/common/utils/linux/remote_client.py
+++ b/tempest/common/utils/linux/remote_client.py
@@ -1,3 +1,17 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
import re
import time
diff --git a/tempest/config.py b/tempest/config.py
index 96b144c..9b1a91e 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -23,10 +23,9 @@
from oslo.config import cfg
-from tempest.common import log as logging
from tempest.common.utils.misc import singleton
+from tempest.openstack.common import log as logging
-LOG = logging.getLogger(__name__)
identity_group = cfg.OptGroup(name='identity',
title="Keystone Configuration Options")
@@ -160,6 +159,10 @@
cfg.StrOpt('ssh_user',
default='root',
help="User name used to authenticate to an instance."),
+ cfg.IntOpt('ping_timeout',
+ default=60,
+ help="Timeout in seconds to wait for ping to "
+ "succeed."),
cfg.IntOpt('ssh_timeout',
default=300,
help="Timeout in seconds to wait for authentication to "
@@ -177,6 +180,9 @@
cfg.IntOpt('ip_version_for_ssh',
default=4,
help="IP version used for SSH connections."),
+ cfg.BoolOpt('use_floatingip_for_ssh',
+ default=True,
+                help="Does SSH use a floating IP?"),
cfg.StrOpt('catalog_type',
default='compute',
help="Catalog type of the Compute service."),
@@ -285,7 +291,7 @@
default="10.100.0.0/16",
help="The cidr block to allocate tenant networks from"),
cfg.IntOpt('tenant_network_mask_bits',
- default=29,
+ default=28,
help="The mask bits for tenant networks"),
cfg.BoolOpt('tenant_networks_reachable',
default=False,
@@ -299,9 +305,6 @@
default="",
help="Id of the public router that provides external "
"connectivity"),
- cfg.BoolOpt('neutron_available',
- default=False,
- help="Whether or not neutron is expected to be available"),
]
@@ -363,6 +366,9 @@
default=5,
help="Number of seconds to wait while looping to check the"
"status of a container to container synchronization"),
+ cfg.BoolOpt('accounts_quotas_available',
+ default=True,
+ help="Set to True if the Account Quota middleware is enabled"),
]
@@ -391,9 +397,6 @@
cfg.IntOpt('build_timeout',
default=300,
help="Timeout in seconds to wait for a stack to build."),
- cfg.BoolOpt('heat_available',
- default=False,
- help="Whether or not Heat is expected to be available"),
cfg.StrOpt('instance_type',
default='m1.micro',
help="Instance type for tests. Needs to be big enough for a "
@@ -413,6 +416,26 @@
for opt in OrchestrationGroup:
conf.register_opt(opt, group='orchestration')
+
+dashboard_group = cfg.OptGroup(name="dashboard",
+ title="Dashboard options")
+
+DashboardGroup = [
+ cfg.StrOpt('dashboard_url',
+ default='http://localhost/',
+ help="Where the dashboard can be found"),
+ cfg.StrOpt('login_url',
+ default='http://localhost/auth/login/',
+ help="Login page for the dashboard"),
+]
+
+
+def register_dashboard_opts(conf):
+    conf.register_group(dashboard_group)
+ for opt in DashboardGroup:
+ conf.register_opt(opt, group='dashboard')
+
+
boto_group = cfg.OptGroup(name='boto',
title='EC2/S3 options')
BotoConfig = [
@@ -520,7 +543,12 @@
help='AKI image file name'),
cfg.StrOpt('ssh_user',
default='cirros',
- help='ssh username for the image file')
+ help='ssh username for the image file'),
+ cfg.IntOpt(
+ 'large_ops_number',
+ default=0,
+ help="specifies how many resources to request at once. Used "
+ "for large operations testing.")
]
@@ -530,6 +558,40 @@
conf.register_opt(opt, group='scenario')
+service_available_group = cfg.OptGroup(name="service_available",
+ title="Available OpenStack Services")
+
+ServiceAvailableGroup = [
+ cfg.BoolOpt('cinder',
+ default=True,
+ help="Whether or not cinder is expected to be available"),
+ cfg.BoolOpt('neutron',
+ default=False,
+ help="Whether or not neutron is expected to be available"),
+ cfg.BoolOpt('glance',
+ default=True,
+ help="Whether or not glance is expected to be available"),
+ cfg.BoolOpt('swift',
+ default=True,
+ help="Whether or not swift is expected to be available"),
+ cfg.BoolOpt('nova',
+ default=True,
+ help="Whether or not nova is expected to be available"),
+ cfg.BoolOpt('heat',
+ default=False,
+ help="Whether or not Heat is expected to be available"),
+ cfg.BoolOpt('horizon',
+ default=True,
+ help="Whether or not Horizon is expected to be available"),
+]
+
+
+def register_service_available_opts(conf):
+    conf.register_group(service_available_group)
+ for opt in ServiceAvailableGroup:
+ conf.register_opt(opt, group='service_available')
+
+
@singleton
class TempestConfig:
"""Provides OpenStack configuration information."""
@@ -543,7 +605,6 @@
def __init__(self):
"""Initialize a configuration from a conf directory and conf file."""
config_files = []
-
failsafe_path = "/etc/tempest/" + self.DEFAULT_CONFIG_FILE
# Environment variables override defaults...
@@ -558,15 +619,16 @@
'TEMPEST_CONFIG' in os.environ):
path = failsafe_path
- LOG.info("Using tempest config file %s" % path)
-
if not os.path.exists(path):
- msg = "Config file %(path)s not found" % locals()
+ msg = "Config file %s not found" % path
print(RuntimeError(msg), file=sys.stderr)
else:
config_files.append(path)
cfg.CONF([], project='tempest', default_config_files=config_files)
+ logging.setup('tempest')
+ LOG = logging.getLogger('tempest')
+ LOG.info("Using tempest config file %s" % path)
register_compute_opts(cfg.CONF)
register_identity_opts(cfg.CONF)
@@ -576,10 +638,12 @@
register_volume_opts(cfg.CONF)
register_object_storage_opts(cfg.CONF)
register_orchestration_opts(cfg.CONF)
+ register_dashboard_opts(cfg.CONF)
register_boto_opts(cfg.CONF)
register_compute_admin_opts(cfg.CONF)
register_stress_opts(cfg.CONF)
register_scenario_opts(cfg.CONF)
+ register_service_available_opts(cfg.CONF)
self.compute = cfg.CONF.compute
self.whitebox = cfg.CONF.whitebox
self.identity = cfg.CONF.identity
@@ -588,10 +652,12 @@
self.volume = cfg.CONF.volume
self.object_storage = cfg.CONF['object-storage']
self.orchestration = cfg.CONF.orchestration
+ self.dashboard = cfg.CONF.dashboard
self.boto = cfg.CONF.boto
self.compute_admin = cfg.CONF['compute-admin']
self.stress = cfg.CONF.stress
self.scenario = cfg.CONF.scenario
+ self.service_available = cfg.CONF.service_available
if not self.compute_admin.username:
self.compute_admin.username = self.identity.admin_username
self.compute_admin.password = self.identity.admin_password
diff --git a/tempest/exceptions.py b/tempest/exceptions.py
index 448fbdf..62bd8cf 100644
--- a/tempest/exceptions.py
+++ b/tempest/exceptions.py
@@ -105,7 +105,7 @@
class AuthenticationFailure(RestClientException):
message = ("Authentication with user %(user)s and password "
- "%(password)s failed")
+ "%(password)s failed auth using tenant %(tenant)s.")
class EndpointNotFound(TempestException):
diff --git a/tempest/manager.py b/tempest/manager.py
index 4a447f3..54a0dec 100644
--- a/tempest/manager.py
+++ b/tempest/manager.py
@@ -15,41 +15,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest.common import log as logging
import tempest.config
from tempest import exceptions
-# Tempest REST Fuzz testing client libs
-from tempest.services.compute.json import extensions_client
-from tempest.services.compute.json import flavors_client
-from tempest.services.compute.json import floating_ips_client
-from tempest.services.compute.json import hypervisor_client
-from tempest.services.compute.json import images_client
-from tempest.services.compute.json import keypairs_client
-from tempest.services.compute.json import limits_client
-from tempest.services.compute.json import quotas_client
-from tempest.services.compute.json import security_groups_client
-from tempest.services.compute.json import servers_client
-from tempest.services.compute.json import volumes_extensions_client
-from tempest.services.network.json import network_client
-from tempest.services.volume.json import snapshots_client
-from tempest.services.volume.json import volumes_client
-
-NetworkClient = network_client.NetworkClient
-ImagesClient = images_client.ImagesClientJSON
-FlavorsClient = flavors_client.FlavorsClientJSON
-ServersClient = servers_client.ServersClientJSON
-LimitsClient = limits_client.LimitsClientJSON
-ExtensionsClient = extensions_client.ExtensionsClientJSON
-FloatingIPsClient = floating_ips_client.FloatingIPsClientJSON
-SecurityGroupsClient = security_groups_client.SecurityGroupsClientJSON
-KeyPairsClient = keypairs_client.KeyPairsClientJSON
-VolumesExtensionsClient = volumes_extensions_client.VolumesExtensionsClientJSON
-VolumesClient = volumes_client.VolumesClientJSON
-SnapshotsClient = snapshots_client.SnapshotsClientJSON
-QuotasClient = quotas_client.QuotasClientJSON
-HypervisorClient = hypervisor_client.HypervisorClientJSON
-
-LOG = logging.getLogger(__name__)
class Manager(object):
@@ -65,100 +32,11 @@
self.config = tempest.config.TempestConfig()
self.client_attr_names = []
-
-class FuzzClientManager(Manager):
-
- """
- Manager class that indicates the client provided by the manager
- is a fuzz-testing client that Tempest contains. These fuzz-testing
- clients are used to be able to throw random or invalid data at
- an endpoint and check for appropriate error messages returned
- from the endpoint.
- """
- pass
-
-
-class ComputeFuzzClientManager(FuzzClientManager):
-
- """
- Manager that uses the Tempest REST client that can send
- random or invalid data at the OpenStack Compute API
- """
-
- def __init__(self, username=None, password=None, tenant_name=None):
- """
- We allow overriding of the credentials used within the various
- client classes managed by the Manager object. Left as None, the
- standard username/password/tenant_name is used.
-
- :param username: Override of the username
- :param password: Override of the password
- :param tenant_name: Override of the tenant name
- """
- super(ComputeFuzzClientManager, self).__init__()
-
- # If no creds are provided, we fall back on the defaults
- # in the config file for the Compute API.
- username = username or self.config.identity.username
- password = password or self.config.identity.password
- tenant_name = tenant_name or self.config.identity.tenant_name
-
+ # we do this everywhere, have it be part of the super class
+ def _validate_credentials(self, username, password, tenant_name):
if None in (username, password, tenant_name):
msg = ("Missing required credentials. "
- "username: %(username)s, password: %(password)s, "
- "tenant_name: %(tenant_name)s") % locals()
+ "username: %(u)s, password: %(p)s, "
+ "tenant_name: %(t)s" %
+ {'u': username, 'p': password, 't': tenant_name})
raise exceptions.InvalidConfiguration(msg)
-
- auth_url = self.config.identity.uri
-
- # Ensure /tokens is in the URL for Keystone...
- if 'tokens' not in auth_url:
- auth_url = auth_url.rstrip('/') + '/tokens'
-
- client_args = (self.config, username, password, auth_url,
- tenant_name)
-
- self.servers_client = ServersClient(*client_args)
- self.flavors_client = FlavorsClient(*client_args)
- self.images_client = ImagesClient(*client_args)
- self.limits_client = LimitsClient(*client_args)
- self.extensions_client = ExtensionsClient(*client_args)
- self.keypairs_client = KeyPairsClient(*client_args)
- self.security_groups_client = SecurityGroupsClient(*client_args)
- self.floating_ips_client = FloatingIPsClient(*client_args)
- self.volumes_extensions_client = VolumesExtensionsClient(*client_args)
- self.volumes_client = VolumesClient(*client_args)
- self.snapshots_client = SnapshotsClient(*client_args)
- self.quotas_client = QuotasClient(*client_args)
- self.network_client = NetworkClient(*client_args)
- self.hypervisor_client = HypervisorClient(*client_args)
-
-
-class ComputeFuzzClientAltManager(Manager):
-
- """
- Manager object that uses the alt_XXX credentials for its
- managed client objects
- """
-
- def __init__(self):
- conf = tempest.config.TempestConfig()
- super(ComputeFuzzClientAltManager, self).__init__(
- conf.identity.alt_username,
- conf.identity.alt_password,
- conf.identity.alt_tenant_name)
-
-
-class ComputeFuzzClientAdminManager(Manager):
-
- """
- Manager object that uses the alt_XXX credentials for its
- managed client objects
- """
-
- def __init__(self):
- conf = tempest.config.TempestConfig()
- super(ComputeFuzzClientAdminManager, self).__init__(
- conf.compute_admin.username,
- conf.compute_admin.password,
- conf.compute_admin.tenant_name)
diff --git a/tempest/openstack/common/__init__.py b/tempest/openstack/common/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/openstack/common/__init__.py
diff --git a/tempest/openstack/common/excutils.py b/tempest/openstack/common/excutils.py
new file mode 100644
index 0000000..81aad14
--- /dev/null
+++ b/tempest/openstack/common/excutils.py
@@ -0,0 +1,98 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack Foundation.
+# Copyright 2012, Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Exception related utilities.
+"""
+
+import logging
+import sys
+import time
+import traceback
+
+from tempest.openstack.common.gettextutils import _ # noqa
+
+
+class save_and_reraise_exception(object):
+ """Save current exception, run some code and then re-raise.
+
+ In some cases the exception context can be cleared, resulting in None
+ being attempted to be re-raised after an exception handler is run. This
+ can happen when eventlet switches greenthreads or when running an
+ exception handler, code raises and catches an exception. In both
+ cases the exception context will be cleared.
+
+ To work around this, we save the exception state, run handler code, and
+ then re-raise the original exception. If another exception occurs, the
+ saved exception is logged and the new exception is re-raised.
+
+ In some cases the caller may not want to re-raise the exception, and
+ for those circumstances this context provides a reraise flag that
+ can be used to suppress the exception. For example:
+
+ except Exception:
+ with save_and_reraise_exception() as ctxt:
+ decide_if_need_reraise()
+ if not should_be_reraised:
+ ctxt.reraise = False
+ """
+ def __init__(self):
+ self.reraise = True
+
+ def __enter__(self):
+ self.type_, self.value, self.tb, = sys.exc_info()
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ if exc_type is not None:
+ logging.error(_('Original exception being dropped: %s'),
+ traceback.format_exception(self.type_,
+ self.value,
+ self.tb))
+ return False
+ if self.reraise:
+ raise self.type_, self.value, self.tb
+
+
+def forever_retry_uncaught_exceptions(infunc):
+ def inner_func(*args, **kwargs):
+ last_log_time = 0
+ last_exc_message = None
+ exc_count = 0
+ while True:
+ try:
+ return infunc(*args, **kwargs)
+ except Exception as exc:
+ if exc.message == last_exc_message:
+ exc_count += 1
+ else:
+ exc_count = 1
+ # Do not log any more frequently than once a minute unless
+ # the exception message changes
+ cur_time = int(time.time())
+ if (cur_time - last_log_time > 60 or
+ exc.message != last_exc_message):
+ logging.exception(
+ _('Unexpected exception occurred %d time(s)... '
+ 'retrying.') % exc_count)
+ last_log_time = cur_time
+ last_exc_message = exc.message
+ exc_count = 0
+ # This should be a very rare event. In case it isn't, do
+ # a sleep.
+ time.sleep(1)
+ return inner_func
diff --git a/tempest/openstack/common/fileutils.py b/tempest/openstack/common/fileutils.py
new file mode 100644
index 0000000..d2e3d3e
--- /dev/null
+++ b/tempest/openstack/common/fileutils.py
@@ -0,0 +1,110 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack Foundation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import contextlib
+import errno
+import os
+
+from tempest.openstack.common import excutils
+from tempest.openstack.common.gettextutils import _ # noqa
+from tempest.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
+_FILE_CACHE = {}
+
+
+def ensure_tree(path):
+ """Create a directory (and any ancestor directories required)
+
+ :param path: Directory to create
+ """
+ try:
+ os.makedirs(path)
+ except OSError as exc:
+ if exc.errno == errno.EEXIST:
+ if not os.path.isdir(path):
+ raise
+ else:
+ raise
+
+
+def read_cached_file(filename, force_reload=False):
+ """Read from a file if it has been modified.
+
+ :param force_reload: Whether to reload the file.
+ :returns: A tuple with a boolean specifying if the data is fresh
+ or not.
+ """
+ global _FILE_CACHE
+
+ if force_reload and filename in _FILE_CACHE:
+ del _FILE_CACHE[filename]
+
+ reloaded = False
+ mtime = os.path.getmtime(filename)
+ cache_info = _FILE_CACHE.setdefault(filename, {})
+
+ if not cache_info or mtime > cache_info.get('mtime', 0):
+ LOG.debug(_("Reloading cached file %s") % filename)
+ with open(filename) as fap:
+ cache_info['data'] = fap.read()
+ cache_info['mtime'] = mtime
+ reloaded = True
+ return (reloaded, cache_info['data'])
+
+
+def delete_if_exists(path):
+ """Delete a file, but ignore file not found error.
+
+ :param path: File to delete
+ """
+
+ try:
+ os.unlink(path)
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ return
+ else:
+ raise
+
+
+@contextlib.contextmanager
+def remove_path_on_error(path):
+ """Protect code that wants to operate on PATH atomically.
+ Any exception will cause PATH to be removed.
+
+ :param path: File to work with
+ """
+ try:
+ yield
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ delete_if_exists(path)
+
+
+def file_open(*args, **kwargs):
+ """Open file
+
+ see built-in file() documentation for more details
+
+ Note: The reason this is kept in a separate module is to easily
+ be able to provide a stub module that doesn't alter system
+ state at all (for unit tests)
+ """
+ return file(*args, **kwargs)
diff --git a/tempest/openstack/common/gettextutils.py b/tempest/openstack/common/gettextutils.py
new file mode 100644
index 0000000..8594937
--- /dev/null
+++ b/tempest/openstack/common/gettextutils.py
@@ -0,0 +1,259 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Red Hat, Inc.
+# All Rights Reserved.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+gettext for openstack-common modules.
+
+Usual usage in an openstack.common module:
+
+ from tempest.openstack.common.gettextutils import _
+"""
+
+import copy
+import gettext
+import logging.handlers
+import os
+import re
+import UserString
+
+import six
+
+_localedir = os.environ.get('tempest'.upper() + '_LOCALEDIR')
+_t = gettext.translation('tempest', localedir=_localedir, fallback=True)
+
+
+def _(msg):
+ return _t.ugettext(msg)
+
+
+def install(domain):
+ """Install a _() function using the given translation domain.
+
+ Given a translation domain, install a _() function using gettext's
+ install() function.
+
+ The main difference from gettext.install() is that we allow
+ overriding the default localedir (e.g. /usr/share/locale) using
+ a translation-domain-specific environment variable (e.g.
+ NOVA_LOCALEDIR).
+ """
+ gettext.install(domain,
+ localedir=os.environ.get(domain.upper() + '_LOCALEDIR'),
+ unicode=True)
+
+
+"""
+Lazy gettext functionality.
+
+The following is an attempt to introduce a deferred way
+to do translations on messages in OpenStack. We attempt to
+override the standard _() function and % (format string) operation
+to build Message objects that can later be translated when we have
+more information. Also included is an example LogHandler that
+translates Messages to an associated locale, effectively allowing
+many logs, each with their own locale.
+"""
+
+
+def get_lazy_gettext(domain):
+ """Assemble and return a lazy gettext function for a given domain.
+
+ Factory method for a project/module to get a lazy gettext function
+ for its own translation domain (i.e. nova, glance, cinder, etc.)
+ """
+
+ def _lazy_gettext(msg):
+ """Create and return a Message object.
+
+ Message encapsulates a string so that we can translate it later when
+ needed.
+ """
+ return Message(msg, domain)
+
+ return _lazy_gettext
+
+
+class Message(UserString.UserString, object):
+ """Class used to encapsulate translatable messages."""
+ def __init__(self, msg, domain):
+ # _msg is the gettext msgid and should never change
+ self._msg = msg
+ self._left_extra_msg = ''
+ self._right_extra_msg = ''
+ self.params = None
+ self.locale = None
+ self.domain = domain
+
+ @property
+ def data(self):
+ # NOTE(mrodden): this should always resolve to a unicode string
+ # that best represents the state of the message currently
+
+ localedir = os.environ.get(self.domain.upper() + '_LOCALEDIR')
+ if self.locale:
+ lang = gettext.translation(self.domain,
+ localedir=localedir,
+ languages=[self.locale],
+ fallback=True)
+ else:
+ # use system locale for translations
+ lang = gettext.translation(self.domain,
+ localedir=localedir,
+ fallback=True)
+
+ full_msg = (self._left_extra_msg +
+ lang.ugettext(self._msg) +
+ self._right_extra_msg)
+
+ if self.params is not None:
+ full_msg = full_msg % self.params
+
+ return six.text_type(full_msg)
+
+ def _save_dictionary_parameter(self, dict_param):
+ full_msg = self.data
+ # look for %(blah) fields in string;
+ # ignore %% and deal with the
+ # case where % is first character on the line
+ keys = re.findall('(?:[^%]|^)%\((\w*)\)[a-z]', full_msg)
+
+ # if we don't find any %(blah) blocks but have a %s
+ if not keys and re.findall('(?:[^%]|^)%[a-z]', full_msg):
+ # apparently the full dictionary is the parameter
+ params = copy.deepcopy(dict_param)
+ else:
+ params = {}
+ for key in keys:
+ try:
+ params[key] = copy.deepcopy(dict_param[key])
+ except TypeError:
+ # cast uncopyable thing to unicode string
+ params[key] = unicode(dict_param[key])
+
+ return params
+
+ def _save_parameters(self, other):
+ # we check for None later to see if
+ # we actually have parameters to inject,
+ # so encapsulate if our parameter is actually None
+ if other is None:
+ self.params = (other, )
+ elif isinstance(other, dict):
+ self.params = self._save_dictionary_parameter(other)
+ else:
+ # fallback to casting to unicode,
+ # this will handle the problematic python code-like
+ # objects that cannot be deep-copied
+ try:
+ self.params = copy.deepcopy(other)
+ except TypeError:
+ self.params = unicode(other)
+
+ return self
+
+ # overrides to be more string-like
+ def __unicode__(self):
+ return self.data
+
+ def __str__(self):
+ return self.data.encode('utf-8')
+
+ def __getstate__(self):
+ to_copy = ['_msg', '_right_extra_msg', '_left_extra_msg',
+ 'domain', 'params', 'locale']
+ new_dict = self.__dict__.fromkeys(to_copy)
+ for attr in to_copy:
+ new_dict[attr] = copy.deepcopy(self.__dict__[attr])
+
+ return new_dict
+
+ def __setstate__(self, state):
+ for (k, v) in state.items():
+ setattr(self, k, v)
+
+ # operator overloads
+ def __add__(self, other):
+ copied = copy.deepcopy(self)
+ copied._right_extra_msg += other.__str__()
+ return copied
+
+ def __radd__(self, other):
+ copied = copy.deepcopy(self)
+ copied._left_extra_msg += other.__str__()
+ return copied
+
+ def __mod__(self, other):
+ # do a format string to catch and raise
+ # any possible KeyErrors from missing parameters
+ self.data % other
+ copied = copy.deepcopy(self)
+ return copied._save_parameters(other)
+
+ def __mul__(self, other):
+ return self.data * other
+
+ def __rmul__(self, other):
+ return other * self.data
+
+ def __getitem__(self, key):
+ return self.data[key]
+
+ def __getslice__(self, start, end):
+ return self.data.__getslice__(start, end)
+
+ def __getattribute__(self, name):
+ # NOTE(mrodden): handle lossy operations that we can't deal with yet
+ # These override the UserString implementation, since UserString
+ # uses our __class__ attribute to try and build a new message
+ # after running the inner data string through the operation.
+ # At that point, we have lost the gettext message id and can just
+ # safely resolve to a string instead.
+ ops = ['capitalize', 'center', 'decode', 'encode',
+ 'expandtabs', 'ljust', 'lstrip', 'replace', 'rjust', 'rstrip',
+ 'strip', 'swapcase', 'title', 'translate', 'upper', 'zfill']
+ if name in ops:
+ return getattr(self.data, name)
+ else:
+ return UserString.UserString.__getattribute__(self, name)
+
+
+class LocaleHandler(logging.Handler):
+ """Handler that can have a locale associated to translate Messages.
+
+ A quick example of how to utilize the Message class above.
+ LocaleHandler takes a locale and a target logging.Handler object
+ to forward LogRecord objects to after translating the internal Message.
+ """
+
+ def __init__(self, locale, target):
+ """Initialize a LocaleHandler
+
+ :param locale: locale to use for translating messages
+ :param target: logging.Handler object to forward
+ LogRecord objects to after translation
+ """
+ logging.Handler.__init__(self)
+ self.locale = locale
+ self.target = target
+
+ def emit(self, record):
+ if isinstance(record.msg, Message):
+ # set the locale and resolve to a string
+ record.msg.locale = self.locale
+
+ self.target.emit(record)
diff --git a/tempest/openstack/common/importutils.py b/tempest/openstack/common/importutils.py
new file mode 100644
index 0000000..7a303f9
--- /dev/null
+++ b/tempest/openstack/common/importutils.py
@@ -0,0 +1,68 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack Foundation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Import related utilities and helper functions.
+"""
+
+import sys
+import traceback
+
+
+def import_class(import_str):
+ """Returns a class from a string including module and class."""
+ mod_str, _sep, class_str = import_str.rpartition('.')
+ try:
+ __import__(mod_str)
+ return getattr(sys.modules[mod_str], class_str)
+ except (ValueError, AttributeError):
+ raise ImportError('Class %s cannot be found (%s)' %
+ (class_str,
+ traceback.format_exception(*sys.exc_info())))
+
+
+def import_object(import_str, *args, **kwargs):
+ """Import a class and return an instance of it."""
+ return import_class(import_str)(*args, **kwargs)
+
+
+def import_object_ns(name_space, import_str, *args, **kwargs):
+ """Tries to import object from default namespace.
+
+ Imports a class and return an instance of it, first by trying
+    to find the class in a default namespace, then falling back to
+ a full path if not found in the default namespace.
+ """
+ import_value = "%s.%s" % (name_space, import_str)
+ try:
+ return import_class(import_value)(*args, **kwargs)
+ except ImportError:
+ return import_class(import_str)(*args, **kwargs)
+
+
+def import_module(import_str):
+ """Import a module."""
+ __import__(import_str)
+ return sys.modules[import_str]
+
+
+def try_import(import_str, default=None):
+ """Try to import a module and if it fails return default."""
+ try:
+ return import_module(import_str)
+ except ImportError:
+ return default
diff --git a/tempest/openstack/common/jsonutils.py b/tempest/openstack/common/jsonutils.py
new file mode 100644
index 0000000..bd43e59
--- /dev/null
+++ b/tempest/openstack/common/jsonutils.py
@@ -0,0 +1,172 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+'''
+JSON related utilities.
+
+This module provides a few things:
+
+ 1) A handy function for getting an object down to something that can be
+ JSON serialized. See to_primitive().
+
+ 2) Wrappers around loads() and dumps(). The dumps() wrapper will
+ automatically use to_primitive() for you if needed.
+
+ 3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson
+ is available.
+'''
+
+
+import datetime
+import functools
+import inspect
+import itertools
+import json
+import types
+import xmlrpclib
+
+import netaddr
+import six
+
+from tempest.openstack.common import timeutils
+
+
+_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
+ inspect.isfunction, inspect.isgeneratorfunction,
+ inspect.isgenerator, inspect.istraceback, inspect.isframe,
+ inspect.iscode, inspect.isbuiltin, inspect.isroutine,
+ inspect.isabstract]
+
+_simple_types = (types.NoneType, int, basestring, bool, float, long)
+
+
+def to_primitive(value, convert_instances=False, convert_datetime=True,
+ level=0, max_depth=3):
+ """Convert a complex object into primitives.
+
+ Handy for JSON serialization. We can optionally handle instances,
+ but since this is a recursive function, we could have cyclical
+ data structures.
+
+ To handle cyclical data structures we could track the actual objects
+ visited in a set, but not all objects are hashable. Instead we just
+ track the depth of the object inspections and don't go too deep.
+
+ Therefore, convert_instances=True is lossy ... be aware.
+
+ """
+ # handle obvious types first - order of basic types determined by running
+ # full tests on nova project, resulting in the following counts:
+ # 572754 <type 'NoneType'>
+ # 460353 <type 'int'>
+ # 379632 <type 'unicode'>
+ # 274610 <type 'str'>
+ # 199918 <type 'dict'>
+ # 114200 <type 'datetime.datetime'>
+ # 51817 <type 'bool'>
+ # 26164 <type 'list'>
+ # 6491 <type 'float'>
+ # 283 <type 'tuple'>
+ # 19 <type 'long'>
+ if isinstance(value, _simple_types):
+ return value
+
+ if isinstance(value, datetime.datetime):
+ if convert_datetime:
+ return timeutils.strtime(value)
+ else:
+ return value
+
+ # value of itertools.count doesn't get caught by nasty_type_tests
+ # and results in infinite loop when list(value) is called.
+ if type(value) == itertools.count:
+ return six.text_type(value)
+
+ # FIXME(vish): Workaround for LP bug 852095. Without this workaround,
+ # tests that raise an exception in a mocked method that
+ # has a @wrap_exception with a notifier will fail. If
+ # we up the dependency to 0.5.4 (when it is released) we
+ # can remove this workaround.
+ if getattr(value, '__module__', None) == 'mox':
+ return 'mock'
+
+ if level > max_depth:
+ return '?'
+
+ # The try block may not be necessary after the class check above,
+ # but just in case ...
+ try:
+ recursive = functools.partial(to_primitive,
+ convert_instances=convert_instances,
+ convert_datetime=convert_datetime,
+ level=level,
+ max_depth=max_depth)
+ if isinstance(value, dict):
+ return dict((k, recursive(v)) for k, v in value.iteritems())
+ elif isinstance(value, (list, tuple)):
+ return [recursive(lv) for lv in value]
+
+ # It's not clear why xmlrpclib created their own DateTime type, but
+ # for our purposes, make it a datetime type which is explicitly
+ # handled
+ if isinstance(value, xmlrpclib.DateTime):
+ value = datetime.datetime(*tuple(value.timetuple())[:6])
+
+ if convert_datetime and isinstance(value, datetime.datetime):
+ return timeutils.strtime(value)
+ elif hasattr(value, 'iteritems'):
+ return recursive(dict(value.iteritems()), level=level + 1)
+ elif hasattr(value, '__iter__'):
+ return recursive(list(value))
+ elif convert_instances and hasattr(value, '__dict__'):
+ # Likely an instance of something. Watch for cycles.
+ # Ignore class member vars.
+ return recursive(value.__dict__, level=level + 1)
+ elif isinstance(value, netaddr.IPAddress):
+ return six.text_type(value)
+ else:
+ if any(test(value) for test in _nasty_type_tests):
+ return six.text_type(value)
+ return value
+ except TypeError:
+ # Class objects are tricky since they may define something like
+ # __iter__ defined but it isn't callable as list().
+ return six.text_type(value)
+
+
+def dumps(value, default=to_primitive, **kwargs):
+ return json.dumps(value, default=default, **kwargs)
+
+
+def loads(s):
+ return json.loads(s)
+
+
+def load(s):
+ return json.load(s)
+
+
+try:
+ import anyjson
+except ImportError:
+ pass
+else:
+ anyjson._modules.append((__name__, 'dumps', TypeError,
+ 'loads', ValueError, 'load'))
+ anyjson.force_implementation(__name__)
diff --git a/tempest/openstack/common/local.py b/tempest/openstack/common/local.py
new file mode 100644
index 0000000..f1bfc82
--- /dev/null
+++ b/tempest/openstack/common/local.py
@@ -0,0 +1,48 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack Foundation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Greenthread local storage of variables using weak references"""
+
+import weakref
+
+from eventlet import corolocal
+
+
+class WeakLocal(corolocal.local):
+ def __getattribute__(self, attr):
+ rval = corolocal.local.__getattribute__(self, attr)
+ if rval:
+ # NOTE(mikal): this bit is confusing. What is stored is a weak
+ # reference, not the value itself. We therefore need to lookup
+ # the weak reference and return the inner value here.
+ rval = rval()
+ return rval
+
+ def __setattr__(self, attr, value):
+ value = weakref.ref(value)
+ return corolocal.local.__setattr__(self, attr, value)
+
+
+# NOTE(mikal): the name "store" should be deprecated in the future
+store = WeakLocal()
+
+# A "weak" store uses weak references and allows an object to fall out of scope
+# when it falls out of scope in the code that uses the thread local storage. A
+# "strong" store will hold a reference to the object so that it never falls out
+# of scope.
+weak_store = WeakLocal()
+strong_store = corolocal.local
diff --git a/tempest/openstack/common/lockutils.py b/tempest/openstack/common/lockutils.py
new file mode 100644
index 0000000..3ff1a7a
--- /dev/null
+++ b/tempest/openstack/common/lockutils.py
@@ -0,0 +1,276 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack Foundation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import contextlib
+import errno
+import functools
+import os
+import time
+import weakref
+
+from eventlet import semaphore
+from oslo.config import cfg
+
+from tempest.openstack.common import fileutils
+from tempest.openstack.common.gettextutils import _ # noqa
+from tempest.openstack.common import local
+from tempest.openstack.common import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+
+util_opts = [
+ cfg.BoolOpt('disable_process_locking', default=False,
+ help='Whether to disable inter-process locks'),
+ cfg.StrOpt('lock_path',
+ help=('Directory to use for lock files.'))
+]
+
+
+CONF = cfg.CONF
+CONF.register_opts(util_opts)
+
+
+def set_defaults(lock_path):
+ cfg.set_defaults(util_opts, lock_path=lock_path)
+
+
+class _InterProcessLock(object):
+ """Lock implementation which allows multiple locks, working around
+ issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
+ not require any cleanup. Since the lock is always held on a file
+ descriptor rather than outside of the process, the lock gets dropped
+ automatically if the process crashes, even if __exit__ is not executed.
+
+ There are no guarantees regarding usage by multiple green threads in a
+ single process here. This lock works only between processes. Exclusive
+ access between local threads should be achieved using the semaphores
+ in the @synchronized decorator.
+
+ Note these locks are released when the descriptor is closed, so it's not
+ safe to close the file descriptor while another green thread holds the
+ lock. Just opening and closing the lock file can break synchronisation,
+ so lock files must be accessed only using this abstraction.
+ """
+
+ def __init__(self, name):
+ self.lockfile = None
+ self.fname = name
+
+ def __enter__(self):
+ self.lockfile = open(self.fname, 'w')
+
+ while True:
+ try:
+ # Using non-blocking locks since green threads are not
+ # patched to deal with blocking locking calls.
+ # Also upon reading the MSDN docs for locking(), it seems
+ # to have a laughable 10 attempts "blocking" mechanism.
+ self.trylock()
+ return self
+ except IOError as e:
+ if e.errno in (errno.EACCES, errno.EAGAIN):
+ # external locks synchronise things like iptables
+ # updates - give it some time to prevent busy spinning
+ time.sleep(0.01)
+ else:
+ raise
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ try:
+ self.unlock()
+ self.lockfile.close()
+ except IOError:
+ LOG.exception(_("Could not release the acquired lock `%s`"),
+ self.fname)
+
+ def trylock(self):
+ raise NotImplementedError()
+
+ def unlock(self):
+ raise NotImplementedError()
+
+
+class _WindowsLock(_InterProcessLock):
+ def trylock(self):
+ msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)
+
+ def unlock(self):
+ msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)
+
+
+class _PosixLock(_InterProcessLock):
+ def trylock(self):
+ fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
+
+ def unlock(self):
+ fcntl.lockf(self.lockfile, fcntl.LOCK_UN)
+
+
+if os.name == 'nt':
+ import msvcrt
+ InterProcessLock = _WindowsLock
+else:
+ import fcntl
+ InterProcessLock = _PosixLock
+
+_semaphores = weakref.WeakValueDictionary()
+
+
+@contextlib.contextmanager
+def lock(name, lock_file_prefix=None, external=False, lock_path=None):
+ """Context based lock
+
+ This function yields a `semaphore.Semaphore` instance unless external is
+ True, in which case, it'll yield an InterProcessLock instance.
+
+ :param lock_file_prefix: The lock_file_prefix argument is used to provide
+ lock files on disk with a meaningful prefix.
+
+ :param external: The external keyword argument denotes whether this lock
+ should work across multiple processes. This means that if two different
+    workers both run a method decorated with @synchronized('mylock',
+ external=True), only one of them will execute at a time.
+
+ :param lock_path: The lock_path keyword argument is used to specify a
+ special location for external lock files to live. If nothing is set, then
+ CONF.lock_path is used as a default.
+ """
+ # NOTE(soren): If we ever go natively threaded, this will be racy.
+ # See http://stackoverflow.com/questions/5390569/dyn
+ # amically-allocating-and-destroying-mutexes
+ sem = _semaphores.get(name, semaphore.Semaphore())
+ if name not in _semaphores:
+ # this check is not racy - we're already holding ref locally
+ # so GC won't remove the item and there was no IO switch
+ # (only valid in greenthreads)
+ _semaphores[name] = sem
+
+ with sem:
+ LOG.debug(_('Got semaphore "%(lock)s"'), {'lock': name})
+
+ # NOTE(mikal): I know this looks odd
+ if not hasattr(local.strong_store, 'locks_held'):
+ local.strong_store.locks_held = []
+ local.strong_store.locks_held.append(name)
+
+ try:
+ if external and not CONF.disable_process_locking:
+ LOG.debug(_('Attempting to grab file lock "%(lock)s"'),
+ {'lock': name})
+
+ # We need a copy of lock_path because it is non-local
+ local_lock_path = lock_path or CONF.lock_path
+ if not local_lock_path:
+ raise cfg.RequiredOptError('lock_path')
+
+ if not os.path.exists(local_lock_path):
+ fileutils.ensure_tree(local_lock_path)
+ LOG.info(_('Created lock path: %s'), local_lock_path)
+
+ def add_prefix(name, prefix):
+ if not prefix:
+ return name
+ sep = '' if prefix.endswith('-') else '-'
+ return '%s%s%s' % (prefix, sep, name)
+
+ # NOTE(mikal): the lock name cannot contain directory
+ # separators
+ lock_file_name = add_prefix(name.replace(os.sep, '_'),
+ lock_file_prefix)
+
+ lock_file_path = os.path.join(local_lock_path, lock_file_name)
+
+ try:
+ lock = InterProcessLock(lock_file_path)
+ with lock as lock:
+ LOG.debug(_('Got file lock "%(lock)s" at %(path)s'),
+ {'lock': name, 'path': lock_file_path})
+ yield lock
+ finally:
+ LOG.debug(_('Released file lock "%(lock)s" at %(path)s'),
+ {'lock': name, 'path': lock_file_path})
+ else:
+ yield sem
+
+ finally:
+ local.strong_store.locks_held.remove(name)
+
+
+def synchronized(name, lock_file_prefix=None, external=False, lock_path=None):
+ """Synchronization decorator.
+
+ Decorating a method like so::
+
+ @synchronized('mylock')
+ def foo(self, *args):
+ ...
+
+ ensures that only one thread will execute the foo method at a time.
+
+ Different methods can share the same lock::
+
+ @synchronized('mylock')
+ def foo(self, *args):
+ ...
+
+ @synchronized('mylock')
+ def bar(self, *args):
+ ...
+
+ This way only one of either foo or bar can be executing at a time.
+ """
+
+ def wrap(f):
+ @functools.wraps(f)
+ def inner(*args, **kwargs):
+ with lock(name, lock_file_prefix, external, lock_path):
+ LOG.debug(_('Got semaphore / lock "%(function)s"'),
+ {'function': f.__name__})
+ return f(*args, **kwargs)
+
+ LOG.debug(_('Semaphore / lock released "%(function)s"'),
+ {'function': f.__name__})
+ return inner
+ return wrap
+
+
+def synchronized_with_prefix(lock_file_prefix):
+ """Partial object generator for the synchronization decorator.
+
+ Redefine @synchronized in each project like so::
+
+ (in nova/utils.py)
+ from nova.openstack.common import lockutils
+
+ synchronized = lockutils.synchronized_with_prefix('nova-')
+
+
+ (in nova/foo.py)
+ from nova import utils
+
+ @utils.synchronized('mylock')
+ def bar(self, *args):
+ ...
+
+ The lock_file_prefix argument is used to provide lock files on disk with a
+ meaningful prefix.
+ """
+
+ return functools.partial(synchronized, lock_file_prefix=lock_file_prefix)
diff --git a/tempest/openstack/common/log.py b/tempest/openstack/common/log.py
new file mode 100644
index 0000000..4133c30
--- /dev/null
+++ b/tempest/openstack/common/log.py
@@ -0,0 +1,559 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack Foundation.
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""OpenStack logging handler.
+
+This module adds to logging functionality by adding the option to specify
+a context object when calling the various log methods. If the context object
+is not specified, default formatting is used. Additionally, an instance uuid
+may be passed as part of the log message, which is intended to make it easier
+for admins to find messages related to a specific instance.
+
+It also allows setting of formatting information through conf.
+
+"""
+
+import inspect
+import itertools
+import logging
+import logging.config
+import logging.handlers
+import os
+import sys
+import traceback
+
+from oslo.config import cfg
+from six import moves
+
+from tempest.openstack.common.gettextutils import _ # noqa
+from tempest.openstack.common import importutils
+from tempest.openstack.common import jsonutils
+from tempest.openstack.common import local
+
+
+_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
+
+common_cli_opts = [
+ cfg.BoolOpt('debug',
+ short='d',
+ default=False,
+ help='Print debugging output (set logging level to '
+ 'DEBUG instead of default WARNING level).'),
+ cfg.BoolOpt('verbose',
+ short='v',
+ default=False,
+ help='Print more verbose output (set logging level to '
+ 'INFO instead of default WARNING level).'),
+]
+
+logging_cli_opts = [
+ cfg.StrOpt('log-config',
+ metavar='PATH',
+ help='If this option is specified, the logging configuration '
+ 'file specified is used and overrides any other logging '
+ 'options specified. Please see the Python logging module '
+ 'documentation for details on logging configuration '
+ 'files.'),
+ cfg.StrOpt('log-format',
+ default=None,
+ metavar='FORMAT',
+ help='DEPRECATED. '
+ 'A logging.Formatter log message format string which may '
+ 'use any of the available logging.LogRecord attributes. '
+ 'This option is deprecated. Please use '
+ 'logging_context_format_string and '
+ 'logging_default_format_string instead.'),
+ cfg.StrOpt('log-date-format',
+ default=_DEFAULT_LOG_DATE_FORMAT,
+ metavar='DATE_FORMAT',
+ help='Format string for %%(asctime)s in log records. '
+ 'Default: %(default)s'),
+ cfg.StrOpt('log-file',
+ metavar='PATH',
+ deprecated_name='logfile',
+ help='(Optional) Name of log file to output to. '
+ 'If no default is set, logging will go to stdout.'),
+ cfg.StrOpt('log-dir',
+ deprecated_name='logdir',
+ help='(Optional) The base directory used for relative '
+ '--log-file paths'),
+ cfg.BoolOpt('use-syslog',
+ default=False,
+ help='Use syslog for logging.'),
+ cfg.StrOpt('syslog-log-facility',
+ default='LOG_USER',
+ help='syslog facility to receive log lines')
+]
+
+generic_log_opts = [
+ cfg.BoolOpt('use_stderr',
+ default=True,
+ help='Log output to standard error')
+]
+
+log_opts = [
+ cfg.StrOpt('logging_context_format_string',
+ default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
+ '%(name)s [%(request_id)s %(user)s %(tenant)s] '
+ '%(instance)s%(message)s',
+ help='format string to use for log messages with context'),
+ cfg.StrOpt('logging_default_format_string',
+ default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
+ '%(name)s [-] %(instance)s%(message)s',
+ help='format string to use for log messages without context'),
+ cfg.StrOpt('logging_debug_format_suffix',
+ default='%(funcName)s %(pathname)s:%(lineno)d',
+ help='data to append to log format when level is DEBUG'),
+ cfg.StrOpt('logging_exception_prefix',
+ default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
+ '%(instance)s',
+ help='prefix each line of exception output with this format'),
+ cfg.ListOpt('default_log_levels',
+ default=[
+ 'amqplib=WARN',
+ 'sqlalchemy=WARN',
+ 'boto=WARN',
+ 'suds=INFO',
+ 'keystone=INFO',
+ 'eventlet.wsgi.server=WARN'
+ ],
+ help='list of logger=LEVEL pairs'),
+ cfg.BoolOpt('publish_errors',
+ default=False,
+ help='publish error events'),
+ cfg.BoolOpt('fatal_deprecations',
+ default=False,
+ help='make deprecations fatal'),
+
+ # NOTE(mikal): there are two options here because sometimes we are handed
+ # a full instance (and could include more information), and other times we
+ # are just handed a UUID for the instance.
+ cfg.StrOpt('instance_format',
+ default='[instance: %(uuid)s] ',
+ help='If an instance is passed with the log message, format '
+ 'it like this'),
+ cfg.StrOpt('instance_uuid_format',
+ default='[instance: %(uuid)s] ',
+ help='If an instance UUID is passed with the log message, '
+ 'format it like this'),
+]
+
+CONF = cfg.CONF
+CONF.register_cli_opts(common_cli_opts)
+CONF.register_cli_opts(logging_cli_opts)
+CONF.register_opts(generic_log_opts)
+CONF.register_opts(log_opts)
+
+# our new audit level
+# NOTE(jkoelker) Since we synthesized an audit level, make the logging
+# module aware of it so it acts like other levels.
+logging.AUDIT = logging.INFO + 1
+logging.addLevelName(logging.AUDIT, 'AUDIT')
+
+
+try:
+ NullHandler = logging.NullHandler
+except AttributeError: # NOTE(jkoelker) NullHandler added in Python 2.7
+ class NullHandler(logging.Handler):
+ def handle(self, record):
+ pass
+
+ def emit(self, record):
+ pass
+
+ def createLock(self):
+ self.lock = None
+
+
+def _dictify_context(context):
+ if context is None:
+ return None
+ if not isinstance(context, dict) and getattr(context, 'to_dict', None):
+ context = context.to_dict()
+ return context
+
+
+def _get_binary_name():
+ return os.path.basename(inspect.stack()[-1][1])
+
+
+def _get_log_file_path(binary=None):
+ logfile = CONF.log_file
+ logdir = CONF.log_dir
+
+ if logfile and not logdir:
+ return logfile
+
+ if logfile and logdir:
+ return os.path.join(logdir, logfile)
+
+ if logdir:
+ binary = binary or _get_binary_name()
+ return '%s.log' % (os.path.join(logdir, binary),)
+
+
+class BaseLoggerAdapter(logging.LoggerAdapter):
+
+ def audit(self, msg, *args, **kwargs):
+ self.log(logging.AUDIT, msg, *args, **kwargs)
+
+
+class LazyAdapter(BaseLoggerAdapter):
+ def __init__(self, name='unknown', version='unknown'):
+ self._logger = None
+ self.extra = {}
+ self.name = name
+ self.version = version
+
+ @property
+ def logger(self):
+ if not self._logger:
+ self._logger = getLogger(self.name, self.version)
+ return self._logger
+
+
+class ContextAdapter(BaseLoggerAdapter):
+ warn = logging.LoggerAdapter.warning
+
+ def __init__(self, logger, project_name, version_string):
+ self.logger = logger
+ self.project = project_name
+ self.version = version_string
+
+ @property
+ def handlers(self):
+ return self.logger.handlers
+
+ def deprecated(self, msg, *args, **kwargs):
+ stdmsg = _("Deprecated: %s") % msg
+ if CONF.fatal_deprecations:
+ self.critical(stdmsg, *args, **kwargs)
+ raise DeprecatedConfig(msg=stdmsg)
+ else:
+ self.warn(stdmsg, *args, **kwargs)
+
+ def process(self, msg, kwargs):
+ if 'extra' not in kwargs:
+ kwargs['extra'] = {}
+ extra = kwargs['extra']
+
+ context = kwargs.pop('context', None)
+ if not context:
+ context = getattr(local.store, 'context', None)
+ if context:
+ extra.update(_dictify_context(context))
+
+ instance = kwargs.pop('instance', None)
+ instance_extra = ''
+ if instance:
+ instance_extra = CONF.instance_format % instance
+ else:
+ instance_uuid = kwargs.pop('instance_uuid', None)
+ if instance_uuid:
+ instance_extra = (CONF.instance_uuid_format
+ % {'uuid': instance_uuid})
+ extra.update({'instance': instance_extra})
+
+ extra.update({"project": self.project})
+ extra.update({"version": self.version})
+ extra['extra'] = extra.copy()
+ return msg, kwargs
+
+
+class JSONFormatter(logging.Formatter):
+ def __init__(self, fmt=None, datefmt=None):
+        # NOTE(jkoelker) we ignore the fmt argument, but it's still there
+ # since logging.config.fileConfig passes it.
+ self.datefmt = datefmt
+
+ def formatException(self, ei, strip_newlines=True):
+ lines = traceback.format_exception(*ei)
+ if strip_newlines:
+ lines = [itertools.ifilter(
+ lambda x: x,
+ line.rstrip().splitlines()) for line in lines]
+ lines = list(itertools.chain(*lines))
+ return lines
+
+ def format(self, record):
+ message = {'message': record.getMessage(),
+ 'asctime': self.formatTime(record, self.datefmt),
+ 'name': record.name,
+ 'msg': record.msg,
+ 'args': record.args,
+ 'levelname': record.levelname,
+ 'levelno': record.levelno,
+ 'pathname': record.pathname,
+ 'filename': record.filename,
+ 'module': record.module,
+ 'lineno': record.lineno,
+ 'funcname': record.funcName,
+ 'created': record.created,
+ 'msecs': record.msecs,
+ 'relative_created': record.relativeCreated,
+ 'thread': record.thread,
+ 'thread_name': record.threadName,
+ 'process_name': record.processName,
+ 'process': record.process,
+ 'traceback': None}
+
+ if hasattr(record, 'extra'):
+ message['extra'] = record.extra
+
+ if record.exc_info:
+ message['traceback'] = self.formatException(record.exc_info)
+
+ return jsonutils.dumps(message)
+
+
+def _create_logging_excepthook(product_name):
+ def logging_excepthook(type, value, tb):
+ extra = {}
+ if CONF.verbose:
+ extra['exc_info'] = (type, value, tb)
+ getLogger(product_name).critical(str(value), **extra)
+ return logging_excepthook
+
+
+class LogConfigError(Exception):
+
+ message = _('Error loading logging config %(log_config)s: %(err_msg)s')
+
+ def __init__(self, log_config, err_msg):
+ self.log_config = log_config
+ self.err_msg = err_msg
+
+ def __str__(self):
+ return self.message % dict(log_config=self.log_config,
+ err_msg=self.err_msg)
+
+
+def _load_log_config(log_config):
+ try:
+ logging.config.fileConfig(log_config)
+ except moves.configparser.Error as exc:
+ raise LogConfigError(log_config, str(exc))
+
+
+def setup(product_name):
+ """Setup logging."""
+ if CONF.log_config:
+ _load_log_config(CONF.log_config)
+ else:
+ _setup_logging_from_conf()
+ sys.excepthook = _create_logging_excepthook(product_name)
+
+
+def set_defaults(logging_context_format_string):
+ cfg.set_defaults(log_opts,
+ logging_context_format_string=
+ logging_context_format_string)
+
+
+def _find_facility_from_conf():
+ facility_names = logging.handlers.SysLogHandler.facility_names
+ facility = getattr(logging.handlers.SysLogHandler,
+ CONF.syslog_log_facility,
+ None)
+
+ if facility is None and CONF.syslog_log_facility in facility_names:
+ facility = facility_names.get(CONF.syslog_log_facility)
+
+ if facility is None:
+ valid_facilities = facility_names.keys()
+ consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
+ 'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
+ 'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
+ 'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
+ 'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
+ valid_facilities.extend(consts)
+ raise TypeError(_('syslog facility must be one of: %s') %
+ ', '.join("'%s'" % fac
+ for fac in valid_facilities))
+
+ return facility
+
+
+def _setup_logging_from_conf():
+ log_root = getLogger(None).logger
+ for handler in log_root.handlers:
+ log_root.removeHandler(handler)
+
+ if CONF.use_syslog:
+ facility = _find_facility_from_conf()
+ syslog = logging.handlers.SysLogHandler(address='/dev/log',
+ facility=facility)
+ log_root.addHandler(syslog)
+
+ logpath = _get_log_file_path()
+ if logpath:
+ filelog = logging.handlers.WatchedFileHandler(logpath)
+ log_root.addHandler(filelog)
+
+ if CONF.use_stderr:
+ streamlog = ColorHandler()
+ log_root.addHandler(streamlog)
+
+ elif not CONF.log_file:
+ # pass sys.stdout as a positional argument
+ # python2.6 calls the argument strm, in 2.7 it's stream
+ streamlog = logging.StreamHandler(sys.stdout)
+ log_root.addHandler(streamlog)
+
+ if CONF.publish_errors:
+ handler = importutils.import_object(
+ "tempest.openstack.common.log_handler.PublishErrorsHandler",
+ logging.ERROR)
+ log_root.addHandler(handler)
+
+ datefmt = CONF.log_date_format
+ for handler in log_root.handlers:
+ # NOTE(alaski): CONF.log_format overrides everything currently. This
+ # should be deprecated in favor of context aware formatting.
+ if CONF.log_format:
+ handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
+ datefmt=datefmt))
+ log_root.info('Deprecated: log_format is now deprecated and will '
+ 'be removed in the next release')
+ else:
+ handler.setFormatter(ContextFormatter(datefmt=datefmt))
+
+ if CONF.debug:
+ log_root.setLevel(logging.DEBUG)
+ elif CONF.verbose:
+ log_root.setLevel(logging.INFO)
+ else:
+ log_root.setLevel(logging.WARNING)
+
+ for pair in CONF.default_log_levels:
+ mod, _sep, level_name = pair.partition('=')
+ level = logging.getLevelName(level_name)
+ logger = logging.getLogger(mod)
+ logger.setLevel(level)
+
+_loggers = {}
+
+
+def getLogger(name='unknown', version='unknown'):
+ if name not in _loggers:
+ _loggers[name] = ContextAdapter(logging.getLogger(name),
+ name,
+ version)
+ return _loggers[name]
+
+
+def getLazyLogger(name='unknown', version='unknown'):
+ """Returns lazy logger.
+
+ Creates a pass-through logger that does not create the real logger
+ until it is really needed and delegates all calls to the real logger
+ once it is created.
+ """
+ return LazyAdapter(name, version)
+
+
+class WritableLogger(object):
+ """A thin wrapper that responds to `write` and logs."""
+
+ def __init__(self, logger, level=logging.INFO):
+ self.logger = logger
+ self.level = level
+
+ def write(self, msg):
+ self.logger.log(self.level, msg)
+
+
+class ContextFormatter(logging.Formatter):
+ """A context.RequestContext aware formatter configured through flags.
+
+ The flags used to set format strings are: logging_context_format_string
+ and logging_default_format_string. You can also specify
+ logging_debug_format_suffix to append extra formatting if the log level is
+ debug.
+
+ For information about what variables are available for the formatter see:
+ http://docs.python.org/library/logging.html#formatter
+
+ """
+
+ def format(self, record):
+ """Uses contextstring if request_id is set, otherwise default."""
+        # NOTE(sdague): default the fancier formatting params
+ # to an empty string so we don't throw an exception if
+ # they get used
+ for key in ('instance', 'color'):
+ if key not in record.__dict__:
+ record.__dict__[key] = ''
+
+ if record.__dict__.get('request_id', None):
+ self._fmt = CONF.logging_context_format_string
+ else:
+ self._fmt = CONF.logging_default_format_string
+
+ if (record.levelno == logging.DEBUG and
+ CONF.logging_debug_format_suffix):
+ self._fmt += " " + CONF.logging_debug_format_suffix
+
+        # Cache this on the record, Logger will respect our formatted copy
+ if record.exc_info:
+ record.exc_text = self.formatException(record.exc_info, record)
+ return logging.Formatter.format(self, record)
+
+ def formatException(self, exc_info, record=None):
+ """Format exception output with CONF.logging_exception_prefix."""
+ if not record:
+ return logging.Formatter.formatException(self, exc_info)
+
+ stringbuffer = moves.StringIO()
+ traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
+ None, stringbuffer)
+ lines = stringbuffer.getvalue().split('\n')
+ stringbuffer.close()
+
+ if CONF.logging_exception_prefix.find('%(asctime)') != -1:
+ record.asctime = self.formatTime(record, self.datefmt)
+
+ formatted_lines = []
+ for line in lines:
+ pl = CONF.logging_exception_prefix % record.__dict__
+ fl = '%s%s' % (pl, line)
+ formatted_lines.append(fl)
+ return '\n'.join(formatted_lines)
+
+
+class ColorHandler(logging.StreamHandler):
+ LEVEL_COLORS = {
+ logging.DEBUG: '\033[00;32m', # GREEN
+ logging.INFO: '\033[00;36m', # CYAN
+ logging.AUDIT: '\033[01;36m', # BOLD CYAN
+ logging.WARN: '\033[01;33m', # BOLD YELLOW
+ logging.ERROR: '\033[01;31m', # BOLD RED
+ logging.CRITICAL: '\033[01;31m', # BOLD RED
+ }
+
+ def format(self, record):
+ record.color = self.LEVEL_COLORS[record.levelno]
+ return logging.StreamHandler.format(self, record)
+
+
+class DeprecatedConfig(Exception):
+ message = _("Fatal call to deprecated config: %(msg)s")
+
+ def __init__(self, msg):
+ super(Exception, self).__init__(self.message % dict(msg=msg))
diff --git a/tempest/openstack/common/timeutils.py b/tempest/openstack/common/timeutils.py
new file mode 100644
index 0000000..bd60489
--- /dev/null
+++ b/tempest/openstack/common/timeutils.py
@@ -0,0 +1,188 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack Foundation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Time related utilities and helper functions.
+"""
+
+import calendar
+import datetime
+
+import iso8601
+import six
+
+
+# ISO 8601 extended time format with microseconds
+_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f'
+_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
+PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND
+
+
+def isotime(at=None, subsecond=False):
+ """Stringify time in ISO 8601 format."""
+ if not at:
+ at = utcnow()
+ st = at.strftime(_ISO8601_TIME_FORMAT
+ if not subsecond
+ else _ISO8601_TIME_FORMAT_SUBSECOND)
+ tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
+ st += ('Z' if tz == 'UTC' else tz)
+ return st
+
+
+def parse_isotime(timestr):
+ """Parse time from ISO 8601 format."""
+ try:
+ return iso8601.parse_date(timestr)
+ except iso8601.ParseError as e:
+ raise ValueError(e.message)
+ except TypeError as e:
+ raise ValueError(e.message)
+
+
+def strtime(at=None, fmt=PERFECT_TIME_FORMAT):
+ """Returns formatted utcnow."""
+ if not at:
+ at = utcnow()
+ return at.strftime(fmt)
+
+
+def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):
+ """Turn a formatted time back into a datetime."""
+ return datetime.datetime.strptime(timestr, fmt)
+
+
+def normalize_time(timestamp):
+ """Normalize time in arbitrary timezone to UTC naive object."""
+ offset = timestamp.utcoffset()
+ if offset is None:
+ return timestamp
+ return timestamp.replace(tzinfo=None) - offset
+
+
+def is_older_than(before, seconds):
+ """Return True if before is older than seconds."""
+ if isinstance(before, six.string_types):
+ before = parse_strtime(before).replace(tzinfo=None)
+ return utcnow() - before > datetime.timedelta(seconds=seconds)
+
+
+def is_newer_than(after, seconds):
+ """Return True if after is newer than seconds."""
+ if isinstance(after, six.string_types):
+ after = parse_strtime(after).replace(tzinfo=None)
+ return after - utcnow() > datetime.timedelta(seconds=seconds)
+
+
+def utcnow_ts():
+ """Timestamp version of our utcnow function."""
+ return calendar.timegm(utcnow().timetuple())
+
+
+def utcnow():
+ """Overridable version of utils.utcnow."""
+ if utcnow.override_time:
+ try:
+ return utcnow.override_time.pop(0)
+ except AttributeError:
+ return utcnow.override_time
+ return datetime.datetime.utcnow()
+
+
+def iso8601_from_timestamp(timestamp):
+ """Returns an iso8601 formatted date from timestamp."""
+ return isotime(datetime.datetime.utcfromtimestamp(timestamp))
+
+
+utcnow.override_time = None
+
+
+def set_time_override(override_time=datetime.datetime.utcnow()):
+ """Overrides utils.utcnow.
+
+ Make it return a constant time or a list thereof, one at a time.
+ """
+ utcnow.override_time = override_time
+
+
+def advance_time_delta(timedelta):
+ """Advance overridden time using a datetime.timedelta."""
+ assert(not utcnow.override_time is None)
+ try:
+ for dt in utcnow.override_time:
+ dt += timedelta
+ except TypeError:
+ utcnow.override_time += timedelta
+
+
+def advance_time_seconds(seconds):
+ """Advance overridden time by seconds."""
+ advance_time_delta(datetime.timedelta(0, seconds))
+
+
+def clear_time_override():
+ """Remove the overridden time."""
+ utcnow.override_time = None
+
+
+def marshall_now(now=None):
+ """Make an rpc-safe datetime with microseconds.
+
+ Note: tzinfo is stripped, but not required for relative times.
+ """
+ if not now:
+ now = utcnow()
+ return dict(day=now.day, month=now.month, year=now.year, hour=now.hour,
+ minute=now.minute, second=now.second,
+ microsecond=now.microsecond)
+
+
+def unmarshall_time(tyme):
+ """Unmarshall a datetime dict."""
+ return datetime.datetime(day=tyme['day'],
+ month=tyme['month'],
+ year=tyme['year'],
+ hour=tyme['hour'],
+ minute=tyme['minute'],
+ second=tyme['second'],
+ microsecond=tyme['microsecond'])
+
+
+def delta_seconds(before, after):
+ """Return the difference between two timing objects.
+
+ Compute the difference in seconds between two date, time, or
+ datetime objects (as a float, to microsecond resolution).
+ """
+ delta = after - before
+ try:
+ return delta.total_seconds()
+ except AttributeError:
+ return ((delta.days * 24 * 3600) + delta.seconds +
+ float(delta.microseconds) / (10 ** 6))
+
+
+def is_soon(dt, window):
+ """Determines if time is going to happen in the next window seconds.
+
+ :params dt: the time
+ :params window: minimum seconds to remain to consider the time not soon
+
+ :return: True if expiration is within the given duration
+ """
+ soon = (utcnow() + datetime.timedelta(seconds=window))
+ return normalize_time(dt) <= soon
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index fe6fbf5..d0f0127 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -29,11 +29,11 @@
from tempest.api.network import common as net_common
-from tempest.common import log as logging
+from tempest.common import isolated_creds
from tempest.common import ssh
from tempest.common.utils.data_utils import rand_name
-from tempest import exceptions
import tempest.manager
+from tempest.openstack.common import log as logging
import tempest.test
@@ -49,38 +49,25 @@
NOVACLIENT_VERSION = '2'
CINDERCLIENT_VERSION = '1'
- def __init__(self):
+ def __init__(self, username, password, tenant_name):
super(OfficialClientManager, self).__init__()
- self.compute_client = self._get_compute_client()
+ self.compute_client = self._get_compute_client(username,
+ password,
+ tenant_name)
+ self.identity_client = self._get_identity_client(username,
+ password,
+ tenant_name)
self.image_client = self._get_image_client()
- self.identity_client = self._get_identity_client()
self.network_client = self._get_network_client()
- self.volume_client = self._get_volume_client()
- self.client_attr_names = [
- 'compute_client',
- 'image_client',
- 'identity_client',
- 'network_client',
- 'volume_client'
- ]
+ self.volume_client = self._get_volume_client(username,
+ password,
+ tenant_name)
- def _get_compute_client(self, username=None, password=None,
- tenant_name=None):
+ def _get_compute_client(self, username, password, tenant_name):
# Novaclient will not execute operations for anyone but the
# identified user, so a new client needs to be created for
# each user that operations need to be performed for.
- if not username:
- username = self.config.identity.username
- if not password:
- password = self.config.identity.password
- if not tenant_name:
- tenant_name = self.config.identity.tenant_name
-
- if None in (username, password, tenant_name):
- msg = ("Missing required credentials for compute client. "
- "username: %(username)s, password: %(password)s, "
- "tenant_name: %(tenant_name)s") % locals()
- raise exceptions.InvalidConfiguration(msg)
+ self._validate_credentials(username, password, tenant_name)
auth_url = self.config.identity.uri
dscv = self.config.identity.disable_ssl_certificate_validation
@@ -96,23 +83,14 @@
insecure=dscv)
def _get_image_client(self):
- keystone = self._get_identity_client()
- token = keystone.auth_token
- endpoint = keystone.service_catalog.url_for(service_type='image',
- endpoint_type='publicURL')
+ token = self.identity_client.auth_token
+ endpoint = self.identity_client.service_catalog.url_for(
+ service_type='image', endpoint_type='publicURL')
dscv = self.config.identity.disable_ssl_certificate_validation
return glanceclient.Client('1', endpoint=endpoint, token=token,
insecure=dscv)
- def _get_volume_client(self, username=None, password=None,
- tenant_name=None):
- if not username:
- username = self.config.identity.username
- if not password:
- password = self.config.identity.password
- if not tenant_name:
- tenant_name = self.config.identity.tenant_name
-
+ def _get_volume_client(self, username, password, tenant_name):
auth_url = self.config.identity.uri
return cinderclient.client.Client(self.CINDERCLIENT_VERSION,
username,
@@ -120,22 +98,10 @@
tenant_name,
auth_url)
- def _get_identity_client(self, username=None, password=None,
- tenant_name=None):
+ def _get_identity_client(self, username, password, tenant_name):
# This identity client is not intended to check the security
# of the identity service, so use admin credentials by default.
- if not username:
- username = self.config.identity.admin_username
- if not password:
- password = self.config.identity.admin_password
- if not tenant_name:
- tenant_name = self.config.identity.admin_tenant_name
-
- if None in (username, password, tenant_name):
- msg = ("Missing required credentials for identity client. "
- "username: %(username)s, password: %(password)s, "
- "tenant_name: %(tenant_name)s") % locals()
- raise exceptions.InvalidConfiguration(msg)
+ self._validate_credentials(username, password, tenant_name)
auth_url = self.config.identity.uri
dscv = self.config.identity.disable_ssl_certificate_validation
@@ -157,11 +123,7 @@
password = self.config.identity.admin_password
tenant_name = self.config.identity.admin_tenant_name
- if None in (username, password, tenant_name):
- msg = ("Missing required credentials for network client. "
- "username: %(username)s, password: %(password)s, "
- "tenant_name: %(tenant_name)s") % locals()
- raise exceptions.InvalidConfiguration(msg)
+ self._validate_credentials(username, password, tenant_name)
auth_url = self.config.identity.uri
dscv = self.config.identity.disable_ssl_certificate_validation
@@ -173,7 +135,7 @@
insecure=dscv)
-class OfficialClientTest(tempest.test.TestCase):
+class OfficialClientTest(tempest.test.BaseTestCase):
"""
Official Client test base class for scenario testing.
@@ -186,7 +148,27 @@
* Use only the default client tool for calling an API
"""
- manager_class = OfficialClientManager
+ @classmethod
+ def setUpClass(cls):
+ super(OfficialClientTest, cls).setUpClass()
+ cls.isolated_creds = isolated_creds.IsolatedCreds(
+ __name__, tempest_client=False)
+ if cls.config.compute.allow_tenant_isolation:
+ creds = cls.isolated_creds.get_primary_creds()
+ username, tenant_name, password = creds
+ else:
+ username = cls.config.identity.username
+ password = cls.config.identity.password
+ tenant_name = cls.config.identity.tenant_name
+
+ cls.manager = OfficialClientManager(username, password, tenant_name)
+ cls.compute_client = cls.manager.compute_client
+ cls.image_client = cls.manager.image_client
+ cls.identity_client = cls.manager.identity_client
+ cls.network_client = cls.manager.network_client
+ cls.volume_client = cls.manager.volume_client
+ cls.resource_keys = {}
+ cls.os_resources = []
@classmethod
def tearDownClass(cls):
@@ -227,6 +209,54 @@
# Block until resource deletion has completed or timed-out
tempest.test.call_until_true(is_deletion_complete, 10, 1)
+ cls.isolated_creds.clear_isolated_creds()
+ super(OfficialClientTest, cls).tearDownClass()
+
+ @classmethod
+ def set_resource(cls, key, thing):
+ LOG.debug("Adding %r to shared resources of %s" %
+ (thing, cls.__name__))
+ cls.resource_keys[key] = thing
+ cls.os_resources.append(thing)
+
+ @classmethod
+ def get_resource(cls, key):
+ return cls.resource_keys[key]
+
+ @classmethod
+ def remove_resource(cls, key):
+ thing = cls.resource_keys[key]
+ cls.os_resources.remove(thing)
+ del cls.resource_keys[key]
+
+ def status_timeout(self, things, thing_id, expected_status):
+ """
+ Given a thing and an expected status, do a loop, sleeping
+ for a configurable amount of time, checking for the
+ expected status to show. At any time, if the returned
+ status of the thing is ERROR, fail out.
+ """
+ def check_status():
+ # python-novaclient has resources available to its client
+ # that all implement a get() method taking an identifier
+ # for the singular resource to retrieve.
+ thing = things.get(thing_id)
+ new_status = thing.status
+ if new_status == 'ERROR':
+ self.fail("%s failed to get to expected status. "
+ "In ERROR state."
+ % thing)
+ elif new_status == expected_status:
+ return True # All good.
+ LOG.debug("Waiting for %s to get to %s status. "
+ "Currently in %s status",
+ thing, expected_status, new_status)
+ if not tempest.test.call_until_true(
+ check_status,
+ self.config.compute.build_timeout,
+ self.config.compute.build_interval):
+ self.fail("Timed out waiting for thing %s to become %s"
+ % (thing_id, expected_status))
class NetworkScenarioTest(OfficialClientTest):
@@ -236,7 +266,7 @@
@classmethod
def check_preconditions(cls):
- if (cls.config.network.neutron_available):
+ if (cls.config.service_available.neutron):
cls.enabled = True
#verify that neutron_available is telling the truth
try:
@@ -438,24 +468,24 @@
if proc.returncode == 0:
return True
- # TODO(mnewby) Allow configuration of execution and sleep duration.
- return tempest.test.call_until_true(ping, 20, 1)
+ return tempest.test.call_until_true(
+ ping, self.config.compute.ping_timeout, 1)
def _is_reachable_via_ssh(self, ip_address, username, private_key,
- timeout=120):
+ timeout):
ssh_client = ssh.Client(ip_address, username,
pkey=private_key,
timeout=timeout)
return ssh_client.test_connection_auth()
- def _check_vm_connectivity(self, ip_address, username, private_key,
- timeout=120):
+ def _check_vm_connectivity(self, ip_address, username, private_key):
self.assertTrue(self._ping_ip_address(ip_address),
"Timed out waiting for %s to become "
"reachable" % ip_address)
- self.assertTrue(self._is_reachable_via_ssh(ip_address,
- username,
- private_key,
- timeout=timeout),
- 'Auth failure in connecting to %s@%s via ssh' %
- (username, ip_address))
+ self.assertTrue(self._is_reachable_via_ssh(
+ ip_address,
+ username,
+ private_key,
+ timeout=self.config.compute.ssh_timeout),
+ 'Auth failure in connecting to %s@%s via ssh' %
+ (username, ip_address))
diff --git a/tempest/scenario/test_dashboard_basic_ops.py b/tempest/scenario/test_dashboard_basic_ops.py
new file mode 100644
index 0000000..9a45572
--- /dev/null
+++ b/tempest/scenario/test_dashboard_basic_ops.py
@@ -0,0 +1,72 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import urllib
+import urllib2
+
+from lxml import html
+
+from tempest.scenario import manager
+
+
+class TestDashboardBasicOps(manager.OfficialClientTest):
+
+ """
+ This is a basic scenario test:
+ * checks that the login page is available
+ * logs in as a regular user
+ * checks that the user home page loads without error
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestDashboardBasicOps, cls).setUpClass()
+
+ if not cls.config.service_available.horizon:
+ raise cls.skipException("Horizon support is required")
+
+ def check_login_page(self):
+ response = urllib2.urlopen(self.config.dashboard.dashboard_url)
+ self.assertIn("<h3>Log In</h3>", response.read())
+
+ def user_login(self):
+ self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor())
+ response = self.opener.open(self.config.dashboard.dashboard_url).read()
+
+ # Grab the CSRF token and default region
+ csrf_token = html.fromstring(response).xpath(
+ '//input[@name="csrfmiddlewaretoken"]/@value')[0]
+ region = html.fromstring(response).xpath(
+ '//input[@name="region"]/@value')[0]
+
+ # Prepare login form request
+ req = urllib2.Request(self.config.dashboard.login_url)
+ req.add_header('Content-type', 'application/x-www-form-urlencoded')
+ req.add_header('Referer', self.config.dashboard.dashboard_url)
+ params = {'username': self.config.identity.username,
+ 'password': self.config.identity.password,
+ 'region': region,
+ 'csrfmiddlewaretoken': csrf_token}
+ self.opener.open(req, urllib.urlencode(params))
+
+ def check_home_page(self):
+ response = self.opener.open(self.config.dashboard.dashboard_url)
+ self.assertIn('Overview', response.read())
+
+ def test_basic_scenario(self):
+ self.check_login_page()
+ self.user_login()
+ self.check_home_page()
diff --git a/tempest/scenario/test_large_ops.py b/tempest/scenario/test_large_ops.py
new file mode 100644
index 0000000..39b1e10
--- /dev/null
+++ b/tempest/scenario/test_large_ops.py
@@ -0,0 +1,103 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 NEC Corporation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.common.utils.data_utils import rand_name
+from tempest.openstack.common import log as logging
+from tempest.scenario import manager
+
+
+LOG = logging.getLogger(__name__)
+
+
+class TestLargeOpsScenario(manager.OfficialClientTest):
+
+ """
+ Test large operations.
+
+ This test below:
+ * Spin up multiple instances in one nova call
+ * as a regular user
+ * TODO: same thing for cinder
+
+ """
+
+ def _wait_for_server_status(self, status):
+ for server in self.servers:
+ self.status_timeout(
+ self.compute_client.servers, server.id, status)
+
+ def _wait_for_volume_status(self, status):
+ volume_id = self.volume.id
+ self.status_timeout(
+ self.volume_client.volumes, volume_id, status)
+
+ def _image_create(self, name, fmt, path, properties={}):
+ name = rand_name('%s-' % name)
+ image_file = open(path, 'rb')
+ self.addCleanup(image_file.close)
+ params = {
+ 'name': name,
+ 'container_format': fmt,
+ 'disk_format': fmt,
+ 'is_public': 'True',
+ }
+ params.update(properties)
+ image = self.image_client.images.create(**params)
+ self.addCleanup(self.image_client.images.delete, image)
+ self.assertEqual("queued", image.status)
+ image.update(data=image_file)
+ return image.id
+
+ def glance_image_create(self):
+ aki_img_path = self.config.scenario.img_dir + "/" + \
+ self.config.scenario.aki_img_file
+ ari_img_path = self.config.scenario.img_dir + "/" + \
+ self.config.scenario.ari_img_file
+ ami_img_path = self.config.scenario.img_dir + "/" + \
+ self.config.scenario.ami_img_file
+ LOG.debug("paths: ami: %s, ari: %s, aki: %s"
+ % (ami_img_path, ari_img_path, aki_img_path))
+ kernel_id = self._image_create('scenario-aki', 'aki', aki_img_path)
+ ramdisk_id = self._image_create('scenario-ari', 'ari', ari_img_path)
+ properties = {
+ 'properties': {'kernel_id': kernel_id, 'ramdisk_id': ramdisk_id}
+ }
+ self.image = self._image_create('scenario-ami', 'ami',
+ path=ami_img_path,
+ properties=properties)
+
+ def nova_boot(self):
+ def delete(servers):
+ [x.delete() for x in servers]
+
+ name = rand_name('scenario-server-')
+ client = self.compute_client
+ flavor_id = self.config.compute.flavor_ref
+ self.servers = client.servers.create(
+ name=name, image=self.image,
+ flavor=flavor_id,
+ min_count=self.config.scenario.large_ops_number)
+ # needed because of bug 1199788
+ self.servers = [x for x in client.servers.list() if name in x.name]
+ self.addCleanup(delete, self.servers)
+ self._wait_for_server_status('ACTIVE')
+
+ def test_large_ops_scenario(self):
+ if self.config.scenario.large_ops_number < 1:
+ return
+ self.glance_image_create()
+ self.nova_boot()
diff --git a/tempest/scenario/test_minimum_basic.py b/tempest/scenario/test_minimum_basic.py
index 2097f50..13b31ec 100644
--- a/tempest/scenario/test_minimum_basic.py
+++ b/tempest/scenario/test_minimum_basic.py
@@ -15,10 +15,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest.common import log as logging
-
from tempest.common.utils.data_utils import rand_name
from tempest.common.utils.linux.remote_client import RemoteClient
+from tempest.openstack.common import log as logging
from tempest.scenario import manager
@@ -104,7 +103,7 @@
def nova_list(self):
servers = self.compute_client.servers.list()
LOG.debug("server_list:%s" % servers)
- self.assertTrue(self.server in servers)
+ self.assertIn(self.server, servers)
def nova_show(self):
got_server = self.compute_client.servers.get(self.server)
@@ -124,7 +123,7 @@
def cinder_list(self):
volumes = self.volume_client.volumes.list()
- self.assertTrue(self.volume in volumes)
+ self.assertIn(self.volume, volumes)
def cinder_show(self):
volume = self.volume_client.volumes.get(self.volume.id)
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index 390e004..5311eae 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -16,8 +16,11 @@
# License for the specific language governing permissions and limitations
# under the License.
+import testtools
+
from tempest.api.network import common as net_common
from tempest.common.utils.data_utils import rand_name
+from tempest import config
from tempest.scenario import manager
from tempest.test import attr
@@ -88,6 +91,8 @@
"""
+ CONF = config.TempestConfig()
+
@classmethod
def check_preconditions(cls):
super(TestNetworkBasicOps, cls).check_preconditions()
@@ -242,6 +247,8 @@
self.floating_ips[server].append(floating_ip)
@attr(type='smoke')
+ @testtools.skipIf(CONF.service_available.neutron,
+ "Skipped until the Bug #1210664 is resolved")
def test_008_check_public_network_connectivity(self):
if not self.floating_ips:
raise self.skipTest('No floating ips have been allocated.')
diff --git a/tempest/scenario/test_server_advanced_ops.py b/tempest/scenario/test_server_advanced_ops.py
index 6202e91..8ee740e 100644
--- a/tempest/scenario/test_server_advanced_ops.py
+++ b/tempest/scenario/test_server_advanced_ops.py
@@ -15,8 +15,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest.common import log as logging
from tempest.common.utils.data_utils import rand_name
+from tempest.openstack.common import log as logging
from tempest.scenario import manager
LOG = logging.getLogger(__name__)
diff --git a/tempest/scenario/test_server_basic_ops.py b/tempest/scenario/test_server_basic_ops.py
index d318dd9..0ec3a1d 100644
--- a/tempest/scenario/test_server_basic_ops.py
+++ b/tempest/scenario/test_server_basic_ops.py
@@ -15,8 +15,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest.common import log as logging
from tempest.common.utils.data_utils import rand_name
+from tempest.openstack.common import log as logging
from tempest.scenario import manager
LOG = logging.getLogger(__name__)
diff --git a/tempest/scenario/test_snapshot_pattern.py b/tempest/scenario/test_snapshot_pattern.py
index f21a00b..6e305c1 100644
--- a/tempest/scenario/test_snapshot_pattern.py
+++ b/tempest/scenario/test_snapshot_pattern.py
@@ -15,10 +15,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest.common import log as logging
-
from tempest.common.utils.data_utils import rand_name
from tempest.common.utils.linux.remote_client import RemoteClient
+from tempest.openstack.common import log as logging
from tempest.scenario import manager
@@ -85,17 +84,21 @@
self.addCleanup(self.compute_client.security_group_rules.delete,
sg_rule.id)
- def _ssh_to_server(self, server):
+ def _ssh_to_server(self, server_or_ip):
+ if isinstance(server_or_ip, basestring):
+ ip = server_or_ip
+ else:
+ network_name_for_ssh = self.config.compute.network_for_ssh
+ ip = server_or_ip.networks[network_name_for_ssh][0]
username = self.config.scenario.ssh_user
- ip = server.networks[self.config.compute.network_for_ssh][0]
linux_client = RemoteClient(ip,
username,
pkey=self.keypair.private_key)
return linux_client.ssh_client
- def _write_timestamp(self, server):
- ssh_client = self._ssh_to_server(server)
+ def _write_timestamp(self, server_or_ip):
+ ssh_client = self._ssh_to_server(server_or_ip)
ssh_client.exec_command('date > /tmp/timestamp; sync')
self.timestamp = ssh_client.exec_command('cat /tmp/timestamp')
@@ -110,11 +113,19 @@
self.assertEquals(snapshot_name, snapshot_image.name)
return image_id
- def _check_timestamp(self, server):
- ssh_client = self._ssh_to_server(server)
+ def _check_timestamp(self, server_or_ip):
+ ssh_client = self._ssh_to_server(server_or_ip)
got_timestamp = ssh_client.exec_command('cat /tmp/timestamp')
self.assertEqual(self.timestamp, got_timestamp)
+ def _create_floating_ip(self):
+ floating_ip = self.compute_client.floating_ips.create()
+ self.addCleanup(floating_ip.delete)
+ return floating_ip
+
+ def _set_floating_ip_to_server(self, server, floating_ip):
+ server.add_floating_ip(floating_ip)
+
def test_snapshot_pattern(self):
# prepare for booting a instance
self._add_keypair()
@@ -122,7 +133,12 @@
# boot a instance and create a timestamp file in it
server = self._boot_image(self.config.compute.image_ref)
- self._write_timestamp(server)
+ if self.config.compute.use_floatingip_for_ssh:
+ fip_for_server = self._create_floating_ip()
+ self._set_floating_ip_to_server(server, fip_for_server)
+ self._write_timestamp(fip_for_server.ip)
+ else:
+ self._write_timestamp(server)
# snapshot the instance
snapshot_image_id = self._create_image(server)
@@ -131,4 +147,10 @@
server_from_snapshot = self._boot_image(snapshot_image_id)
# check the existence of the timestamp file in the second instance
- self._check_timestamp(server_from_snapshot)
+ if self.config.compute.use_floatingip_for_ssh:
+ fip_for_snapshot = self._create_floating_ip()
+ self._set_floating_ip_to_server(server_from_snapshot,
+ fip_for_snapshot)
+ self._check_timestamp(fip_for_snapshot.ip)
+ else:
+ self._check_timestamp(server_from_snapshot)
diff --git a/tempest/scenario/test_stamp_pattern.py b/tempest/scenario/test_stamp_pattern.py
new file mode 100644
index 0000000..4434604
--- /dev/null
+++ b/tempest/scenario/test_stamp_pattern.py
@@ -0,0 +1,272 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 NEC Corporation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import time
+
+from cinderclient import exceptions as cinder_exceptions
+import testtools
+
+from tempest.common.utils.data_utils import rand_name
+from tempest.common.utils.linux.remote_client import RemoteClient
+from tempest import exceptions
+from tempest.openstack.common import log as logging
+from tempest.scenario import manager
+import tempest.test
+
+LOG = logging.getLogger(__name__)
+
+
+class TestStampPattern(manager.OfficialClientTest):
+ """
+ This test is for snapshotting an instance/volume and attaching the volume
+ created from snapshot to the instance booted from snapshot.
+ The following is the scenario outline:
+ 1. Boot an instance "instance1"
+ 2. Create a volume "volume1"
+ 3. Attach volume1 to instance1
+ 4. Create a filesystem on volume1
+ 5. Mount volume1
+ 6. Create a file which timestamp is written in volume1
+ 7. Unmount volume1
+ 8. Detach volume1 from instance1
+ 9. Get a snapshot "snapshot_from_volume" of volume1
+ 10. Get a snapshot "snapshot_from_instance" of instance1
+ 11. Boot an instance "instance2" from snapshot_from_instance
+ 12. Create a volume "volume2" from snapshot_from_volume
+ 13. Attach volume2 to instance2
+ 14. Check the existence of a file which created at 6. in volume2
+ """
+
+ def _wait_for_server_status(self, server, status):
+ self.status_timeout(self.compute_client.servers,
+ server.id,
+ status)
+
+ def _wait_for_image_status(self, image_id, status):
+ self.status_timeout(self.image_client.images, image_id, status)
+
+ def _wait_for_volume_snapshot_status(self, volume_snapshot, status):
+ self.status_timeout(self.volume_client.volume_snapshots,
+ volume_snapshot.id, status)
+
+ def _boot_image(self, image_id):
+ name = rand_name('scenario-server-')
+ client = self.compute_client
+ flavor_id = self.config.compute.flavor_ref
+ LOG.debug("name:%s, image:%s" % (name, image_id))
+ server = client.servers.create(name=name,
+ image=image_id,
+ flavor=flavor_id,
+ key_name=self.keypair.name)
+ self.addCleanup(self.compute_client.servers.delete, server)
+ self.assertEqual(name, server.name)
+ self._wait_for_server_status(server, 'ACTIVE')
+ server = client.servers.get(server) # getting network information
+ LOG.debug("server:%s" % server)
+ return server
+
+ def _add_keypair(self):
+ name = rand_name('scenario-keypair-')
+ self.keypair = self.compute_client.keypairs.create(name=name)
+ self.addCleanup(self.compute_client.keypairs.delete, self.keypair)
+ self.assertEqual(name, self.keypair.name)
+
+ def _create_floating_ip(self):
+ floating_ip = self.compute_client.floating_ips.create()
+ self.addCleanup(floating_ip.delete)
+ return floating_ip
+
+ def _add_floating_ip(self, server, floating_ip):
+ server.add_floating_ip(floating_ip)
+
+ def _create_security_group_rule(self):
+ sgs = self.compute_client.security_groups.list()
+ for sg in sgs:
+ if sg.name == 'default':
+ secgroup = sg
+
+ ruleset = {
+ # ssh
+ 'ip_protocol': 'tcp',
+ 'from_port': 22,
+ 'to_port': 22,
+ 'cidr': '0.0.0.0/0',
+ 'group_id': None
+ }
+ sg_rule = self.compute_client.security_group_rules.create(secgroup.id,
+ **ruleset)
+ self.addCleanup(self.compute_client.security_group_rules.delete,
+ sg_rule.id)
+
+ def _remote_client_to_server(self, server_or_ip):
+ if isinstance(server_or_ip, basestring):
+ ip = server_or_ip
+ else:
+ network_name_for_ssh = self.config.compute.network_for_ssh
+ ip = server_or_ip.networks[network_name_for_ssh][0]
+ username = self.config.scenario.ssh_user
+ linux_client = RemoteClient(ip,
+ username,
+ pkey=self.keypair.private_key)
+ return linux_client
+
+ def _ssh_to_server(self, server_or_ip):
+ linux_client = self._remote_client_to_server(server_or_ip)
+ return linux_client.ssh_client
+
+ def _create_image(self, server):
+ snapshot_name = rand_name('scenario-snapshot-')
+ create_image_client = self.compute_client.servers.create_image
+ image_id = create_image_client(server, snapshot_name)
+ self.addCleanup(self.image_client.images.delete, image_id)
+ self._wait_for_server_status(server, 'ACTIVE')
+ self._wait_for_image_status(image_id, 'active')
+ snapshot_image = self.image_client.images.get(image_id)
+ self.assertEquals(snapshot_name, snapshot_image.name)
+ return image_id
+
+ def _create_volume_snapshot(self, volume):
+ snapshot_name = rand_name('scenario-snapshot-')
+ volume_snapshots = self.volume_client.volume_snapshots
+ snapshot = volume_snapshots.create(
+ volume.id, display_name=snapshot_name)
+
+ def cleaner():
+ volume_snapshots.delete(snapshot)
+ try:
+ while volume_snapshots.get(snapshot.id):
+ time.sleep(1)
+ except cinder_exceptions.NotFound:
+ pass
+ self.addCleanup(cleaner)
+ self._wait_for_volume_status(volume, 'available')
+ self._wait_for_volume_snapshot_status(snapshot, 'available')
+ self.assertEquals(snapshot_name, snapshot.display_name)
+ return snapshot
+
+ def _wait_for_volume_status(self, volume, status):
+ self.status_timeout(
+ self.volume_client.volumes, volume.id, status)
+
+ def _create_volume(self, snapshot_id=None):
+ name = rand_name('scenario-volume-')
+ LOG.debug("volume display-name:%s" % name)
+ volume = self.volume_client.volumes.create(size=1,
+ display_name=name,
+ snapshot_id=snapshot_id)
+ LOG.debug("volume created:%s" % volume.display_name)
+
+ def cleaner():
+ self._wait_for_volume_status(volume, 'available')
+ self.volume_client.volumes.delete(volume)
+ self.addCleanup(cleaner)
+ self._wait_for_volume_status(volume, 'available')
+ self.assertEqual(name, volume.display_name)
+ return volume
+
+ def _attach_volume(self, server, volume):
+ attach_volume_client = self.compute_client.volumes.create_server_volume
+ attached_volume = attach_volume_client(server.id,
+ volume.id,
+ '/dev/vdb')
+ self.assertEqual(volume.id, attached_volume.id)
+ self._wait_for_volume_status(attached_volume, 'in-use')
+
+ def _detach_volume(self, server, volume):
+ detach_volume_client = self.compute_client.volumes.delete_server_volume
+ detach_volume_client(server.id, volume.id)
+ self._wait_for_volume_status(volume, 'available')
+
+ def _wait_for_volume_availible_on_the_system(self, server_or_ip):
+ ssh = self._remote_client_to_server(server_or_ip)
+ conf = self.config
+
+ def _func():
+ part = ssh.get_partitions()
+ LOG.debug("Partitions:%s" % part)
+ return 'vdb' in part
+
+ if not tempest.test.call_until_true(_func,
+ conf.compute.build_timeout,
+ conf.compute.build_interval):
+ raise exceptions.TimeoutException
+
+ def _create_timestamp(self, server_or_ip):
+ ssh_client = self._ssh_to_server(server_or_ip)
+ ssh_client.exec_command('sudo /usr/sbin/mkfs.ext4 /dev/vdb')
+ ssh_client.exec_command('sudo mount /dev/vdb /mnt')
+ ssh_client.exec_command('sudo sh -c "date > /mnt/timestamp;sync"')
+ self.timestamp = ssh_client.exec_command('sudo cat /mnt/timestamp')
+ ssh_client.exec_command('sudo umount /mnt')
+
+ def _check_timestamp(self, server_or_ip):
+ ssh_client = self._ssh_to_server(server_or_ip)
+ ssh_client.exec_command('sudo mount /dev/vdb /mnt')
+ got_timestamp = ssh_client.exec_command('sudo cat /mnt/timestamp')
+ self.assertEqual(self.timestamp, got_timestamp)
+
+ @testtools.skip("Until Bug #1205344 is fixed")
+ def test_stamp_pattern(self):
+        # prepare for booting an instance
+ self._add_keypair()
+ self._create_security_group_rule()
+
+ # boot an instance and create a timestamp file in it
+ volume = self._create_volume()
+ server = self._boot_image(self.config.compute.image_ref)
+
+ # create and add floating IP to server1
+ if self.config.compute.use_floatingip_for_ssh:
+ floating_ip_for_server = self._create_floating_ip()
+ self._add_floating_ip(server, floating_ip_for_server)
+ ip_for_server = floating_ip_for_server.ip
+ else:
+ ip_for_server = server
+
+ self._attach_volume(server, volume)
+ self._wait_for_volume_availible_on_the_system(ip_for_server)
+ self._create_timestamp(ip_for_server)
+ self._detach_volume(server, volume)
+
+ # snapshot the volume
+ volume_snapshot = self._create_volume_snapshot(volume)
+
+ # snapshot the instance
+ snapshot_image_id = self._create_image(server)
+
+ # create second volume from the snapshot(volume2)
+ volume_from_snapshot = self._create_volume(
+ snapshot_id=volume_snapshot.id)
+
+ # boot second instance from the snapshot(instance2)
+ server_from_snapshot = self._boot_image(snapshot_image_id)
+
+ # create and add floating IP to server_from_snapshot
+ if self.config.compute.use_floatingip_for_ssh:
+ floating_ip_for_snapshot = self._create_floating_ip()
+ self._add_floating_ip(server_from_snapshot,
+ floating_ip_for_snapshot)
+ ip_for_snapshot = floating_ip_for_snapshot.ip
+ else:
+ ip_for_snapshot = server_from_snapshot
+
+ # attach volume2 to instance2
+ self._attach_volume(server_from_snapshot, volume_from_snapshot)
+ self._wait_for_volume_availible_on_the_system(ip_for_snapshot)
+
+ # check the existence of the timestamp file in the volume2
+ self._check_timestamp(ip_for_snapshot)
diff --git a/tempest/scenario/test_volume_snapshot_pattern.py b/tempest/scenario/test_volume_snapshot_pattern.py
new file mode 100644
index 0000000..4d8a400
--- /dev/null
+++ b/tempest/scenario/test_volume_snapshot_pattern.py
@@ -0,0 +1,122 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.openstack.common import log as logging
+
+from tempest.common.utils.data_utils import rand_name
+from tempest.scenario import manager
+
+LOG = logging.getLogger(__name__)
+
+
+class TestVolumeSnapshotPattern(manager.OfficialClientTest):
+
+ """
+ This test case attempts to reproduce the following steps:
+
+ * Create in Cinder some bootable volume importing a Glance image
+ * Boot an instance from the bootable volume
+ * Create a volume snapshot while the instance is running
+ * Boot an additional instance from the new snapshot based volume
+ """
+
+ def _create_volume_from_image(self):
+ img_uuid = self.config.compute.image_ref
+ vol_name = rand_name('volume-origin')
+ vol = self.volume_client.volumes.create(size=1,
+ display_name=vol_name,
+ imageRef=img_uuid)
+ self.set_resource(vol.id, vol)
+ self.status_timeout(self.volume_client.volumes,
+ vol.id,
+ 'available')
+ return vol
+
+ def _boot_instance_from_volume(self, vol_id):
+ # NOTE(gfidente): the img_uuid here is only needed because
+ # the novaclient requires it to be passed as arg
+ img_uuid = self.config.compute.image_ref
+ i_name = rand_name('instance')
+ flavor_id = self.config.compute.flavor_ref
+ # NOTE(gfidente): the syntax for block_device_mapping is
+ # dev_name=id:type:size:delete_on_terminate
+ # where type needs to be "snap" if the server is booted
+ # from a snapshot, size instead can be safely left empty
+ bd_map = {
+ 'vda': vol_id + ':::0'
+ }
+ create_kwargs = {
+ 'block_device_mapping': bd_map
+ }
+ i = self.compute_client.servers.create(name=i_name,
+ image=img_uuid,
+ flavor=flavor_id,
+ **create_kwargs)
+ self.set_resource(i.id, i)
+ self.status_timeout(self.compute_client.servers,
+ i.id,
+ 'ACTIVE')
+ return i
+
+ def _create_snapshot_from_volume(self, vol_id):
+ volume_snapshots = self.volume_client.volume_snapshots
+ snap_name = rand_name('snapshot')
+ snap = volume_snapshots.create(volume_id=vol_id,
+ force=True,
+ display_name=snap_name)
+ self.set_resource(snap.id, snap)
+ self.status_timeout(volume_snapshots,
+ snap.id,
+ 'available')
+ return snap
+
+ def _create_volume_from_snapshot(self, snap_id):
+ vol_name = rand_name('volume')
+ vol = self.volume_client.volumes.create(size=1,
+ display_name=vol_name,
+ snapshot_id=snap_id)
+ self.set_resource(vol.id, vol)
+ self.status_timeout(self.volume_client.volumes,
+ vol.id,
+ 'available')
+ return vol
+
+ def _stop_instances(self, instances):
+ # NOTE(gfidente): two loops so we do not wait for the status twice
+ for i in instances:
+ self.compute_client.servers.stop(i)
+ for i in instances:
+ self.status_timeout(self.compute_client.servers,
+ i.id,
+ 'SHUTOFF')
+
+ def _detach_volumes(self, volumes):
+ # NOTE(gfidente): two loops so we do not wait for the status twice
+ for v in volumes:
+ self.volume_client.volumes.detach(v)
+ for v in volumes:
+ self.status_timeout(self.volume_client.volumes,
+ v.id,
+ 'available')
+
+ def test_volume_snapshot_pattern(self):
+ volume_origin = self._create_volume_from_image()
+ i_origin = self._boot_instance_from_volume(volume_origin.id)
+ snapshot = self._create_snapshot_from_volume(volume_origin.id)
+ volume = self._create_volume_from_snapshot(snapshot.id)
+ i = self._boot_instance_from_volume(volume.id)
+ # NOTE(gfidente): ensure resources are in clean state for
+ # deletion operations to succeed
+ self._stop_instances([i_origin, i])
+ self._detach_volumes([volume_origin, volume])
diff --git a/tempest/services/botoclients.py b/tempest/services/botoclients.py
index 628151a..66fb7af 100644
--- a/tempest/services/botoclients.py
+++ b/tempest/services/botoclients.py
@@ -132,6 +132,7 @@
ALLOWED_METHODS = set(('create_key_pair', 'get_key_pair',
'delete_key_pair', 'import_key_pair',
'get_all_key_pairs',
+ 'get_all_tags',
'create_image', 'get_image',
'register_image', 'deregister_image',
'get_all_images', 'get_image_attribute',
@@ -179,7 +180,7 @@
:return: Returns with the first available zone name
"""
for zone in self.get_all_zones():
- #NOTE(afazekas): zone.region_name was None
+ # NOTE(afazekas): zone.region_name was None
if (zone.state == "available" and
zone.region.name == self.connection_data["region"].name):
return zone.name
diff --git a/tempest/services/compute/json/flavors_client.py b/tempest/services/compute/json/flavors_client.py
index 1b965f3..5f58c43 100644
--- a/tempest/services/compute/json/flavors_client.py
+++ b/tempest/services/compute/json/flavors_client.py
@@ -79,9 +79,9 @@
return self.delete("flavors/%s" % str(flavor_id))
def is_resource_deleted(self, id):
- #Did not use get_flavor_details(id) for verification as it gives
- #200 ok even for deleted id. LP #981263
- #we can remove the loop here and use get by ID when bug gets sortedout
+ # Did not use get_flavor_details(id) for verification as it gives
+ # 200 ok even for deleted id. LP #981263
+        # we can remove the loop here and use get by ID when bug gets sorted out
resp, flavors = self.list_flavors_with_detail()
for flavor in flavors:
if flavor['id'] == id:
diff --git a/tempest/services/compute/json/keypairs_client.py b/tempest/services/compute/json/keypairs_client.py
index 90b2096..a464816 100644
--- a/tempest/services/compute/json/keypairs_client.py
+++ b/tempest/services/compute/json/keypairs_client.py
@@ -30,11 +30,11 @@
def list_keypairs(self):
resp, body = self.get("os-keypairs")
body = json.loads(body)
- #Each returned keypair is embedded within an unnecessary 'keypair'
- #element which is a deviation from other resources like floating-ips,
- #servers, etc. A bug?
- #For now we shall adhere to the spec, but the spec for keypairs
- #is yet to be found
+ # Each returned keypair is embedded within an unnecessary 'keypair'
+ # element which is a deviation from other resources like floating-ips,
+ # servers, etc. A bug?
+ # For now we shall adhere to the spec, but the spec for keypairs
+ # is yet to be found
return resp, body['keypairs']
def get_keypair(self, key_name):
diff --git a/tempest/services/compute/json/services_client.py b/tempest/services/compute/json/services_client.py
index d054f72..4db7596 100644
--- a/tempest/services/compute/json/services_client.py
+++ b/tempest/services/compute/json/services_client.py
@@ -1,6 +1,7 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 NEC Corporation
+# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -16,6 +17,7 @@
# under the License.
import json
+import urllib
from tempest.common.rest_client import RestClient
@@ -27,7 +29,33 @@
auth_url, tenant_name)
self.service = self.config.compute.catalog_type
- def list_services(self):
- resp, body = self.get("os-services")
+ def list_services(self, params=None):
+ url = 'os-services'
+ if params:
+ url += '?%s' % urllib.urlencode(params)
+
+ resp, body = self.get(url)
body = json.loads(body)
return resp, body['services']
+
+ def enable_service(self, host_name, binary):
+ """
+ Enable service on a host
+ host_name: Name of host
+ binary: Service binary
+ """
+ post_body = json.dumps({'binary': binary, 'host': host_name})
+ resp, body = self.put('os-services/enable', post_body, self.headers)
+ body = json.loads(body)
+ return resp, body['service']
+
+ def disable_service(self, host_name, binary):
+ """
+ Disable service on a host
+ host_name: Name of host
+ binary: Service binary
+ """
+ post_body = json.dumps({'binary': binary, 'host': host_name})
+ resp, body = self.put('os-services/disable', post_body, self.headers)
+ body = json.loads(body)
+ return resp, body['service']
diff --git a/tempest/services/compute/xml/flavors_client.py b/tempest/services/compute/xml/flavors_client.py
index 2de20f1..3a8986c 100644
--- a/tempest/services/compute/xml/flavors_client.py
+++ b/tempest/services/compute/xml/flavors_client.py
@@ -124,9 +124,9 @@
return self.delete("flavors/%s" % str(flavor_id), self.headers)
def is_resource_deleted(self, id):
- #Did not use get_flavor_details(id) for verification as it gives
- #200 ok even for deleted id. LP #981263
- #we can remove the loop here and use get by ID when bug gets sortedout
+ # Did not use get_flavor_details(id) for verification as it gives
+ # 200 ok even for deleted id. LP #981263
+        # we can remove the loop here and use get by ID when bug gets sorted out
resp, flavors = self.list_flavors_with_detail()
for flavor in flavors:
if flavor['id'] == id:
diff --git a/tempest/services/compute/xml/servers_client.py b/tempest/services/compute/xml/servers_client.py
index f2cca72..12e7034 100644
--- a/tempest/services/compute/xml/servers_client.py
+++ b/tempest/services/compute/xml/servers_client.py
@@ -21,9 +21,9 @@
from lxml import etree
-from tempest.common import log as logging
from tempest.common.rest_client import RestClientXML
from tempest import exceptions
+from tempest.openstack.common import log as logging
from tempest.services.compute.xml.common import Document
from tempest.services.compute.xml.common import Element
from tempest.services.compute.xml.common import Text
@@ -437,6 +437,12 @@
def revert_resize(self, server_id, **kwargs):
return self.action(server_id, 'revertResize', None, **kwargs)
+ def stop(self, server_id, **kwargs):
+ return self.action(server_id, 'os-stop', None, **kwargs)
+
+ def start(self, server_id, **kwargs):
+ return self.action(server_id, 'os-start', None, **kwargs)
+
def create_image(self, server_id, name):
return self.action(server_id, 'createImage', None, name=name)
diff --git a/tempest/services/compute/xml/services_client.py b/tempest/services/compute/xml/services_client.py
index ce23403..ac304e2 100644
--- a/tempest/services/compute/xml/services_client.py
+++ b/tempest/services/compute/xml/services_client.py
@@ -1,6 +1,7 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 NEC Corporation
+# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -15,8 +16,12 @@
# License for the specific language governing permissions and limitations
# under the License.
+import urllib
+
from lxml import etree
from tempest.common.rest_client import RestClientXML
+from tempest.services.compute.xml.common import Document
+from tempest.services.compute.xml.common import Element
from tempest.services.compute.xml.common import xml_to_json
@@ -27,8 +32,42 @@
auth_url, tenant_name)
self.service = self.config.compute.catalog_type
- def list_services(self):
- resp, body = self.get("os-services", self.headers)
+ def list_services(self, params=None):
+ url = 'os-services'
+ if params:
+ url += '?%s' % urllib.urlencode(params)
+
+ resp, body = self.get(url, self.headers)
node = etree.fromstring(body)
body = [xml_to_json(x) for x in node.getchildren()]
return resp, body
+
+ def enable_service(self, host_name, binary):
+ """
+ Enable service on a host
+ host_name: Name of host
+ binary: Service binary
+ """
+ post_body = Element("service")
+ post_body.add_attr('binary', binary)
+ post_body.add_attr('host', host_name)
+
+ resp, body = self.put('os-services/enable', str(Document(post_body)),
+ self.headers)
+ body = xml_to_json(etree.fromstring(body))
+ return resp, body
+
+ def disable_service(self, host_name, binary):
+ """
+ Disable service on a host
+ host_name: Name of host
+ binary: Service binary
+ """
+ post_body = Element("service")
+ post_body.add_attr('binary', binary)
+ post_body.add_attr('host', host_name)
+
+ resp, body = self.put('os-services/disable', str(Document(post_body)),
+ self.headers)
+ body = xml_to_json(etree.fromstring(body))
+ return resp, body
diff --git a/tempest/services/identity/json/identity_client.py b/tempest/services/identity/json/identity_client.py
index a216b55..90e64e7 100644
--- a/tempest/services/identity/json/identity_client.py
+++ b/tempest/services/identity/json/identity_client.py
@@ -1,3 +1,17 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
import httplib2
import json
diff --git a/tempest/services/identity/v3/json/identity_client.py b/tempest/services/identity/v3/json/identity_client.py
index adbdc83..0a56e84 100644
--- a/tempest/services/identity/v3/json/identity_client.py
+++ b/tempest/services/identity/v3/json/identity_client.py
@@ -63,11 +63,12 @@
def update_user(self, user_id, name, **kwargs):
"""Updates a user."""
- email = kwargs.get('email', None)
- en = kwargs.get('enabled', True)
- project_id = kwargs.get('project_id', None)
- description = kwargs.get('description', None)
- domain_id = kwargs.get('domain_id', 'default')
+ resp, body = self.get_user(user_id)
+ email = kwargs.get('email', body['email'])
+ en = kwargs.get('enabled', body['enabled'])
+ project_id = kwargs.get('project_id', body['project_id'])
+ description = kwargs.get('description', body['description'])
+ domain_id = kwargs.get('domain_id', body['domain_id'])
post_body = {
'name': name,
'email': email,
@@ -149,6 +150,17 @@
body = json.loads(body)
return resp, body['role']
+ def update_role(self, name, role_id):
+        """Updates a Role."""
+ post_body = {
+ 'name': name
+ }
+ post_body = json.dumps({'role': post_body})
+ resp, body = self.patch('roles/%s' % str(role_id), post_body,
+ self.headers)
+ body = json.loads(body)
+ return resp, body['role']
+
def delete_role(self, role_id):
"""Delete a role."""
resp, body = self.delete('roles/%s' % str(role_id))
@@ -208,3 +220,162 @@
resp, body = self.get('domains/%s' % domain_id)
body = json.loads(body)
return resp, body['domain']
+
+ def get_token(self, resp_token):
+ """Get token details."""
+ headers = {'X-Subject-Token': resp_token}
+ resp, body = self.get("auth/tokens", headers=headers)
+ body = json.loads(body)
+ return resp, body['token']
+
+ def delete_token(self, resp_token):
+ """Deletes token."""
+ headers = {'X-Subject-Token': resp_token}
+ resp, body = self.delete("auth/tokens", headers=headers)
+ return resp, body
+
+ def create_group(self, name, **kwargs):
+ """Creates a group."""
+ description = kwargs.get('description', None)
+ domain_id = kwargs.get('domain_id', 'default')
+ project_id = kwargs.get('project_id', None)
+ post_body = {
+ 'description': description,
+ 'domain_id': domain_id,
+ 'project_id': project_id,
+ 'name': name
+ }
+ post_body = json.dumps({'group': post_body})
+ resp, body = self.post('groups', post_body, self.headers)
+ body = json.loads(body)
+ return resp, body['group']
+
+ def delete_group(self, group_id):
+ """Delete a group."""
+ resp, body = self.delete('groups/%s' % str(group_id))
+ return resp, body
+
+ def assign_user_role_on_project(self, project_id, user_id, role_id):
+ """Add roles to a user on a project."""
+ resp, body = self.put('projects/%s/users/%s/roles/%s' %
+ (project_id, user_id, role_id), None,
+ self.headers)
+ return resp, body
+
+ def assign_user_role_on_domain(self, domain_id, user_id, role_id):
+ """Add roles to a user on a domain."""
+ resp, body = self.put('domains/%s/users/%s/roles/%s' %
+ (domain_id, user_id, role_id), None,
+ self.headers)
+ return resp, body
+
+ def list_user_roles_on_project(self, project_id, user_id):
+ """list roles of a user on a project."""
+ resp, body = self.get('projects/%s/users/%s/roles' %
+ (project_id, user_id))
+ body = json.loads(body)
+ return resp, body['roles']
+
+ def list_user_roles_on_domain(self, domain_id, user_id):
+ """list roles of a user on a domain."""
+ resp, body = self.get('domains/%s/users/%s/roles' %
+ (domain_id, user_id))
+ body = json.loads(body)
+ return resp, body['roles']
+
+ def revoke_role_from_user_on_project(self, project_id, user_id, role_id):
+ """Delete role of a user on a project."""
+ resp, body = self.delete('projects/%s/users/%s/roles/%s' %
+ (project_id, user_id, role_id))
+ return resp, body
+
+ def revoke_role_from_user_on_domain(self, domain_id, user_id, role_id):
+ """Delete role of a user on a domain."""
+ resp, body = self.delete('domains/%s/users/%s/roles/%s' %
+ (domain_id, user_id, role_id))
+ return resp, body
+
+ def assign_group_role_on_project(self, project_id, group_id, role_id):
+        """Add roles to a group on a project."""
+ resp, body = self.put('projects/%s/groups/%s/roles/%s' %
+ (project_id, group_id, role_id), None,
+ self.headers)
+ return resp, body
+
+ def assign_group_role_on_domain(self, domain_id, group_id, role_id):
+        """Add roles to a group on a domain."""
+ resp, body = self.put('domains/%s/groups/%s/roles/%s' %
+ (domain_id, group_id, role_id), None,
+ self.headers)
+ return resp, body
+
+ def list_group_roles_on_project(self, project_id, group_id):
+        """list roles of a group on a project."""
+ resp, body = self.get('projects/%s/groups/%s/roles' %
+ (project_id, group_id))
+ body = json.loads(body)
+ return resp, body['roles']
+
+ def list_group_roles_on_domain(self, domain_id, group_id):
+        """list roles of a group on a domain."""
+ resp, body = self.get('domains/%s/groups/%s/roles' %
+ (domain_id, group_id))
+ body = json.loads(body)
+ return resp, body['roles']
+
+ def revoke_role_from_group_on_project(self, project_id, group_id, role_id):
+        """Delete role of a group on a project."""
+ resp, body = self.delete('projects/%s/groups/%s/roles/%s' %
+ (project_id, group_id, role_id))
+ return resp, body
+
+ def revoke_role_from_group_on_domain(self, domain_id, group_id, role_id):
+        """Delete role of a group on a domain."""
+ resp, body = self.delete('domains/%s/groups/%s/roles/%s' %
+ (domain_id, group_id, role_id))
+ return resp, body
+
+
+class V3TokenClientJSON(RestClient):
+
+ def __init__(self, config, username, password, auth_url, tenant_name=None):
+ super(V3TokenClientJSON, self).__init__(config, username, password,
+ auth_url, tenant_name)
+ self.service = self.config.identity.catalog_type
+ self.endpoint_url = 'adminURL'
+
+ auth_url = config.identity.uri
+
+ if 'tokens' not in auth_url:
+ auth_url = auth_url.rstrip('/') + '/tokens'
+
+ self.auth_url = auth_url
+ self.config = config
+
+ def auth(self, user_id, password):
+ creds = {
+ 'auth': {
+ 'identity': {
+ 'methods': ['password'],
+ 'password': {
+ 'user': {
+ 'id': user_id,
+ 'password': password
+ }
+ }
+ }
+ }
+ }
+ headers = {'Content-Type': 'application/json'}
+ body = json.dumps(creds)
+ resp, body = self.post("auth/tokens", headers=headers, body=body)
+ return resp, body
+
+ def request(self, method, url, headers=None, body=None, wait=None):
+ """Overriding the existing HTTP request in super class rest_client."""
+ self._set_auth()
+ self.base_url = self.base_url.replace(urlparse(self.base_url).path,
+ "/v3")
+ return super(V3TokenClientJSON, self).request(method, url,
+ headers=headers,
+ body=body)
diff --git a/tempest/services/identity/v3/xml/identity_client.py b/tempest/services/identity/v3/xml/identity_client.py
index 708ee28..03e06dc 100644
--- a/tempest/services/identity/v3/xml/identity_client.py
+++ b/tempest/services/identity/v3/xml/identity_client.py
@@ -22,9 +22,9 @@
from tempest.common.rest_client import RestClientXML
from tempest.services.compute.xml.common import Document
from tempest.services.compute.xml.common import Element
+from tempest.services.compute.xml.common import Text
from tempest.services.compute.xml.common import xml_to_json
-
XMLNS = "http://docs.openstack.org/identity/api/v3"
@@ -52,6 +52,14 @@
array.append(xml_to_json(child))
return array
+ def _parse_roles(self, node):
+ array = []
+ for child in node.getchildren():
+ tag_list = child.tag.split('}', 1)
+ if tag_list[1] == "role":
+ array.append(xml_to_json(child))
+ return array
+
def _parse_array(self, node):
array = []
for child in node.getchildren():
@@ -95,11 +103,12 @@
def update_user(self, user_id, name, **kwargs):
"""Updates a user."""
- email = kwargs.get('email', None)
- en = kwargs.get('enabled', True)
- project_id = kwargs.get('project_id', None)
- domain_id = kwargs.get('domain_id', 'default')
- description = kwargs.get('description', None)
+ resp, body = self.get_user(user_id)
+ email = kwargs.get('email', body['email'])
+ en = kwargs.get('enabled', body['enabled'])
+ project_id = kwargs.get('project_id', body['project_id'])
+ description = kwargs.get('description', body['description'])
+ domain_id = kwargs.get('domain_id', body['domain_id'])
update_user = Element("user",
xmlns=XMLNS,
name=name,
@@ -182,6 +191,17 @@
body = self._parse_body(etree.fromstring(body))
return resp, body
+ def update_role(self, name, role_id):
+ """Updates a Role."""
+ post_body = Element("role",
+ xmlns=XMLNS,
+ name=name)
+ resp, body = self.patch('roles/%s' % str(role_id),
+ str(Document(post_body)),
+ self.headers)
+ body = self._parse_body(etree.fromstring(body))
+ return resp, body
+
def delete_role(self, role_id):
"""Delete a role."""
resp, body = self.delete('roles/%s' % str(role_id),
@@ -241,3 +261,166 @@
resp, body = self.get('domains/%s' % domain_id, self.headers)
body = self._parse_body(etree.fromstring(body))
return resp, body
+
+ def get_token(self, resp_token):
+ """GET a Token Details."""
+ headers = {'Content-Type': 'application/xml',
+ 'Accept': 'application/xml',
+ 'X-Subject-Token': resp_token}
+ resp, body = self.get("auth/tokens", headers=headers)
+ body = self._parse_body(etree.fromstring(body))
+ return resp, body
+
+ def delete_token(self, resp_token):
+ """Delete a Given Token."""
+ headers = {'X-Subject-Token': resp_token}
+ resp, body = self.delete("auth/tokens", headers=headers)
+ return resp, body
+
+ def create_group(self, name, **kwargs):
+ """Creates a group."""
+ description = kwargs.get('description', None)
+ domain_id = kwargs.get('domain_id', 'default')
+ project_id = kwargs.get('project_id', None)
+ post_body = Element("group",
+ xmlns=XMLNS,
+ name=name,
+ description=description,
+ domain_id=domain_id,
+ project_id=project_id)
+ resp, body = self.post('groups', str(Document(post_body)),
+ self.headers)
+ body = self._parse_body(etree.fromstring(body))
+ return resp, body
+
+ def delete_group(self, group_id):
+ """Delete a group."""
+ resp, body = self.delete('groups/%s' % group_id, self.headers)
+ return resp, body
+
+ def assign_user_role_on_project(self, project_id, user_id, role_id):
+ """Add roles to a user on a project."""
+ resp, body = self.put('projects/%s/users/%s/roles/%s' %
+ (project_id, user_id, role_id), '',
+ self.headers)
+ return resp, body
+
+ def assign_user_role_on_domain(self, domain_id, user_id, role_id):
+ """Add roles to a user on a domain."""
+ resp, body = self.put('domains/%s/users/%s/roles/%s' %
+ (domain_id, user_id, role_id), '',
+ self.headers)
+ return resp, body
+
+ def list_user_roles_on_project(self, project_id, user_id):
+ """list roles of a user on a project."""
+ resp, body = self.get('projects/%s/users/%s/roles' %
+ (project_id, user_id), self.headers)
+ body = self._parse_roles(etree.fromstring(body))
+ return resp, body
+
+ def list_user_roles_on_domain(self, domain_id, user_id):
+ """list roles of a user on a domain."""
+ resp, body = self.get('domains/%s/users/%s/roles' %
+ (domain_id, user_id), self.headers)
+ body = self._parse_roles(etree.fromstring(body))
+ return resp, body
+
+ def revoke_role_from_user_on_project(self, project_id, user_id, role_id):
+ """Delete role of a user on a project."""
+ resp, body = self.delete('projects/%s/users/%s/roles/%s' %
+ (project_id, user_id, role_id), self.headers)
+ return resp, body
+
+ def revoke_role_from_user_on_domain(self, domain_id, user_id, role_id):
+ """Delete role of a user on a domain."""
+ resp, body = self.delete('domains/%s/users/%s/roles/%s' %
+ (domain_id, user_id, role_id), self.headers)
+ return resp, body
+
+ def assign_group_role_on_project(self, project_id, group_id, role_id):
+        """Add roles to a group on a project."""
+ resp, body = self.put('projects/%s/groups/%s/roles/%s' %
+ (project_id, group_id, role_id), '',
+ self.headers)
+ return resp, body
+
+ def assign_group_role_on_domain(self, domain_id, group_id, role_id):
+        """Add roles to a group on a domain."""
+ resp, body = self.put('domains/%s/groups/%s/roles/%s' %
+ (domain_id, group_id, role_id), '',
+ self.headers)
+ return resp, body
+
+ def list_group_roles_on_project(self, project_id, group_id):
+        """list roles of a group on a project."""
+ resp, body = self.get('projects/%s/groups/%s/roles' %
+ (project_id, group_id), self.headers)
+ body = self._parse_roles(etree.fromstring(body))
+ return resp, body
+
+ def list_group_roles_on_domain(self, domain_id, group_id):
+        """list roles of a group on a domain."""
+ resp, body = self.get('domains/%s/groups/%s/roles' %
+ (domain_id, group_id), self.headers)
+ body = self._parse_roles(etree.fromstring(body))
+ return resp, body
+
+ def revoke_role_from_group_on_project(self, project_id, group_id, role_id):
+        """Delete role of a group on a project."""
+ resp, body = self.delete('projects/%s/groups/%s/roles/%s' %
+ (project_id, group_id, role_id), self.headers)
+ return resp, body
+
+ def revoke_role_from_group_on_domain(self, domain_id, group_id, role_id):
+        """Delete role of a group on a domain."""
+ resp, body = self.delete('domains/%s/groups/%s/roles/%s' %
+ (domain_id, group_id, role_id), self.headers)
+ return resp, body
+
+
+class V3TokenClientXML(RestClientXML):
+
+ def __init__(self, config, username, password, auth_url, tenant_name=None):
+ super(V3TokenClientXML, self).__init__(config, username, password,
+ auth_url, tenant_name)
+ self.service = self.config.identity.catalog_type
+ self.endpoint_url = 'adminURL'
+
+ auth_url = config.identity.uri
+
+ if 'tokens' not in auth_url:
+ auth_url = auth_url.rstrip('/') + '/tokens'
+
+ self.auth_url = auth_url
+ self.config = config
+
+ def auth(self, user_id, password):
+ user = Element('user',
+ id=user_id,
+ password=password)
+ password = Element('password')
+ password.append(user)
+
+ method = Element('method')
+ method.append(Text('password'))
+ methods = Element('methods')
+ methods.append(method)
+ identity = Element('identity')
+ identity.append(methods)
+ identity.append(password)
+ auth = Element('auth')
+ auth.append(identity)
+ headers = {'Content-Type': 'application/xml'}
+ resp, body = self.post("auth/tokens", headers=headers,
+ body=str(Document(auth)))
+ return resp, body
+
+ def request(self, method, url, headers=None, body=None, wait=None):
+ """Overriding the existing HTTP request in super class rest_client."""
+ self._set_auth()
+ self.base_url = self.base_url.replace(urlparse(self.base_url).path,
+ "/v3")
+ return super(V3TokenClientXML, self).request(method, url,
+ headers=headers,
+ body=body)
diff --git a/tempest/services/image/v1/json/image_client.py b/tempest/services/image/v1/json/image_client.py
index dac77a2..1921d78 100644
--- a/tempest/services/image/v1/json/image_client.py
+++ b/tempest/services/image/v1/json/image_client.py
@@ -23,9 +23,9 @@
import urllib
from tempest.common import glance_http
-from tempest.common import log as logging
from tempest.common.rest_client import RestClient
from tempest import exceptions
+from tempest.openstack.common import log as logging
LOG = logging.getLogger(__name__)
@@ -247,13 +247,13 @@
data = json.loads(data)
return resp, data
- #NOTE(afazekas): just for the wait function
+ # NOTE(afazekas): just for the wait function
def _get_image_status(self, image_id):
resp, meta = self.get_image_meta(image_id)
status = meta['status']
return status
- #NOTE(afazkas): Wait reinvented again. It is not in the correct layer
+ # NOTE(afazekas): Wait reinvented again. It is not in the correct layer
def wait_for_image_status(self, image_id, status):
"""Waits for a Image to reach a given status."""
start_time = time.time()
diff --git a/tempest/services/network/json/network_client.py b/tempest/services/network/json/network_client.py
index c4fe6b1..2c808a9 100644
--- a/tempest/services/network/json/network_client.py
+++ b/tempest/services/network/json/network_client.py
@@ -1,4 +1,19 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
import json
+
from tempest.common.rest_client import RestClient
@@ -8,13 +23,11 @@
Tempest REST client for Neutron. Uses v2 of the Neutron API, since the
V1 API has been removed from the code base.
- Implements the following operations for each one of the basic Neutron
+ Implements create, delete, list and show for the basic Neutron
abstractions (networks, sub-networks and ports):
- create
- delete
- list
- show
+ It also implements list, show, update and reset for OpenStack Networking
+ quotas
"""
def __init__(self, config, username, password, auth_url, tenant_name=None):
@@ -113,3 +126,64 @@
resp, body = self.get(uri, self.headers)
body = json.loads(body)
return resp, body
+
+ def update_quotas(self, tenant_id, **kwargs):
+ put_body = {'quota': kwargs}
+ body = json.dumps(put_body)
+ uri = '%s/quotas/%s' % (self.uri_prefix, tenant_id)
+ resp, body = self.put(uri, body, self.headers)
+ body = json.loads(body)
+ return resp, body['quota']
+
+ def show_quotas(self, tenant_id):
+ uri = '%s/quotas/%s' % (self.uri_prefix, tenant_id)
+ resp, body = self.get(uri, self.headers)
+ body = json.loads(body)
+ return resp, body['quota']
+
+ def reset_quotas(self, tenant_id):
+ uri = '%s/quotas/%s' % (self.uri_prefix, tenant_id)
+ resp, body = self.delete(uri, self.headers)
+ return resp, body
+
+ def list_quotas(self):
+ uri = '%s/quotas' % (self.uri_prefix)
+ resp, body = self.get(uri, self.headers)
+ body = json.loads(body)
+ return resp, body['quotas']
+
+ def update_subnet(self, subnet_id, new_name):
+ put_body = {
+ 'subnet': {
+ 'name': new_name,
+ }
+ }
+ body = json.dumps(put_body)
+ uri = '%s/subnets/%s' % (self.uri_prefix, subnet_id)
+ resp, body = self.put(uri, body=body, headers=self.headers)
+ body = json.loads(body)
+ return resp, body
+
+ def update_port(self, port_id, new_name):
+ put_body = {
+ 'port': {
+ 'name': new_name,
+ }
+ }
+ body = json.dumps(put_body)
+ uri = '%s/ports/%s' % (self.uri_prefix, port_id)
+ resp, body = self.put(uri, body=body, headers=self.headers)
+ body = json.loads(body)
+ return resp, body
+
+ def update_network(self, network_id, new_name):
+ put_body = {
+ "network": {
+ "name": new_name,
+ }
+ }
+ body = json.dumps(put_body)
+ uri = '%s/networks/%s' % (self.uri_prefix, network_id)
+ resp, body = self.put(uri, body=body, headers=self.headers)
+ body = json.loads(body)
+ return resp, body
diff --git a/tempest/services/object_storage/account_client.py b/tempest/services/object_storage/account_client.py
index a71a287..8defbbb 100644
--- a/tempest/services/object_storage/account_client.py
+++ b/tempest/services/object_storage/account_client.py
@@ -84,10 +84,13 @@
DEFAULT: Python-List returned in response body
"""
- url = '?format=%s' % self.format
if params:
- url += '&%s' + urllib.urlencode(params)
+ if 'format' not in params:
+ params['format'] = self.format
+ else:
+ params = {'format': self.format}
+ url = '?' + urllib.urlencode(params)
resp, body = self.get(url)
body = json.loads(body)
return resp, body
@@ -99,7 +102,7 @@
super(AccountClientCustomizedHeader, self).__init__(config, username,
password, auth_url,
tenant_name)
- #Overwrites json-specific header encoding in RestClient
+ # Overwrites json-specific header encoding in RestClient
self.service = self.config.object_storage.catalog_type
self.format = 'json'
diff --git a/tempest/services/object_storage/container_client.py b/tempest/services/object_storage/container_client.py
index 93477fa..dd5f3ec 100644
--- a/tempest/services/object_storage/container_client.py
+++ b/tempest/services/object_storage/container_client.py
@@ -26,7 +26,7 @@
super(ContainerClient, self).__init__(config, username, password,
auth_url, tenant_name)
- #Overwrites json-specific header encoding in RestClient
+ # Overwrites json-specific header encoding in RestClient
self.headers = {}
self.service = self.config.object_storage.catalog_type
self.format = 'json'
@@ -94,8 +94,8 @@
item count is beyond 10,000 item listing limit.
Does not require any paramaters aside from container name.
"""
- #TODO(dwalleck): Rewite using json format to avoid newlines at end of
- #obj names. Set limit to API limit - 1 (max returned items = 9999)
+ # TODO(dwalleck): Rewrite using json format to avoid newlines at end of
+ # obj names. Set limit to API limit - 1 (max returned items = 9999)
limit = 9999
if params is not None:
if 'limit' in params:
@@ -114,16 +114,16 @@
if len(objlist) >= limit:
- #Increment marker
+ # Increment marker
marker = objlist[len(objlist) - 1]
- #Get the next chunk of the list
+ # Get the next chunk of the list
objlist.extend(_list_all_container_objects(container,
params={'marker': marker,
'limit': limit}))
return objlist
else:
- #Return final, complete list
+ # Return final, complete list
return objlist"""
def list_container_contents(self, container, params=None):
diff --git a/tempest/services/object_storage/object_client.py b/tempest/services/object_storage/object_client.py
index c894612..181838e 100644
--- a/tempest/services/object_storage/object_client.py
+++ b/tempest/services/object_storage/object_client.py
@@ -155,7 +155,7 @@
super(ObjectClientCustomizedHeader, self).__init__(config, username,
password, auth_url,
tenant_name)
- #Overwrites json-specific header encoding in RestClient
+ # Overwrites json-specific header encoding in RestClient
self.service = self.config.object_storage.catalog_type
self.format = 'json'
diff --git a/tempest/services/volume/json/snapshots_client.py b/tempest/services/volume/json/snapshots_client.py
index 17f6cba..ce2da90 100644
--- a/tempest/services/volume/json/snapshots_client.py
+++ b/tempest/services/volume/json/snapshots_client.py
@@ -16,9 +16,9 @@
import time
import urllib
-from tempest.common import log as logging
from tempest.common.rest_client import RestClient
from tempest import exceptions
+from tempest.openstack.common import log as logging
LOG = logging.getLogger(__name__)
@@ -76,11 +76,11 @@
body = json.loads(body)
return resp, body['snapshot']
- #NOTE(afazekas): just for the wait function
+ # NOTE(afazekas): just for the wait function
def _get_snapshot_status(self, snapshot_id):
resp, body = self.get_snapshot(snapshot_id)
status = body['status']
- #NOTE(afazekas): snapshot can reach an "error"
+ # NOTE(afazekas): snapshot can reach an "error"
# state in a "normal" lifecycle
if (status == 'error'):
raise exceptions.SnapshotBuildErrorException(
@@ -88,7 +88,7 @@
return status
- #NOTE(afazkas): Wait reinvented again. It is not in the correct layer
+ # NOTE(afazekas): Wait reinvented again. It is not in the correct layer
def wait_for_snapshot_status(self, snapshot_id, status):
"""Waits for a Snapshot to reach a given status."""
start_time = time.time()
diff --git a/tempest/services/volume/xml/snapshots_client.py b/tempest/services/volume/xml/snapshots_client.py
index b35c43e..51c46da 100644
--- a/tempest/services/volume/xml/snapshots_client.py
+++ b/tempest/services/volume/xml/snapshots_client.py
@@ -17,9 +17,9 @@
from lxml import etree
-from tempest.common import log as logging
from tempest.common.rest_client import RestClientXML
from tempest import exceptions
+from tempest.openstack.common import log as logging
from tempest.services.compute.xml.common import Document
from tempest.services.compute.xml.common import Element
from tempest.services.compute.xml.common import xml_to_json
@@ -81,7 +81,7 @@
display_name: Optional snapshot Name.
display_description: User friendly snapshot description.
"""
- #NOTE(afazekas): it should use the volume namaspace
+ # NOTE(afazekas): it should use the volume namespace
snapshot = Element("snapshot", xmlns=XMLNS_11, volume_id=volume_id)
for key, value in kwargs.items():
snapshot.add_attr(key, value)
@@ -90,11 +90,11 @@
body = xml_to_json(etree.fromstring(body))
return resp, body
- #NOTE(afazekas): just for the wait function
+ # NOTE(afazekas): just for the wait function
def _get_snapshot_status(self, snapshot_id):
resp, body = self.get_snapshot(snapshot_id)
status = body['status']
- #NOTE(afazekas): snapshot can reach an "error"
+ # NOTE(afazekas): snapshot can reach an "error"
# state in a "normal" lifecycle
if (status == 'error'):
raise exceptions.SnapshotBuildErrorException(
@@ -102,7 +102,7 @@
return status
- #NOTE(afazkas): Wait reinvented again. It is not in the correct layer
+ # NOTE(afazekas): Wait reinvented again. It is not in the correct layer
def wait_for_snapshot_status(self, snapshot_id, status):
"""Waits for a Snapshot to reach a given status."""
start_time = time.time()
diff --git a/tempest/services/volume/xml/volumes_client.py b/tempest/services/volume/xml/volumes_client.py
index 8eda26b..eaa3ae0 100644
--- a/tempest/services/volume/xml/volumes_client.py
+++ b/tempest/services/volume/xml/volumes_client.py
@@ -103,7 +103,7 @@
:param imageRef: When specified the volume is created from this
image
"""
- #NOTE(afazekas): it should use a volume namespace
+ # NOTE(afazekas): it should use a volume namespace
volume = Element("volume", xmlns=XMLNS_11, size=size)
if 'metadata' in kwargs:
diff --git a/tempest/stress/README.rst b/tempest/stress/README.rst
index 661763c..7c180f6 100644
--- a/tempest/stress/README.rst
+++ b/tempest/stress/README.rst
@@ -23,14 +23,15 @@
target_controller = "hostname or ip of controller node (for nova-manage)
log_check_interval = "time between checking logs for errors (default 60s)"
-
+To activate logging on your console please make sure that you activate `use_stderr`
+in tempest.conf or use the default `logging.conf.sample` file.
Running the sample test
-----------------------
To test installation, do the following (from the tempest/stress directory):
- ./run_stress.py etc/sample-test.json -d 30
+ ./run_stress.py etc/server-create-destroy-test.json -d 30
This sample test tries to create a few VMs and kill a few VMs.
diff --git a/tempest/stress/actions/create_destroy_server.py b/tempest/stress/actions/create_destroy_server.py
deleted file mode 100644
index 44b149f..0000000
--- a/tempest/stress/actions/create_destroy_server.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright 2013 Quanta Research Cambridge, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from tempest.common.utils.data_utils import rand_name
-
-
-def create_destroy(manager, logger):
- image = manager.config.compute.image_ref
- flavor = manager.config.compute.flavor_ref
- while True:
- name = rand_name("instance")
- logger.info("creating %s" % name)
- resp, server = manager.servers_client.create_server(
- name, image, flavor)
- server_id = server['id']
- assert(resp.status == 202)
- manager.servers_client.wait_for_server_status(server_id, 'ACTIVE')
- logger.info("created %s" % server_id)
- logger.info("deleting %s" % name)
- resp, _ = manager.servers_client.delete_server(server_id)
- assert(resp.status == 204)
- manager.servers_client.wait_for_server_termination(server_id)
- logger.info("deleted %s" % server_id)
diff --git a/tempest/stress/actions/server_create_destroy.py b/tempest/stress/actions/server_create_destroy.py
new file mode 100644
index 0000000..1a1e30b
--- /dev/null
+++ b/tempest/stress/actions/server_create_destroy.py
@@ -0,0 +1,39 @@
+# Copyright 2013 Quanta Research Cambridge, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from tempest.common.utils.data_utils import rand_name
+import tempest.stress.stressaction as stressaction
+
+
+class ServerCreateDestroyTest(stressaction.StressAction):
+
+ def setUp(self, **kwargs):
+ self.image = self.manager.config.compute.image_ref
+ self.flavor = self.manager.config.compute.flavor_ref
+
+ def run(self):
+ name = rand_name("instance")
+ self.logger.info("creating %s" % name)
+ resp, server = self.manager.servers_client.create_server(
+ name, self.image, self.flavor)
+ server_id = server['id']
+ assert(resp.status == 202)
+ self.manager.servers_client.wait_for_server_status(server_id,
+ 'ACTIVE')
+ self.logger.info("created %s" % server_id)
+ self.logger.info("deleting %s" % name)
+ resp, _ = self.manager.servers_client.delete_server(server_id)
+ assert(resp.status == 204)
+ self.manager.servers_client.wait_for_server_termination(server_id)
+ self.logger.info("deleted %s" % server_id)
diff --git a/tempest/stress/actions/volume_attach_delete.py b/tempest/stress/actions/volume_attach_delete.py
new file mode 100644
index 0000000..a7b872f
--- /dev/null
+++ b/tempest/stress/actions/volume_attach_delete.py
@@ -0,0 +1,70 @@
+# (c) 2013 Deutsche Telekom AG
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from tempest.common.utils.data_utils import rand_name
+import tempest.stress.stressaction as stressaction
+
+
+class VolumeAttachDeleteTest(stressaction.StressAction):
+
+ def setUp(self, **kwargs):
+ self.image = self.manager.config.compute.image_ref
+ self.flavor = self.manager.config.compute.flavor_ref
+
+ def run(self):
+ # Step 1: create volume
+ name = rand_name("volume")
+ self.logger.info("creating volume: %s" % name)
+ resp, volume = self.manager.volumes_client.create_volume(size=1,
+ display_name=
+ name)
+ assert(resp.status == 200)
+ self.manager.volumes_client.wait_for_volume_status(volume['id'],
+ 'available')
+ self.logger.info("created volume: %s" % volume['id'])
+
+ # Step 2: create vm instance
+ vm_name = rand_name("instance")
+ self.logger.info("creating vm: %s" % vm_name)
+ resp, server = self.manager.servers_client.create_server(
+ vm_name, self.image, self.flavor)
+ server_id = server['id']
+ assert(resp.status == 202)
+ self.manager.servers_client.wait_for_server_status(server_id, 'ACTIVE')
+ self.logger.info("created vm %s" % server_id)
+
+ # Step 3: attach volume to vm
+ self.logger.info("attach volume (%s) to vm %s" %
+ (volume['id'], server_id))
+ resp, body = self.manager.servers_client.attach_volume(server_id,
+ volume['id'],
+ '/dev/vdc')
+ assert(resp.status == 200)
+ self.manager.volumes_client.wait_for_volume_status(volume['id'],
+ 'in-use')
+ self.logger.info("volume (%s) attached to vm %s" %
+ (volume['id'], server_id))
+
+ # Step 4: delete vm
+ self.logger.info("deleting vm: %s" % vm_name)
+ resp, _ = self.manager.servers_client.delete_server(server_id)
+ assert(resp.status == 204)
+ self.manager.servers_client.wait_for_server_termination(server_id)
+ self.logger.info("deleted vm: %s" % server_id)
+
+ # Step 5: delete volume
+ self.logger.info("deleting volume: %s" % volume['id'])
+ resp, _ = self.manager.volumes_client.delete_volume(volume['id'])
+ assert(resp.status == 202)
+ self.manager.volumes_client.wait_for_resource_deletion(volume['id'])
+ self.logger.info("deleted volume: %s" % volume['id'])
diff --git a/tempest/stress/actions/volume_create_delete.py b/tempest/stress/actions/volume_create_delete.py
index e0c95b5..e29d9c4 100644
--- a/tempest/stress/actions/volume_create_delete.py
+++ b/tempest/stress/actions/volume_create_delete.py
@@ -11,20 +11,23 @@
# limitations under the License.
from tempest.common.utils.data_utils import rand_name
+import tempest.stress.stressaction as stressaction
-def create_delete(manager, logger):
- while True:
+class VolumeCreateDeleteTest(stressaction.StressAction):
+
+ def run(self):
name = rand_name("volume")
- logger.info("creating %s" % name)
- resp, volume = manager.volumes_client.create_volume(size=1,
- display_name=name)
+ self.logger.info("creating %s" % name)
+ volumes_client = self.manager.volumes_client
+ resp, volume = volumes_client.create_volume(size=1,
+ display_name=name)
assert(resp.status == 200)
- manager.volumes_client.wait_for_volume_status(volume['id'],
- 'available')
- logger.info("created %s" % volume['id'])
- logger.info("deleting %s" % name)
- resp, _ = manager.volumes_client.delete_volume(volume['id'])
+ vol_id = volume['id']
+ volumes_client.wait_for_volume_status(vol_id, 'available')
+ self.logger.info("created %s" % volume['id'])
+ self.logger.info("deleting %s" % name)
+ resp, _ = volumes_client.delete_volume(vol_id)
assert(resp.status == 202)
- manager.volumes_client.wait_for_resource_deletion(volume['id'])
- logger.info("deleted %s" % volume['id'])
+ volumes_client.wait_for_resource_deletion(vol_id)
+ self.logger.info("deleted %s" % vol_id)
diff --git a/tempest/stress/cleanup.py b/tempest/stress/cleanup.py
index 3b1c871..1bd9485 100644
--- a/tempest/stress/cleanup.py
+++ b/tempest/stress/cleanup.py
@@ -17,12 +17,16 @@
# limitations under the License.
from tempest import clients
+from tempest.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
def cleanup():
admin_manager = clients.AdminManager()
_, body = admin_manager.servers_client.list_servers({"all_tenants": True})
+ LOG.info("Cleanup::remove %s servers" % len(body['servers']))
for s in body['servers']:
try:
admin_manager.servers_client.delete_server(s['id'])
@@ -36,6 +40,7 @@
pass
_, keypairs = admin_manager.keypairs_client.list_keypairs()
+ LOG.info("Cleanup::remove %s keypairs" % len(keypairs))
for k in keypairs:
try:
admin_manager.keypairs_client.delete_keypair(k['name'])
@@ -43,6 +48,7 @@
pass
_, floating_ips = admin_manager.floating_ips_client.list_floating_ips()
+ LOG.info("Cleanup::remove %s floating ips" % len(floating_ips))
for f in floating_ips:
try:
admin_manager.floating_ips_client.delete_floating_ip(f['id'])
@@ -50,18 +56,43 @@
pass
_, users = admin_manager.identity_client.get_users()
+ LOG.info("Cleanup::remove %s users" % len(users))
for user in users:
if user['name'].startswith("stress_user"):
admin_manager.identity_client.delete_user(user['id'])
_, tenants = admin_manager.identity_client.list_tenants()
+ LOG.info("Cleanup::remove %s tenants" % len(tenants))
for tenant in tenants:
if tenant['name'].startswith("stress_tenant"):
admin_manager.identity_client.delete_tenant(tenant['id'])
+ # We have to delete snapshots first or
+ # volume deletion may block
+
+ _, snaps = admin_manager.snapshots_client.\
+ list_snapshots({"all_tenants": True})
+ LOG.info("Cleanup::remove %s snapshots" % len(snaps))
+ for v in snaps:
+ try:
+ admin_manager.snapshots_client.\
+ wait_for_snapshot_status(v['id'], 'available')
+ admin_manager.snapshots_client.delete_snapshot(v['id'])
+ except Exception:
+ pass
+
+ for v in snaps:
+ try:
+ admin_manager.snapshots_client.wait_for_resource_deletion(v['id'])
+ except Exception:
+ pass
+
_, vols = admin_manager.volumes_client.list_volumes({"all_tenants": True})
+ LOG.info("Cleanup::remove %s volumes" % len(vols))
for v in vols:
try:
+ admin_manager.volumes_client.\
+ wait_for_volume_status(v['id'], 'available')
admin_manager.volumes_client.delete_volume(v['id'])
except Exception:
pass
diff --git a/tempest/stress/driver.py b/tempest/stress/driver.py
index 51f159d..efc57a9 100644
--- a/tempest/stress/driver.py
+++ b/tempest/stress/driver.py
@@ -12,39 +12,22 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import importlib
-import logging
import multiprocessing
+import signal
import time
from tempest import clients
from tempest.common import ssh
from tempest.common.utils.data_utils import rand_name
from tempest import exceptions
+from tempest.openstack.common import importutils
+from tempest.openstack.common import log as logging
from tempest.stress import cleanup
admin_manager = clients.AdminManager()
-# setup logging to file
-logging.basicConfig(
- format='%(asctime)s %(process)d %(name)-20s %(levelname)-8s %(message)s',
- datefmt='%m-%d %H:%M:%S',
- filename="stress.debug.log",
- filemode="w",
- level=logging.DEBUG,
-)
-
-# define a Handler which writes INFO messages or higher to the sys.stdout
-_console = logging.StreamHandler()
-_console.setLevel(logging.INFO)
-# set a format which is simpler for console use
-format_str = '%(asctime)s %(process)d %(name)-20s: %(levelname)-8s %(message)s'
-_formatter = logging.Formatter(format_str)
-# tell the handler to use this format
-_console.setFormatter(_formatter)
-# add the handler to the root logger
-logger = logging.getLogger('tempest.stress')
-logger.addHandler(_console)
+LOG = logging.getLogger(__name__)
+processes = []
def do_ssh(command, host):
@@ -88,20 +71,34 @@
if not errors:
return None
if len(errors) > 0:
- logger.error('%s: %s' % (node, errors))
+ LOG.error('%s: %s' % (node, errors))
return errors
return None
-def get_action_function(path):
- (module_part, _, function) = path.rpartition('.')
- return getattr(importlib.import_module(module_part), function)
+def sigchld_handler(signal, frame):
+ """
+ Signal handler (only active if stop_on_error is True).
+ """
+ terminate_all_processes()
-def stress_openstack(tests, duration):
+def terminate_all_processes():
+ """
+ Goes through the process list and terminates all child processes.
+ """
+ for process in processes:
+ if process['process'].is_alive():
+ try:
+ process['process'].terminate()
+ except Exception:
+ pass
+ process['process'].join()
+
+
+def stress_openstack(tests, duration, max_runs=None, stop_on_error=False):
"""
Workload driver. Executes an action function against a nova-cluster.
-
"""
logfiles = admin_manager.config.stress.target_logfiles
log_check_interval = int(admin_manager.config.stress.log_check_interval)
@@ -110,13 +107,12 @@
computes = _get_compute_nodes(controller)
for node in computes:
do_ssh("rm -f %s" % logfiles, node)
- processes = []
for test in tests:
if test.get('use_admin', False):
manager = admin_manager
else:
manager = clients.Manager()
- for _ in xrange(test.get('threads', 1)):
+ for p_number in xrange(test.get('threads', 1)):
if test.get('use_isolated_tenants', False):
username = rand_name("stress_user")
tenant_name = rand_name("stress_tenant")
@@ -130,27 +126,88 @@
manager = clients.Manager(username=username,
password="pass",
tenant_name=tenant_name)
- target = get_action_function(test['action'])
- p = multiprocessing.Process(target=target,
- args=(manager, logger),
- kwargs=test.get('kwargs', {}))
- processes.append(p)
+
+ test_obj = importutils.import_class(test['action'])
+ test_run = test_obj(manager, max_runs, stop_on_error)
+
+ kwargs = test.get('kwargs', {})
+ test_run.setUp(**dict(kwargs.iteritems()))
+
+ LOG.debug("calling Target Object %s" %
+ test_run.__class__.__name__)
+
+ mp_manager = multiprocessing.Manager()
+ shared_statistic = mp_manager.dict()
+ shared_statistic['runs'] = 0
+ shared_statistic['fails'] = 0
+
+ p = multiprocessing.Process(target=test_run.execute,
+ args=(shared_statistic,))
+
+ process = {'process': p,
+ 'p_number': p_number,
+ 'action': test['action'],
+ 'statistic': shared_statistic}
+
+ processes.append(process)
p.start()
+ if stop_on_error:
+ # NOTE(mkoderer): only the parent should register the handler
+ signal.signal(signal.SIGCHLD, sigchld_handler)
end_time = time.time() + duration
had_errors = False
while True:
- remaining = end_time - time.time()
- if remaining <= 0:
- break
+ if max_runs is None:
+ remaining = end_time - time.time()
+ if remaining <= 0:
+ break
+ else:
+ remaining = log_check_interval
+ all_proc_term = True
+ for process in processes:
+ if process['process'].is_alive():
+ all_proc_term = False
+ break
+ if all_proc_term:
+ break
+
time.sleep(min(remaining, log_check_interval))
+ if stop_on_error:
+ for process in processes:
+ if process['statistic']['fails'] > 0:
+ break
+
if not logfiles:
continue
errors = _error_in_logs(logfiles, computes)
if errors:
had_errors = True
break
- for p in processes:
- p.terminate()
+
+ terminate_all_processes()
+
+ sum_fails = 0
+ sum_runs = 0
+
+ LOG.info("Statistics (per process):")
+ for process in processes:
+ if process['statistic']['fails'] > 0:
+ had_errors = True
+ sum_runs += process['statistic']['runs']
+ sum_fails += process['statistic']['fails']
+ LOG.info(" Process %d (%s): Run %d actions (%d failed)" %
+ (process['p_number'],
+ process['action'],
+ process['statistic']['runs'],
+ process['statistic']['fails']))
+ LOG.info("Summary:")
+ LOG.info("Run %d actions (%d failed)" %
+ (sum_runs, sum_fails))
+
if not had_errors:
- logger.info("cleaning up")
+ LOG.info("cleaning up")
cleanup.cleanup()
+ if had_errors:
+ return 1
+ else:
+ return 0
diff --git a/tempest/stress/etc/sample-test.json b/tempest/stress/etc/sample-test.json
deleted file mode 100644
index 5a0189c..0000000
--- a/tempest/stress/etc/sample-test.json
+++ /dev/null
@@ -1,7 +0,0 @@
-[{"action": "tempest.stress.actions.create_destroy_server.create_destroy",
- "threads": 8,
- "use_admin": false,
- "use_isolated_tenants": false,
- "kwargs": {}
- }
-]
diff --git a/tempest/stress/etc/server-create-destroy-test.json b/tempest/stress/etc/server-create-destroy-test.json
new file mode 100644
index 0000000..17d5e1a
--- /dev/null
+++ b/tempest/stress/etc/server-create-destroy-test.json
@@ -0,0 +1,7 @@
+[{"action": "tempest.stress.actions.server_create_destroy.ServerCreateDestroyTest",
+ "threads": 8,
+ "use_admin": false,
+ "use_isolated_tenants": false,
+ "kwargs": {}
+ }
+]
diff --git a/tempest/stress/etc/stress-tox-job.json b/tempest/stress/etc/stress-tox-job.json
new file mode 100644
index 0000000..dffc469
--- /dev/null
+++ b/tempest/stress/etc/stress-tox-job.json
@@ -0,0 +1,19 @@
+[{"action": "tempest.stress.actions.server_create_destroy.ServerCreateDestroyTest",
+ "threads": 8,
+ "use_admin": false,
+ "use_isolated_tenants": false,
+ "kwargs": {}
+ },
+ {"action": "tempest.stress.actions.volume_create_delete.VolumeCreateDeleteTest",
+ "threads": 4,
+ "use_admin": false,
+ "use_isolated_tenants": false,
+ "kwargs": {}
+ },
+ {"action": "tempest.stress.actions.volume_attach_delete.VolumeAttachDeleteTest",
+ "threads": 2,
+ "use_admin": false,
+ "use_isolated_tenants": false,
+ "kwargs": {}
+ }
+]
diff --git a/tempest/stress/etc/volume-attach-delete-test.json b/tempest/stress/etc/volume-attach-delete-test.json
new file mode 100644
index 0000000..4553ff8
--- /dev/null
+++ b/tempest/stress/etc/volume-attach-delete-test.json
@@ -0,0 +1,7 @@
+[{"action": "tempest.stress.actions.volume_attach_delete.VolumeAttachDeleteTest",
+ "threads": 4,
+ "use_admin": false,
+ "use_isolated_tenants": false,
+ "kwargs": {}
+ }
+]
diff --git a/tempest/stress/etc/volume-create-delete-test.json b/tempest/stress/etc/volume-create-delete-test.json
index ed0aaeb..e8a58f7 100644
--- a/tempest/stress/etc/volume-create-delete-test.json
+++ b/tempest/stress/etc/volume-create-delete-test.json
@@ -1,4 +1,4 @@
-[{"action": "tempest.stress.actions.volume_create_delete.create_delete",
+[{"action": "tempest.stress.actions.volume_create_delete.VolumeCreateDeleteTest",
"threads": 4,
"use_admin": false,
"use_isolated_tenants": false,
diff --git a/tempest/stress/run_stress.py b/tempest/stress/run_stress.py
index ef0ec8e..32e3ae0 100755
--- a/tempest/stress/run_stress.py
+++ b/tempest/stress/run_stress.py
@@ -18,17 +18,38 @@
import argparse
import json
-
-from tempest.stress import driver
+import sys
def main(ns):
+ # NOTE(mkoderer): moved import to make "-h" possible without OpenStack
+ from tempest.stress import driver
+ result = 0
tests = json.load(open(ns.tests, 'r'))
- driver.stress_openstack(tests, ns.duration)
+ if ns.serial:
+ for test in tests:
+ step_result = driver.stress_openstack([test],
+ ns.duration,
+ ns.number,
+ ns.stop)
+ # NOTE(mkoderer): we just save the last result code
+ if (step_result != 0):
+ result = step_result
+ else:
+ driver.stress_openstack(tests, ns.duration, ns.number, ns.stop)
+ return result
parser = argparse.ArgumentParser(description='Run stress tests. ')
parser.add_argument('-d', '--duration', default=300, type=int,
- help="Duration of test.")
+ help="Duration of test in secs.")
+parser.add_argument('-s', '--serial', action='store_true',
+ help="Trigger running tests serially.")
+parser.add_argument('-S', '--stop', action='store_true',
+ default=False, help="Stop on first error.")
+parser.add_argument('-n', '--number', type=int,
+ help="How often an action is executed for each process.")
parser.add_argument('tests', help="Name of the file with test description.")
-main(parser.parse_args())
+
+if __name__ == "__main__":
+ sys.exit(main(parser.parse_args()))
diff --git a/tempest/stress/stressaction.py b/tempest/stress/stressaction.py
new file mode 100644
index 0000000..3719841
--- /dev/null
+++ b/tempest/stress/stressaction.py
@@ -0,0 +1,78 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import signal
+import sys
+
+from tempest.openstack.common import log as logging
+
+
+class StressAction(object):
+
+ def __init__(self, manager, max_runs=None, stop_on_error=False):
+ full_cname = self.__module__ + "." + self.__class__.__name__
+ self.logger = logging.getLogger(full_cname)
+ self.manager = manager
+ self.max_runs = max_runs
+ self.stop_on_error = stop_on_error
+
+ def _shutdown_handler(self, signal, frame):
+ self.tearDown()
+ sys.exit(0)
+
+ def setUp(self, **kwargs):
+ """This method is called before the run method
+ to help the test initialize any structures.
+ kwargs contains arguments passed in from the
+ configuration json file.
+
+ setUp doesn't count against the time duration.
+ """
+ self.logger.debug("setUp")
+
+ def tearDown(self):
+ """This method is called to do any cleanup
+ after the test is complete.
+ """
+ self.logger.debug("tearDown")
+
+ def execute(self, shared_statistic):
+ """This is the main execution entry point called
+ by the driver. We register a signal handler to
+ allow us to gracefully tearDown, and then exit.
+ We also keep track of how many runs we do.
+ """
+ signal.signal(signal.SIGHUP, self._shutdown_handler)
+ signal.signal(signal.SIGTERM, self._shutdown_handler)
+
+ while self.max_runs is None or (shared_statistic['runs'] <
+ self.max_runs):
+ try:
+ self.run()
+ except Exception:
+ shared_statistic['fails'] += 1
+ self.logger.exception("Failure in run")
+ finally:
+ shared_statistic['runs'] += 1
+ if self.stop_on_error and (shared_statistic['fails'] > 1):
+ self.logger.warn("Stop process due to"
+ "\"stop-on-error\" argument")
+ self.tearDown()
+ sys.exit(1)
+
+ def run(self):
+ """This method is where the stress test code runs."""
+ raise NotImplemented()
diff --git a/tempest/stress/tools/cleanup.py b/tempest/stress/tools/cleanup.py
index 7139d6c..3885ba0 100755
--- a/tempest/stress/tools/cleanup.py
+++ b/tempest/stress/tools/cleanup.py
@@ -16,5 +16,4 @@
from tempest.stress import cleanup
-
cleanup.cleanup()
diff --git a/tempest/test.py b/tempest/test.py
index 6be37be..7787790 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -15,15 +15,18 @@
# License for the specific language governing permissions and limitations
# under the License.
+import atexit
+import os
import time
+import fixtures
import nose.plugins.attrib
import testresources
import testtools
-from tempest.common import log as logging
+from tempest import clients
from tempest import config
-from tempest import manager
+from tempest.openstack.common import log as logging
LOG = logging.getLogger(__name__)
@@ -54,16 +57,115 @@
return decorator
+# there is a mis-match between nose and testtools for older pythons.
+# testtools will set skipException to be either
+# unittest.case.SkipTest, unittest2.case.SkipTest or an internal skip
+# exception, depending on what it can find. Python <2.7 doesn't have
+# unittest.case.SkipTest; so if unittest2 is not installed it falls
+# back to the internal class.
+#
+# The current nose skip plugin will decide to raise either
+# unittest.case.SkipTest or its own internal exception; it does not
+# look for unittest2 or the internal unittest exception. Thus we must
+# monkey-patch testtools.TestCase.skipException to be the exception
+# the nose skip plugin expects.
+#
+# However, with the switch to testr nose may not be available, so we
+# require you to opt-in to this fix with an environment variable.
+#
+# This is temporary until upstream nose starts looking for unittest2
+# as testtools does; we can then remove this and ensure unittest2 is
+# available for older pythons; then nose and testtools will agree
+# unittest2.case.SkipTest is the one-true skip test exception.
+#
+# https://review.openstack.org/#/c/33056
+# https://github.com/nose-devs/nose/pull/699
+if 'TEMPEST_PY26_NOSE_COMPAT' in os.environ:
+ try:
+ import unittest.case.SkipTest
+ # convince pep8 we're using the import...
+ if unittest.case.SkipTest:
+ pass
+ raise RuntimeError("You have unittest.case.SkipTest; "
+ "no need to override")
+ except ImportError:
+ LOG.info("Overriding skipException to nose SkipTest")
+ testtools.TestCase.skipException = nose.plugins.skip.SkipTest
+
+at_exit_set = set()
+
+
+def validate_tearDownClass():
+ if at_exit_set:
+ raise RuntimeError("tearDownClass does not calls the super's"
+ "tearDownClass in these classes: "
+ + str(at_exit_set))
+
+atexit.register(validate_tearDownClass)
+
+
class BaseTestCase(testtools.TestCase,
testtools.testcase.WithAttributes,
testresources.ResourcedTestCase):
config = config.TempestConfig()
+ setUpClassCalled = False
+
@classmethod
def setUpClass(cls):
if hasattr(super(BaseTestCase, cls), 'setUpClass'):
super(BaseTestCase, cls).setUpClass()
+ cls.setUpClassCalled = True
+
+ @classmethod
+ def tearDownClass(cls):
+ at_exit_set.remove(cls)
+ if hasattr(super(BaseTestCase, cls), 'tearDownClass'):
+ super(BaseTestCase, cls).tearDownClass()
+
+ def setUp(self):
+ super(BaseTestCase, self).setUp()
+ if not self.setUpClassCalled:
+ raise RuntimeError("setUpClass does not calls the super's"
+ "setUpClass in the "
+ + self.__class__.__name__)
+ at_exit_set.add(self.__class__)
+ test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
+ try:
+ test_timeout = int(test_timeout)
+ except ValueError:
+ test_timeout = 0
+ if test_timeout > 0:
+ self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
+
+ if (os.environ.get('OS_STDOUT_CAPTURE') == 'True' or
+ os.environ.get('OS_STDOUT_CAPTURE') == '1'):
+ stdout = self.useFixture(fixtures.StringStream('stdout')).stream
+ self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
+ if (os.environ.get('OS_STDERR_CAPTURE') == 'True' or
+ os.environ.get('OS_STDERR_CAPTURE') == '1'):
+ stderr = self.useFixture(fixtures.StringStream('stderr')).stream
+ self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
+
+ @classmethod
+ def _get_identity_admin_client(cls):
+ """
+ Returns an instance of the Identity Admin API client
+ """
+ os = clients.AdminManager(interface=cls._interface)
+ admin_client = os.identity_client
+ return admin_client
+
+ @classmethod
+ def _get_client_args(cls):
+
+ return (
+ cls.config,
+ cls.config.identity.admin_username,
+ cls.config.identity.admin_password,
+ cls.config.identity.uri
+ )
def call_until_true(func, duration, sleep_for):
@@ -87,77 +189,3 @@
time.sleep(sleep_for)
now = time.time()
return False
-
-
-class TestCase(BaseTestCase):
- """Base test case class for all Tempest tests
-
- Contains basic setup and convenience methods
- """
-
- manager_class = None
-
- @classmethod
- def setUpClass(cls):
- cls.manager = cls.manager_class()
- for attr_name in cls.manager.client_attr_names:
- # Ensure that pre-existing class attributes won't be
- # accidentally overriden.
- assert not hasattr(cls, attr_name)
- client = getattr(cls.manager, attr_name)
- setattr(cls, attr_name, client)
- cls.resource_keys = {}
- cls.os_resources = []
-
- def set_resource(self, key, thing):
- LOG.debug("Adding %r to shared resources of %s" %
- (thing, self.__class__.__name__))
- self.resource_keys[key] = thing
- self.os_resources.append(thing)
-
- def get_resource(self, key):
- return self.resource_keys[key]
-
- def remove_resource(self, key):
- thing = self.resource_keys[key]
- self.os_resources.remove(thing)
- del self.resource_keys[key]
-
- def status_timeout(self, things, thing_id, expected_status):
- """
- Given a thing and an expected status, do a loop, sleeping
- for a configurable amount of time, checking for the
- expected status to show. At any time, if the returned
- status of the thing is ERROR, fail out.
- """
- def check_status():
- # python-novaclient has resources available to its client
- # that all implement a get() method taking an identifier
- # for the singular resource to retrieve.
- thing = things.get(thing_id)
- new_status = thing.status
- if new_status == 'ERROR':
- self.fail("%s failed to get to expected status."
- "In ERROR state."
- % thing)
- elif new_status == expected_status:
- return True # All good.
- LOG.debug("Waiting for %s to get to %s status. "
- "Currently in %s status",
- thing, expected_status, new_status)
- conf = config.TempestConfig()
- if not call_until_true(check_status,
- conf.compute.build_timeout,
- conf.compute.build_interval):
- self.fail("Timed out waiting for thing %s to become %s"
- % (thing_id, expected_status))
-
-
-class ComputeFuzzClientTest(TestCase):
-
- """
- Base test case class for OpenStack Compute API (Nova)
- that uses the Tempest REST fuzz client libs for calling the API.
- """
-
- manager_class = manager.ComputeFuzzClientManager
diff --git a/tempest/thirdparty/README.rst b/tempest/thirdparty/README.rst
index 41d31f3..b775817 100644
--- a/tempest/thirdparty/README.rst
+++ b/tempest/thirdparty/README.rst
@@ -1,9 +1,9 @@
Tempest Guide to Third Party API tests
-========
+======================================
What are these tests?
---------
+---------------------
Third party tests are tests for non native OpenStack APIs that are
part of OpenStack projects. If we ship an API, we're really required
@@ -14,14 +14,14 @@
Why are these tests in tempest?
---------
+-------------------------------
If we ship an API in an OpenStack component, there should be tests in
tempest to exercise it in some way.
Scope of these tests
---------
+--------------------
Third party API testing should be limited to the functional testing of
third party API compliance. Complex scenarios should be avoided, and
diff --git a/tempest/thirdparty/boto/test.py b/tempest/thirdparty/boto/test.py
index 9ff628c..e0c9f06 100644
--- a/tempest/thirdparty/boto/test.py
+++ b/tempest/thirdparty/boto/test.py
@@ -28,10 +28,10 @@
import keystoneclient.exceptions
import tempest.clients
-from tempest.common import log as logging
from tempest.common.utils.file_utils import have_effective_read_access
import tempest.config
from tempest import exceptions
+from tempest.openstack.common import log as logging
import tempest.test
from tempest.thirdparty.boto.utils.wait import re_search_wait
from tempest.thirdparty.boto.utils.wait import state_wait
@@ -58,8 +58,9 @@
A_I_IMAGES_READY = all_read(ami_path, aki_path, ari_path)
boto_logger = logging.getLogger('boto')
- level = boto_logger.level
- boto_logger.setLevel(orig_logging.CRITICAL) # suppress logging for these
+ level = boto_logger.logger.level
+ boto_logger.logger.setLevel(orig_logging.CRITICAL) # suppress logging
+ # for these
def _cred_sub_check(connection_data):
if not id_matcher.match(connection_data["aws_access_key_id"]):
@@ -99,7 +100,7 @@
except keystoneclient.exceptions.Unauthorized:
S3_CAN_CONNECT_ERROR = "AWS credentials not set," +\
" faild to get them even by keystoneclient"
- boto_logger.setLevel(level)
+ boto_logger.logger.setLevel(level)
return {'A_I_IMAGES_READY': A_I_IMAGES_READY,
'S3_CAN_CONNECT_ERROR': S3_CAN_CONNECT_ERROR,
'EC2_CAN_CONNECT_ERROR': EC2_CAN_CONNECT_ERROR}
@@ -169,7 +170,7 @@
add_cls = getattr(add_cls, part)
-#TODO(afazekas): classmethod handling
+# TODO(afazekas): classmethod handling
def friendly_function_name_simple(call_able):
name = ""
if hasattr(call_able, "im_class"):
@@ -196,6 +197,7 @@
@classmethod
def setUpClass(cls):
+ super(BotoTestCase, cls).setUpClass()
# The trash contains cleanup functions and paramaters in tuples
# (function, *args, **kwargs)
cls._resource_trash_bin = {}
@@ -223,7 +225,7 @@
"""Cancel Clean up request."""
del cls._resource_trash_bin[key]
- #TODO(afazekas): Add "with" context handling
+ # TODO(afazekas): Add "with" context handling
def assertBotoError(self, excMatcher, callableObj,
*args, **kwargs):
"""Example usage:
@@ -260,6 +262,10 @@
LOG.exception(exc)
finally:
del cls._resource_trash_bin[key]
+ super(BotoTestCase, cls).tearDownClass()
+ # NOTE(afazekas): let the super be called even on exceptions.
+ # The real exceptions are already logged; if the super throws another,
+ # it does not cause hidden issues
if fail_count:
raise exceptions.TearDownException(num=fail_count)
@@ -271,7 +277,7 @@
s3_error_code.server = ServerError()
s3_error_code.client = ClientError()
valid_image_state = set(('available', 'pending', 'failed'))
- #NOTE(afazekas): 'paused' is not valid status in EC2, but it does not have
+ # NOTE(afazekas): 'paused' is not a valid status in EC2, but it does not have
# a good mapping, because it uses memory, but not really a running machine
valid_instance_state = set(('pending', 'running', 'shutting-down',
'terminated', 'stopping', 'stopped', 'paused'))
@@ -379,7 +385,7 @@
def assertAddressReleasedWait(self, address):
def _address_delete():
- #NOTE(afazekas): the filter gives back IP
+ # NOTE(afazekas): the filter gives back IP
# even if it is not associated to my tenant
if (address.public_ip not in map(lambda a: a.public_ip,
self.ec2_client.get_all_addresses())):
@@ -447,7 +453,7 @@
if cls.ec2_error_code.\
client.InvalidInstanceID.NotFound.match(exc):
return "_GONE"
- #NOTE(afazekas): incorrect code,
+ # NOTE(afazekas): incorrect code,
# but the resource must be destoreyd
if exc.error_code == "InstanceNotFound":
return "_GONE"
@@ -464,7 +470,7 @@
if exc_num:
raise exceptions.TearDownException(num=exc_num)
- #NOTE(afazekas): The incorrect ErrorCodes makes very, very difficult
+ # NOTE(afazekas): The incorrect ErrorCodes makes very, very difficult
# to write better teardown
@classmethod
@@ -472,7 +478,7 @@
"""Delete group.
Use just for teardown!
"""
- #NOTE(afazekas): should wait/try until all related instance terminates
+ # NOTE(afazekas): should wait/try until all related instance terminates
group.delete()
@classmethod
@@ -486,7 +492,7 @@
LOG.critical("%s Volume has %s snapshot(s)", volume.id,
map(snaps.id, snaps))
- #Note(afazekas): detaching/attching not valid EC2 status
+ # NOTE(afazekas): detaching/attching not valid EC2 status
def _volume_state():
volume.update(validate=True)
try:
@@ -494,7 +500,7 @@
volume.detach(force=True)
except BaseException as exc:
LOG.exception(exc)
- #exc_num += 1 "nonlocal" not in python2
+ # exc_num += 1 "nonlocal" not in python2
return volume.status
try:
diff --git a/tempest/thirdparty/boto/test_ec2_instance_run.py b/tempest/thirdparty/boto/test_ec2_instance_run.py
index 89891d2..5007503 100644
--- a/tempest/thirdparty/boto/test_ec2_instance_run.py
+++ b/tempest/thirdparty/boto/test_ec2_instance_run.py
@@ -19,10 +19,10 @@
import testtools
from tempest import clients
-from tempest.common import log as logging
from tempest.common.utils.data_utils import rand_name
from tempest.common.utils.linux.remote_client import RemoteClient
from tempest import exceptions
+from tempest.openstack.common import log as logging
from tempest.test import attr
from tempest.thirdparty.boto.test import BotoTestCase
from tempest.thirdparty.boto.utils.s3 import s3_upload_dir
@@ -88,6 +88,53 @@
image["image_id"])
@attr(type='smoke')
+ def test_run_idempotent_instances(self):
+ # EC2 run instances idempotently
+
+ def _run_instance(client_token):
+ reservation = self.ec2_client.run_instances(
+ image_id=self.images["ami"]["image_id"],
+ kernel_id=self.images["aki"]["image_id"],
+ ramdisk_id=self.images["ari"]["image_id"],
+ instance_type=self.instance_type,
+ client_token=client_token)
+ rcuk = self.addResourceCleanUp(self.destroy_reservation,
+ reservation)
+ return (reservation, rcuk)
+
+ def _terminate_reservation(reservation, rcuk):
+ for instance in reservation.instances:
+ instance.terminate()
+ self.cancelResourceCleanUp(rcuk)
+
+ reservation_1, rcuk_1 = _run_instance('token_1')
+ reservation_2, rcuk_2 = _run_instance('token_2')
+ reservation_1a, rcuk_1a = _run_instance('token_1')
+
+ self.assertIsNotNone(reservation_1)
+ self.assertIsNotNone(reservation_2)
+ self.assertIsNotNone(reservation_1a)
+
+ # same reservation for token_1
+ self.assertEqual(reservation_1.id, reservation_1a.id)
+
+ # Cancel cleanup -- since it's a duplicate, it's
+ # handled by rcuk1
+ self.cancelResourceCleanUp(rcuk_1a)
+
+ _terminate_reservation(reservation_1, rcuk_1)
+ _terminate_reservation(reservation_2, rcuk_2)
+
+ reservation_3, rcuk_3 = _run_instance('token_1')
+ self.assertIsNotNone(reservation_3)
+
+ # make sure we don't get the old reservation back
+ self.assertNotEqual(reservation_1.id, reservation_3.id)
+
+ # clean up
+ _terminate_reservation(reservation_3, rcuk_3)
+
+ @attr(type='smoke')
def test_run_stop_terminate_instance(self):
# EC2 run, stop and terminate instance
image_ami = self.ec2_client.get_image(self.images["ami"]
@@ -113,6 +160,53 @@
self.cancelResourceCleanUp(rcuk)
@attr(type='smoke')
+ def test_run_stop_terminate_instance_with_tags(self):
+ # EC2 run, stop and terminate instance with tags
+ image_ami = self.ec2_client.get_image(self.images["ami"]
+ ["image_id"])
+ reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
+ ramdisk_id=self.images["ari"]["image_id"],
+ instance_type=self.instance_type)
+ rcuk = self.addResourceCleanUp(self.destroy_reservation, reservation)
+
+ for instance in reservation.instances:
+ LOG.info("state: %s", instance.state)
+ if instance.state != "running":
+ self.assertInstanceStateWait(instance, "running")
+ instance.add_tag('key1', value='value1')
+
+ tags = self.ec2_client.get_all_tags()
+ self.assertEquals(tags[0].name, 'key1')
+ self.assertEquals(tags[0].value, 'value1')
+
+ tags = self.ec2_client.get_all_tags(filters={'key': 'key1'})
+ self.assertEquals(tags[0].name, 'key1')
+ self.assertEquals(tags[0].value, 'value1')
+
+ tags = self.ec2_client.get_all_tags(filters={'value': 'value1'})
+ self.assertEquals(tags[0].name, 'key1')
+ self.assertEquals(tags[0].value, 'value1')
+
+ tags = self.ec2_client.get_all_tags(filters={'key': 'value2'})
+ self.assertEquals(len(tags), 0)
+
+ for instance in reservation.instances:
+ instance.remove_tag('key1', value='value1')
+
+ tags = self.ec2_client.get_all_tags()
+ self.assertEquals(len(tags), 0)
+
+ for instance in reservation.instances:
+ instance.stop()
+ LOG.info("state: %s", instance.state)
+ if instance.state != "stopped":
+ self.assertInstanceStateWait(instance, "stopped")
+
+ for instance in reservation.instances:
+ instance.terminate()
+ self.cancelResourceCleanUp(rcuk)
+
+ @attr(type='smoke')
@testtools.skip("Skipped until the Bug #1098891 is resolved")
def test_run_terminate_instance(self):
# EC2 run, terminate immediately
@@ -137,7 +231,7 @@
else:
self.assertNotEqual(instance.state, "running")
- #NOTE(afazekas): doctored test case,
+ # NOTE(afazekas): doctored test case,
# with normal validation it would fail
@testtools.skip("Until Bug #1182679 is fixed")
@attr(type='smoke')
@@ -183,10 +277,10 @@
self.assertTrue(address.associate(instance.id))
rcuk_da = self.addResourceCleanUp(address.disassociate)
- #TODO(afazekas): ping test. dependecy/permission ?
+ # TODO(afazekas): ping test. dependency/permission ?
self.assertVolumeStatusWait(volume, "available")
- #NOTE(afazekas): it may be reports availble before it is available
+ # NOTE(afazekas): it may be reported available before it is available
ssh = RemoteClient(address.public_ip,
self.os.config.compute.ssh_user,
@@ -210,7 +304,7 @@
self.assertVolumeStatusWait(_volume_state, "in-use")
re_search_wait(_volume_state, "in-use")
- #NOTE(afazekas): Different Hypervisor backends names
+ # NOTE(afazekas): Different Hypervisor backends names
# differently the devices,
# now we just test is the partition number increased/decrised
@@ -225,7 +319,7 @@
state_wait(_part_state, 'INCREASE')
part_lines = ssh.get_partitions().split('\n')
- #TODO(afazekas): Resource compare to the flavor settings
+ # TODO(afazekas): Resource compare to the flavor settings
volume.detach()
@@ -246,7 +340,7 @@
LOG.info("state: %s", instance.state)
if instance.state != "stopped":
self.assertInstanceStateWait(instance, "stopped")
- #TODO(afazekas): move steps from teardown to the test case
+ # TODO(afazekas): move steps from teardown to the test case
-#TODO(afazekas): Snapshot/volume read/write test case
+# TODO(afazekas): Snapshot/volume read/write test case
diff --git a/tempest/thirdparty/boto/test_ec2_keys.py b/tempest/thirdparty/boto/test_ec2_keys.py
index 1072356..1b4d7ec 100644
--- a/tempest/thirdparty/boto/test_ec2_keys.py
+++ b/tempest/thirdparty/boto/test_ec2_keys.py
@@ -37,7 +37,7 @@
cls.client = cls.os.ec2api_client
cls.ec = cls.ec2_error_code
-#TODO(afazekas): merge create, delete, get test cases
+# TODO(afazekas): merge create, delete, get test cases
@attr(type='smoke')
def test_create_ec2_keypair(self):
# EC2 create KeyPair
diff --git a/tempest/thirdparty/boto/test_ec2_network.py b/tempest/thirdparty/boto/test_ec2_network.py
index f4602d8..6226dbb 100644
--- a/tempest/thirdparty/boto/test_ec2_network.py
+++ b/tempest/thirdparty/boto/test_ec2_network.py
@@ -30,7 +30,7 @@
cls.os = clients.Manager()
cls.client = cls.os.ec2api_client
-#Note(afazekas): these tests for things duable without an instance
+# Note(afazekas): these tests are for things doable without an instance
@testtools.skip("Skipped until the Bug #1080406 is resolved")
@attr(type='smoke')
def test_disassociate_not_associated_floating_ip(self):
diff --git a/tempest/thirdparty/boto/test_ec2_security_groups.py b/tempest/thirdparty/boto/test_ec2_security_groups.py
index 3db9a88..81ddcf6 100644
--- a/tempest/thirdparty/boto/test_ec2_security_groups.py
+++ b/tempest/thirdparty/boto/test_ec2_security_groups.py
@@ -43,7 +43,7 @@
group_get = groups_get[0]
self.assertEqual(group.name, group_get.name)
self.assertEqual(group.name, group_get.name)
- #ping (icmp_echo) and other icmp allowed from everywhere
+ # ping (icmp_echo) and other icmp allowed from everywhere
# from_port and to_port act as icmp type
success = self.client.authorize_security_group(group_name,
ip_protocol="icmp",
@@ -51,17 +51,17 @@
from_port=-1,
to_port=-1)
self.assertTrue(success)
- #allow standard ssh port from anywhere
+ # allow standard ssh port from anywhere
success = self.client.authorize_security_group(group_name,
ip_protocol="tcp",
cidr_ip="0.0.0.0/0",
from_port=22,
to_port=22)
self.assertTrue(success)
- #TODO(afazekas): Duplicate tests
+ # TODO(afazekas): Duplicate tests
group_get = self.client.get_all_security_groups(
groupnames=(group_name,))[0]
- #remove listed rules
+ # remove listed rules
for ip_permission in group_get.rules:
for cidr in ip_permission.grants:
self.assertTrue(self.client.revoke_security_group(group_name,
@@ -72,5 +72,5 @@
group_get = self.client.get_all_security_groups(
groupnames=(group_name,))[0]
- #all rules shuld be removed now
+ # all rules should be removed now
self.assertEqual(0, len(group_get.rules))
diff --git a/tempest/thirdparty/boto/test_ec2_volumes.py b/tempest/thirdparty/boto/test_ec2_volumes.py
index c90c586..dbb3104 100644
--- a/tempest/thirdparty/boto/test_ec2_volumes.py
+++ b/tempest/thirdparty/boto/test_ec2_volumes.py
@@ -16,7 +16,7 @@
# under the License.
from tempest import clients
-from tempest.common import log as logging
+from tempest.openstack.common import log as logging
from tempest.test import attr
from tempest.thirdparty.boto.test import BotoTestCase
diff --git a/tempest/thirdparty/boto/test_s3_ec2_images.py b/tempest/thirdparty/boto/test_s3_ec2_images.py
index 0f836d0..26c2701 100644
--- a/tempest/thirdparty/boto/test_s3_ec2_images.py
+++ b/tempest/thirdparty/boto/test_s3_ec2_images.py
@@ -17,14 +17,11 @@
import os
-import testtools
-
from tempest import clients
from tempest.common.utils.data_utils import rand_name
from tempest.test import attr
from tempest.thirdparty.boto.test import BotoTestCase
from tempest.thirdparty.boto.utils.s3 import s3_upload_dir
-from tempest.thirdparty.boto.utils.wait import state_wait
class S3ImagesTest(BotoTestCase):
@@ -53,8 +50,6 @@
cls.bucket_name)
s3_upload_dir(bucket, cls.materials_path)
- #Note(afazekas): Without the normal status change test!
- # otherwise I would skip it too
@attr(type='smoke')
def test_register_get_deregister_ami_image(self):
# Register and deregister ami image
@@ -64,7 +59,7 @@
image["image_id"] = self.images_client.register_image(
name=image["name"],
image_location=image["location"])
- #Note(afazekas): delete_snapshot=True might trigger boto lib? bug
+ # NOTE(afazekas): delete_snapshot=True might trigger boto lib? bug
image["cleanUp"] = self.addResourceCleanUp(
self.images_client.deregister_image,
image["image_id"])
@@ -72,13 +67,8 @@
retrieved_image = self.images_client.get_image(image["image_id"])
self.assertTrue(retrieved_image.name == image["name"])
self.assertTrue(retrieved_image.id == image["image_id"])
- state = retrieved_image.state
- if state != "available":
- def _state():
- retr = self.images_client.get_image(image["image_id"])
- return retr.state
- state = state_wait(_state, "available")
- self.assertEqual("available", state)
+ if retrieved_image.state != "available":
+ self.assertImageStateWait(retrieved_image, "available")
self.images_client.deregister_image(image["image_id"])
self.assertNotIn(image["image_id"], str(
self.images_client.get_all_images()))
@@ -107,7 +97,6 @@
self.images_client.get_all_images()))
self.cancelResourceCleanUp(image["cleanUp"])
- @testtools.skip("Skipped until the Bug #1074908 and #1074904 is resolved")
def test_register_get_deregister_ari_image(self):
# Register and deregister ari image
image = {"name": rand_name("ari-name-"),
@@ -130,4 +119,4 @@
self.images_client.deregister_image(image["image_id"])
self.cancelResourceCleanUp(image["cleanUp"])
-#TODO(afazekas): less copy-paste style
+# TODO(afazekas): less copy-paste style
diff --git a/tempest/thirdparty/boto/utils/s3.py b/tempest/thirdparty/boto/utils/s3.py
index a309a12..f8fa61b 100644
--- a/tempest/thirdparty/boto/utils/s3.py
+++ b/tempest/thirdparty/boto/utils/s3.py
@@ -22,7 +22,7 @@
import boto
import boto.s3.key
-from tempest.common import log as logging
+from tempest.openstack.common import log as logging
LOG = logging.getLogger(__name__)
diff --git a/tempest/thirdparty/boto/utils/wait.py b/tempest/thirdparty/boto/utils/wait.py
index 6b3ef27..1507deb 100644
--- a/tempest/thirdparty/boto/utils/wait.py
+++ b/tempest/thirdparty/boto/utils/wait.py
@@ -21,8 +21,8 @@
import boto.exception
from testtools import TestCase
-from tempest.common import log as logging
import tempest.config
+from tempest.openstack.common import log as logging
LOG = logging.getLogger(__name__)
@@ -34,7 +34,7 @@
def state_wait(lfunction, final_set=set(), valid_set=None):
- #TODO(afazekas): evaluate using ABC here
+ # TODO(afazekas): evaluate using ABC here
if not isinstance(final_set, set):
final_set = set((final_set,))
if not isinstance(valid_set, set) and valid_set is not None:
@@ -112,7 +112,7 @@
time.sleep(default_check_interval)
-#NOTE(afazekas): EC2/boto normally raise exception instead of empty list
+# NOTE(afazekas): EC2/boto normally raise exception instead of empty list
def wait_exception(lfunction):
"""Returns with the exception or raises one."""
start_time = time.time()
@@ -129,4 +129,4 @@
dtime)
time.sleep(default_check_interval)
-#TODO(afazekas): consider strategy design pattern..
+# TODO(afazekas): consider strategy design pattern..
diff --git a/tempest/whitebox/manager.py b/tempest/whitebox/manager.py
index 3bd057c..b2632f1 100644
--- a/tempest/whitebox/manager.py
+++ b/tempest/whitebox/manager.py
@@ -22,11 +22,11 @@
from sqlalchemy import create_engine, MetaData
-from tempest.common import log as logging
from tempest.common.ssh import Client
from tempest.common.utils.data_utils import rand_name
from tempest import exceptions
-from tempest import test
+from tempest.openstack.common import log as logging
+from tempest.scenario import manager
LOG = logging.getLogger(__name__)
@@ -47,7 +47,7 @@
pass
-class ComputeWhiteboxTest(test.ComputeFuzzClientTest, WhiteboxTest):
+class ComputeWhiteboxTest(manager.OfficialClientTest):
"""
Base smoke test case class for OpenStack Compute API (Nova)
@@ -64,15 +64,6 @@
cls.nova_dir = cls.config.whitebox.source_dir
cls.compute_bin_dir = cls.config.whitebox.bin_dir
cls.compute_config_path = cls.config.whitebox.config_path
- cls.servers_client = cls.manager.servers_client
- cls.images_client = cls.manager.images_client
- cls.flavors_client = cls.manager.flavors_client
- cls.extensions_client = cls.manager.extensions_client
- cls.floating_ips_client = cls.manager.floating_ips_client
- cls.keypairs_client = cls.manager.keypairs_client
- cls.security_groups_client = cls.manager.security_groups_client
- cls.limits_client = cls.manager.limits_client
- cls.volumes_client = cls.manager.volumes_client
cls.build_interval = cls.config.compute.build_interval
cls.build_timeout = cls.config.compute.build_timeout
cls.ssh_user = cls.config.compute.ssh_user
@@ -80,38 +71,27 @@
cls.image_ref_alt = cls.config.compute.image_ref_alt
cls.flavor_ref = cls.config.compute.flavor_ref
cls.flavor_ref_alt = cls.config.compute.flavor_ref_alt
- cls.servers = []
+ #NOTE(afazekas): Mimics the helper method used in the api tests
@classmethod
- def tearDownClass(cls):
- # NOTE(jaypipes): Tests often add things in a particular order
- # so we destroy resources in the reverse order in which resources
- # are added to the test class object
- if not cls.os_resources:
- return
- thing = cls.os_resources.pop()
- while True:
- LOG.debug("Deleting %r from shared resources of %s" %
- (thing, cls.__name__))
- # Resources in novaclient all have a delete() method
- # which destroys the resource...
- thing.delete()
- if not cls.os_resources:
- return
- thing = cls.os_resources.pop()
+ def create_server(cls, **kwargs):
+ flavor_ref = cls.config.compute.flavor_ref
+ image_ref = cls.config.compute.image_ref
+ name = rand_name(cls.__name__ + "-instance")
+ if 'name' in kwargs:
+ name = kwargs.pop('name')
+ flavor = kwargs.get('flavor', flavor_ref)
+ image_id = kwargs.get('image_id', image_ref)
- @classmethod
- def create_server(cls, image_id=None):
- """Wrapper utility that returns a test server."""
- server_name = rand_name(cls.__name__ + "-instance")
- flavor = cls.flavor_ref
- if not image_id:
- image_id = cls.image_ref
+ server = cls.compute_client.servers.create(
+ name, image_id, flavor, **kwargs)
- resp, server = cls.servers_client.create_server(
- server_name, image_id, flavor)
- cls.servers_client.wait_for_server_status(server['id'], 'ACTIVE')
- cls.servers.append(server)
+ if 'wait_until' in kwargs:
+ cls.status_timeout(cls.compute_client.servers, server.id,
+ server['id'], kwargs['wait_until'])
+
+ server = cls.compute_client.servers.get(server.id)
+ cls.set_resource(name, server)
return server
@classmethod
diff --git a/tempest/whitebox/test_images_whitebox.py b/tempest/whitebox/test_images_whitebox.py
index dc68336..0afb17e 100644
--- a/tempest/whitebox/test_images_whitebox.py
+++ b/tempest/whitebox/test_images_whitebox.py
@@ -15,23 +15,19 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest.api.compute import base
from tempest.common.utils.data_utils import rand_name
-from tempest import exceptions
from tempest.whitebox import manager
-#TODO(afazekas): The whitebox tests are using complex testclass/manager
-# hierarchy, without a real need. It is difficult to maintain.
-# They could share more code with scenario tests.
+from novaclient import exceptions
-class ImagesWhiteboxTest(manager.ComputeWhiteboxTest, base.BaseComputeTest):
+class ImagesWhiteboxTest(manager.ComputeWhiteboxTest):
_interface = 'json'
@classmethod
def setUpClass(cls):
super(ImagesWhiteboxTest, cls).setUpClass()
- cls.client = cls.images_client
+ cls.create_image = cls.compute_client.servers.create_image
cls.connection, cls.meta = cls.get_db_handle_and_meta()
cls.shared_server = cls.create_server()
cls.image_ids = []
@@ -39,7 +35,6 @@
@classmethod
def tearDownClass(cls):
"""Delete images and server after a test is executed."""
- cls.servers_client.delete_server(cls.shared_server['id'])
for image_id in cls.image_ids:
cls.client.delete_image(image_id)
cls.image_ids.remove(image_id)
@@ -62,18 +57,18 @@
def _test_create_image_409_base(self, vm_state, task_state, deleted=0):
"""Base method for create image tests based on vm and task states."""
try:
- self.update_state(self.shared_server['id'], vm_state,
+ self.update_state(self.shared_server.id, vm_state,
task_state, deleted)
image_name = rand_name('snap-')
- self.assertRaises(exceptions.Duplicate,
- self.client.create_image,
- self.shared_server['id'], image_name)
+ self.assertRaises(exceptions.Conflict,
+ self.create_image,
+ self.shared_server.id, image_name)
except Exception:
self.fail("Should not allow create image when vm_state=%s and "
"task_state=%s" % (vm_state, task_state))
finally:
- self.update_state(self.shared_server['id'], 'active', None)
+ self.update_state(self.shared_server.id, 'active', None)
def test_create_image_when_vm_eq_building_task_eq_scheduling(self):
# 409 error when instance states are building,scheduling
diff --git a/tempest/whitebox/test_servers_whitebox.py b/tempest/whitebox/test_servers_whitebox.py
index 2694b95..1c1cdeb 100644
--- a/tempest/whitebox/test_servers_whitebox.py
+++ b/tempest/whitebox/test_servers_whitebox.py
@@ -25,7 +25,6 @@
@classmethod
def setUpClass(cls):
- raise cls.skipException("Until Bug 1034129 is fixed")
super(ServersWhiteboxTest, cls).setUpClass()
#NOTE(afazekas): Strange relationship
BaseIdentityAdminTest.setUpClass()
@@ -80,7 +79,7 @@
stmt = instances.select().where(instances.c.uuid == server['id'])
result = self.connection.execute(stmt).first()
- self.assertEqual(1, result.deleted)
+ self.assertEqual(True, result.deleted > 0)
self.assertEqual('deleted', result.vm_state)
self.assertEqual(None, result.task_state)
except Exception:
diff --git a/test-requirements.txt b/test-requirements.txt
index 2185997..236a473 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -2,7 +2,7 @@
pep8==1.4.5
pyflakes==0.7.2
flake8==2.0
-hacking>=0.5.3,<0.6
+hacking>=0.5.6,<0.7
# needed for doc build
+docutils==0.9.1
sphinx>=1.1.2
-
diff --git a/tools/colorizer.py b/tools/colorizer.py
new file mode 100755
index 0000000..76a3bd3
--- /dev/null
+++ b/tools/colorizer.py
@@ -0,0 +1,333 @@
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2013, Nebula, Inc.
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Colorizer Code is borrowed from Twisted:
+# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+"""Display a subunit stream through a colorized unittest test runner."""
+
+import heapq
+import subunit
+import sys
+import unittest
+
+import testtools
+
+
+class _AnsiColorizer(object):
+ """
+ A colorizer is an object that loosely wraps around a stream, allowing
+ callers to write text to the stream in a particular color.
+
+ Colorizer classes must implement C{supported()} and C{write(text, color)}.
+ """
+ _colors = dict(black=30, red=31, green=32, yellow=33,
+ blue=34, magenta=35, cyan=36, white=37)
+
+ def __init__(self, stream):
+ self.stream = stream
+
+ def supported(cls, stream=sys.stdout):
+ """
+ A class method that returns True if the current platform supports
+ coloring terminal output using this method. Returns False otherwise.
+ """
+ if not stream.isatty():
+ return False # auto color only on TTYs
+ try:
+ import curses
+ except ImportError:
+ return False
+ else:
+ try:
+ try:
+ return curses.tigetnum("colors") > 2
+ except curses.error:
+ curses.setupterm()
+ return curses.tigetnum("colors") > 2
+ except Exception:
+ # guess false in case of error
+ return False
+ supported = classmethod(supported)
+
+ def write(self, text, color):
+ """
+ Write the given text to the stream in the given color.
+
+ @param text: Text to be written to the stream.
+
+ @param color: A string label for a color. e.g. 'red', 'white'.
+ """
+ color = self._colors[color]
+ self.stream.write('\x1b[%s;1m%s\x1b[0m' % (color, text))
+
+
+class _Win32Colorizer(object):
+ """
+ See _AnsiColorizer docstring.
+ """
+ def __init__(self, stream):
+ import win32console
+ red, green, blue, bold = (win32console.FOREGROUND_RED,
+ win32console.FOREGROUND_GREEN,
+ win32console.FOREGROUND_BLUE,
+ win32console.FOREGROUND_INTENSITY)
+ self.stream = stream
+ self.screenBuffer = win32console.GetStdHandle(
+ win32console.STD_OUT_HANDLE)
+ self._colors = {'normal': red | green | blue,
+ 'red': red | bold,
+ 'green': green | bold,
+ 'blue': blue | bold,
+ 'yellow': red | green | bold,
+ 'magenta': red | blue | bold,
+ 'cyan': green | blue | bold,
+ 'white': red | green | blue | bold}
+
+ def supported(cls, stream=sys.stdout):
+ try:
+ import win32console
+ screenBuffer = win32console.GetStdHandle(
+ win32console.STD_OUT_HANDLE)
+ except ImportError:
+ return False
+ import pywintypes
+ try:
+ screenBuffer.SetConsoleTextAttribute(
+ win32console.FOREGROUND_RED |
+ win32console.FOREGROUND_GREEN |
+ win32console.FOREGROUND_BLUE)
+ except pywintypes.error:
+ return False
+ else:
+ return True
+ supported = classmethod(supported)
+
+ def write(self, text, color):
+ color = self._colors[color]
+ self.screenBuffer.SetConsoleTextAttribute(color)
+ self.stream.write(text)
+ self.screenBuffer.SetConsoleTextAttribute(self._colors['normal'])
+
+
+class _NullColorizer(object):
+ """
+ See _AnsiColorizer docstring.
+ """
+ def __init__(self, stream):
+ self.stream = stream
+
+ def supported(cls, stream=sys.stdout):
+ return True
+ supported = classmethod(supported)
+
+ def write(self, text, color):
+ self.stream.write(text)
+
+
+def get_elapsed_time_color(elapsed_time):
+ if elapsed_time > 1.0:
+ return 'red'
+ elif elapsed_time > 0.25:
+ return 'yellow'
+ else:
+ return 'green'
+
+
+class NovaTestResult(testtools.TestResult):
+ def __init__(self, stream, descriptions, verbosity):
+ super(NovaTestResult, self).__init__()
+ self.stream = stream
+ self.showAll = verbosity > 1
+ self.num_slow_tests = 10
+ self.slow_tests = [] # this is a fixed-sized heap
+ self.colorizer = None
+ # NOTE(vish): reset stdout for the terminal check
+ stdout = sys.stdout
+ sys.stdout = sys.__stdout__
+ for colorizer in [_Win32Colorizer, _AnsiColorizer, _NullColorizer]:
+ if colorizer.supported():
+ self.colorizer = colorizer(self.stream)
+ break
+ sys.stdout = stdout
+ self.start_time = None
+ self.last_time = {}
+ self.results = {}
+ self.last_written = None
+
+ def _writeElapsedTime(self, elapsed):
+ color = get_elapsed_time_color(elapsed)
+ self.colorizer.write(" %.2f" % elapsed, color)
+
+ def _addResult(self, test, *args):
+ try:
+ name = test.id()
+ except AttributeError:
+ name = 'Unknown.unknown'
+ test_class, test_name = name.rsplit('.', 1)
+
+ elapsed = (self._now() - self.start_time).total_seconds()
+ item = (elapsed, test_class, test_name)
+ if len(self.slow_tests) >= self.num_slow_tests:
+ heapq.heappushpop(self.slow_tests, item)
+ else:
+ heapq.heappush(self.slow_tests, item)
+
+ self.results.setdefault(test_class, [])
+ self.results[test_class].append((test_name, elapsed) + args)
+ self.last_time[test_class] = self._now()
+ self.writeTests()
+
+ def _writeResult(self, test_name, elapsed, long_result, color,
+ short_result, success):
+ if self.showAll:
+ self.stream.write(' %s' % str(test_name).ljust(66))
+ self.colorizer.write(long_result, color)
+ if success:
+ self._writeElapsedTime(elapsed)
+ self.stream.writeln()
+ else:
+ self.colorizer.write(short_result, color)
+
+ def addSuccess(self, test):
+ super(NovaTestResult, self).addSuccess(test)
+ self._addResult(test, 'OK', 'green', '.', True)
+
+ def addFailure(self, test, err):
+ if test.id() == 'process-returncode':
+ return
+ super(NovaTestResult, self).addFailure(test, err)
+ self._addResult(test, 'FAIL', 'red', 'F', False)
+
+ def addError(self, test, err):
+ super(NovaTestResult, self).addFailure(test, err)
+ self._addResult(test, 'ERROR', 'red', 'E', False)
+
+ def addSkip(self, test, reason=None, details=None):
+ super(NovaTestResult, self).addSkip(test, reason, details)
+ self._addResult(test, 'SKIP', 'blue', 'S', True)
+
+ def startTest(self, test):
+ self.start_time = self._now()
+ super(NovaTestResult, self).startTest(test)
+
+ def writeTestCase(self, cls):
+ if not self.results.get(cls):
+ return
+ if cls != self.last_written:
+ self.colorizer.write(cls, 'white')
+ self.stream.writeln()
+ for result in self.results[cls]:
+ self._writeResult(*result)
+ del self.results[cls]
+ self.stream.flush()
+ self.last_written = cls
+
+ def writeTests(self):
+ time = self.last_time.get(self.last_written, self._now())
+ if not self.last_written or (self._now() - time).total_seconds() > 2.0:
+ diff = 3.0
+ while diff > 2.0:
+ classes = self.results.keys()
+ oldest = min(classes, key=lambda x: self.last_time[x])
+ diff = (self._now() - self.last_time[oldest]).total_seconds()
+ self.writeTestCase(oldest)
+ else:
+ self.writeTestCase(self.last_written)
+
+ def done(self):
+ self.stopTestRun()
+
+ def stopTestRun(self):
+ for cls in list(self.results.iterkeys()):
+ self.writeTestCase(cls)
+ self.stream.writeln()
+ self.writeSlowTests()
+
+ def writeSlowTests(self):
+ # Pare out 'fast' tests
+ slow_tests = [item for item in self.slow_tests
+ if get_elapsed_time_color(item[0]) != 'green']
+ if slow_tests:
+ slow_total_time = sum(item[0] for item in slow_tests)
+ slow = ("Slowest %i tests took %.2f secs:"
+ % (len(slow_tests), slow_total_time))
+ self.colorizer.write(slow, 'yellow')
+ self.stream.writeln()
+ last_cls = None
+ # sort by name
+ for elapsed, cls, name in sorted(slow_tests,
+ key=lambda x: x[1] + x[2]):
+ if cls != last_cls:
+ self.colorizer.write(cls, 'white')
+ self.stream.writeln()
+ last_cls = cls
+ self.stream.write(' %s' % str(name).ljust(68))
+ self._writeElapsedTime(elapsed)
+ self.stream.writeln()
+
+ def printErrors(self):
+ if self.showAll:
+ self.stream.writeln()
+ self.printErrorList('ERROR', self.errors)
+ self.printErrorList('FAIL', self.failures)
+
+ def printErrorList(self, flavor, errors):
+ for test, err in errors:
+ self.colorizer.write("=" * 70, 'red')
+ self.stream.writeln()
+ self.colorizer.write(flavor, 'red')
+ self.stream.writeln(": %s" % test.id())
+ self.colorizer.write("-" * 70, 'red')
+ self.stream.writeln()
+ self.stream.writeln("%s" % err)
+
+
+test = subunit.ProtocolTestCase(sys.stdin, passthrough=None)
+
+if sys.version_info[0:2] <= (2, 6):
+ runner = unittest.TextTestRunner(verbosity=2)
+else:
+ runner = unittest.TextTestRunner(verbosity=2, resultclass=NovaTestResult)
+
+if runner.run(test).wasSuccessful():
+ exit_code = 0
+else:
+ exit_code = 1
+sys.exit(exit_code)
diff --git a/tools/pretty_tox.sh b/tools/pretty_tox.sh
new file mode 100755
index 0000000..a5a6076
--- /dev/null
+++ b/tools/pretty_tox.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+TESTRARGS=$1
+python setup.py testr --slowest --testr-args="--subunit $TESTRARGS" | subunit2pyunit
diff --git a/tools/pretty_tox_serial.sh b/tools/pretty_tox_serial.sh
new file mode 100755
index 0000000..45f05bd
--- /dev/null
+++ b/tools/pretty_tox_serial.sh
@@ -0,0 +1,11 @@
+#!/bin/sh
+
+TESTRARGS=$@
+
+if [ ! -d .testrepository ]; then
+ testr init
+fi
+testr run --subunit $TESTRARGS | subunit2pyunit
+retval=$?
+testr slowest
+exit $retval
diff --git a/tox.ini b/tox.ini
index 964dbca..dc8980d 100644
--- a/tox.ini
+++ b/tox.ini
@@ -3,71 +3,49 @@
[testenv]
setenv = VIRTUAL_ENV={envdir}
- NOSE_WITH_OPENSTACK=1
- NOSE_OPENSTACK_COLOR=1
- NOSE_OPENSTACK_RED=15
- NOSE_OPENSTACK_YELLOW=3
- NOSE_OPENSTACK_SHOW_ELAPSED=1
- NOSE_OPENSTACK_STDOUT=1
+ LANG=en_US.UTF-8
+ LANGUAGE=en_US:en
+ LC_ALL=C
[testenv:all]
sitepackages = True
setenv = VIRTUAL_ENV={envdir}
- NOSE_WITH_OPENSTACK=1
- NOSE_OPENSTACK_COLOR=1
- NOSE_OPENSTACK_RED=15
- NOSE_OPENSTACK_YELLOW=3
- NOSE_OPENSTACK_SHOW_ELAPSED=1
- NOSE_OPENSTACK_STDOUT=1
commands =
- nosetests --logging-format '%(asctime)-15s %(message)s' --with-xunit --xunit-file=nosetests-all.xml -sv tempest
+ python setup.py testr --slowest --testr-args='{posargs}'
[testenv:full]
sitepackages = True
setenv = VIRTUAL_ENV={envdir}
- NOSE_WITH_OPENSTACK=1
- NOSE_OPENSTACK_COLOR=1
- NOSE_OPENSTACK_RED=15
- NOSE_OPENSTACK_YELLOW=3
- NOSE_OPENSTACK_SHOW_ELAPSED=1
- NOSE_OPENSTACK_STDOUT=1
+# The regex below is used to select which tests to run and exclude the slow tag:
+# See the testrepository bug: https://bugs.launchpad.net/testrepository/+bug/1208610
commands =
- nosetests --logging-format '%(asctime)-15s %(message)s' --with-xunit --xunit-file=nosetests-full.xml -sv tempest/api tempest/scenario tempest/thirdparty tempest/cli
+ sh tools/pretty_tox_serial.sh '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario|thirdparty|cli)) {posargs}'
+
+[testenv:testr-full]
+sitepackages = True
+setenv = VIRTUAL_ENV={envdir}
+commands =
+ sh tools/pretty_tox.sh '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario|thirdparty|cli)) {posargs}'
[testenv:smoke]
sitepackages = True
setenv = VIRTUAL_ENV={envdir}
- NOSE_WITH_OPENSTACK=1
- NOSE_OPENSTACK_COLOR=1
- NOSE_OPENSTACK_RED=15
- NOSE_OPENSTACK_YELLOW=3
- NOSE_OPENSTACK_SHOW_ELAPSED=1
- NOSE_OPENSTACK_STDOUT=1
commands =
- nosetests --logging-format '%(asctime)-15s %(message)s' --with-xunit -sv --attr=type=smoke --xunit-file=nosetests-smoke.xml tempest
-
+ sh tools/pretty_tox_serial.sh 'smoke {posargs}'
[testenv:coverage]
sitepackages = True
setenv = VIRTUAL_ENV={envdir}
- NOSE_WITH_OPENSTACK=1
- NOSE_OPENSTACK_COLOR=1
- NOSE_OPENSTACK_RED=15
- NOSE_OPENSTACK_YELLOW=3
- NOSE_OPENSTACK_SHOW_ELAPSED=1
- NOSE_OPENSTACK_STDOUT=1
commands =
python -m tools/tempest_coverage -c start --combine
- nosetests --logging-format '%(asctime)-15s %(message)s' --with-xunit --xunit-file=nosetests-full.xml -sv tempest/api tempest/scenario tempest/thirdparty tempest/cli
+ sh tools/pretty_tox_serial.sh '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario|thirdparty|cli))'
python -m tools/tempest_coverage -c report --html {posargs}
[testenv:stress]
sitepackages = True
setenv = VIRTUAL_ENV={envdir}
commands =
- python -m tempest/stress/run_stress tempest/stress/etc/sample-test.json -d 60
- python -m tempest/stress/run_stress tempest/stress/etc/volume-create-delete-test.json -d 60
-
+ python -m tempest/stress/run_stress tempest/stress/etc/stress-tox-job.json -d 3600
[testenv:venv]
commands = {posargs}
@@ -83,6 +61,7 @@
local-check-factory = tempest.hacking.checks.factory
[flake8]
+# E125 is a won't fix until https://github.com/jcrocholl/pep8/issues/126 is resolved. For further detail see https://review.openstack.org/#/c/36788/
ignore = E125,H302,H404
show-source = True
exclude = .git,.venv,.tox,dist,doc,openstack,*egg