Merge "Add a create_server test for flavor authorization"
diff --git a/HACKING.rst b/HACKING.rst
index 03e7dc3..2fa949d 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -20,6 +20,7 @@
- [T102] Cannot import OpenStack python clients in tempest/api tests
- [T103] tempest/tests is deprecated
+- [T104] Scenario tests require a services decorator
Test Data/Configuration
-----------------------
@@ -96,6 +97,24 @@
credentials management, testresources and so on. These facilities MUST be able
to work even if just one ``test_method`` is selected for execution.
+Service Tagging
+---------------
+Service tagging is used to specify which services are exercised by a particular
+test method. You specify the services with the ``tempest.test.services``
+decorator. For example:
+
+@services('compute', 'image')
+
+Valid service tag names are the same as the list of directories in tempest.api
+that have tests.
+
+For scenario tests a service tag is required. For the api tests, service
+tags are only needed if the test method makes an api call (either directly or
+indirectly through another service) to a service that differs from the parent
+directory name. For example, any test in tempest.api.compute that makes an api
+call to a service other than nova would require a service tag for those
+services, but it does not need to be tagged as compute.
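+
+As a short illustration (a sketch only; the test class and method below are
+hypothetical, though the imports mirror ones used elsewhere in this change),
+an api test under tempest.api.volume that also boots a server would be tagged
+with the compute service::
+
+    from tempest.api.volume.base import BaseVolumeTest
+    from tempest.test import attr
+    from tempest.test import services
+
+
+    class VolumesActionsExample(BaseVolumeTest):
+
+        @attr(type='smoke')
+        @services('compute')
+        def test_attach_volume_to_instance(self):
+            # Attaching a volume calls the nova api, so the test is tagged
+            # with 'compute' even though it lives in the volume directory.
+            pass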
+
Guidelines
----------
- Do not submit changesets with only testcases which are skipped as
@@ -103,3 +122,28 @@
- Consistently check the status code of responses in testcases. The
earlier a problem is detected the easier it is to debug, especially
where there is complicated setup required.
+
+Parallel Test Execution
+------------------------
+Tempest by default runs its tests in parallel; this creates the possibility for
+interesting interactions between tests which can cause unexpected failures.
+Tenant isolation provides protection from most of the potential race conditions
+between tests outside the same class. But there are still a few things to
+watch out for to try to avoid issues when running your tests in parallel.
+
+- Resources outside of a tenant scope still have the potential to conflict. This
+ is a larger concern for the admin tests since most resources and actions that
+  require admin privileges are outside of tenants.
+
+- Races between methods in the same class are not a problem because
+  parallelization in tempest is at the test class level, but if there are json
+  and xml versions of the same test class there could still be a race between
+  methods.
+
+- The rand_name() function from tempest.common.utils.data_utils should be used
+ anywhere a resource is created with a name. Static naming should be avoided
+ to prevent resource conflicts.
+
+- If the execution of a set of tests needs to be serialized, locking can be
+  used to enforce this. See AggregatesAdminTest in
+  tempest.api.compute.admin for an example of using locking, and the sketch
+  after this list.
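+
+As a rough sketch of the locking pattern (it mirrors the LockFixture added to
+tempest/api/compute/admin/test_aggregates.py in this change; the lockutils
+import path is an assumption), a test that must not run concurrently with
+other tests touching the same availability zone can take an external lock and
+use rand_name() for the resources it creates::
+
+    import fixtures
+
+    from tempest.common.utils.data_utils import rand_name
+    # Assumed import path; use whichever lockutils module tempest ships.
+    from tempest.openstack.common import lockutils
+
+
+    class LockFixture(fixtures.Fixture):
+        def __init__(self, name):
+            self.mgr = lockutils.lock(name, 'tempest-', True)
+
+        def setUp(self):
+            super(LockFixture, self).setUp()
+            # Release the lock when the test finishes, even on failure.
+            self.addCleanup(self.mgr.__exit__, None, None, None)
+            self.mgr.__enter__()
+
+    # Inside a test method:
+    #     self.useFixture(LockFixture('availability_zone'))
+    #     aggregate_name = rand_name('test_aggregate')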
diff --git a/bin/tempest b/bin/tempest
deleted file mode 100755
index 87ba6d5..0000000
--- a/bin/tempest
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/usr/bin/env bash
-
-function usage {
- echo "Usage: $0 [OPTION]..."
- echo "Run Tempest test suite"
- echo ""
- echo " -s, --smoke Only run smoke tests"
- echo " -w, --whitebox Only run whitebox tests"
- echo " -h, --help Print this usage message"
- echo " -d. --debug Debug this script -- set -o xtrace"
- exit
-}
-
-function process_option {
- case "$1" in
- -h|--help) usage;;
- -d|--debug) set -o xtrace;;
- -s|--smoke) noseargs="$noseargs --attr=type=smoke";;
- -w|--whitebox) noseargs="$noseargs --attr=type=whitebox";;
- *) noseargs="$noseargs $1"
- esac
-}
-
-noseargs=""
-
-export NOSE_WITH_OPENSTACK=1
-export NOSE_OPENSTACK_COLOR=1
-export NOSE_OPENSTACK_RED=15.00
-export NOSE_OPENSTACK_YELLOW=3.00
-export NOSE_OPENSTACK_SHOW_ELAPSED=1
-export NOSE_OPENSTACK_STDOUT=1
-
-for arg in "$@"; do
- process_option $arg
-done
-
-
-# only add tempest default if we don't specify a test
-if [[ "x$noseargs" =~ "tempest" ]]; then
- noseargs="$noseargs"
-else
- noseargs="$noseargs tempest"
-fi
-
-
-function run_tests {
- $NOSETESTS
-}
-
-NOSETESTS="nosetests $noseargs"
-
-run_tests || exit
diff --git a/doc/source/field_guide/whitebox.rst b/doc/source/field_guide/whitebox.rst
deleted file mode 120000
index 47f6069..0000000
--- a/doc/source/field_guide/whitebox.rst
+++ /dev/null
@@ -1 +0,0 @@
-../../../tempest/whitebox/README.rst
\ No newline at end of file
diff --git a/doc/source/index.rst b/doc/source/index.rst
index f70cdd1..1c32b9c 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -31,7 +31,6 @@
field_guide/scenario
field_guide/stress
field_guide/thirdparty
- field_guide/whitebox
field_guide/unit_tests
------------------
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index d39ef70..cd57354 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -53,6 +53,9 @@
# The above administrative user's tenant name
admin_tenant_name = admin
+# The role that is required to administrate keystone.
+admin_role = admin
+
[compute]
# This section contains configuration options used when executing tests
# against the OpenStack Compute API.
@@ -76,10 +79,18 @@
flavor_ref = 1
flavor_ref_alt = 2
-# User names used to authenticate to an instance for a given image.
+# User name used to authenticate to an instance
image_ssh_user = root
+
+# Password used to authenticate to an instance
+image_ssh_password = password
+
+# User name used to authenticate to an instance using the alternate image
image_alt_ssh_user = root
+# Password used to authenticate to an instance using the alternate image
+image_alt_ssh_password = password
+
# Number of seconds to wait while looping to check the status of an
# instance that is building.
build_interval = 10
@@ -93,7 +104,7 @@
# executing the tests
run_ssh = false
-# Name of a user used to authenticated to an instance
+# Name of a user used to authenticate to an instance.
ssh_user = cirros
# Visible fixed network name
@@ -111,6 +122,10 @@
# Number of seconds to wait to authenticate to an instance
ssh_timeout = 300
+# Additional wait time for clean state, when there is
+# no OS-EXT-STS extension available
+ready_wait = 0
+
# Number of seconds to wait for output from ssh channel
ssh_channel_timeout = 60
@@ -150,28 +165,8 @@
# When set to false, flavor extra data tests are forced to skip
flavor_extra_enabled = true
-[whitebox]
-# Whitebox options for compute. Whitebox options enable the
-# whitebox test cases, which look at internal Nova database state,
-# SSH into VMs to check instance state, etc.
-
-# Should we run whitebox tests for Compute?
-whitebox_enabled = true
-
-# Path of nova source directory
-source_dir = /opt/stack/nova
-
-# Path of nova configuration file
-config_path = /etc/nova/nova.conf
-
-# Directory containing nova binaries such as nova-manage
-bin_dir = /usr/local/bin
-
-# Connection string to the database of Compute service
-db_uri = mysql://nova:secret@localhost/nova
-
-# Path to a private key file for SSH access to remote hosts
-path_to_private_key = /home/user/.ssh/id_rsa
+# Expected first device name when a volume is attached to an instance
+volume_device_name = vdb
[compute-admin]
# This should be the username of a user WITH administrative privileges
@@ -235,6 +230,8 @@
# Unless you have a custom Keystone service catalog implementation, you
# probably want to leave this value as "volume"
catalog_type = volume
+# The disk format to use when copying a volume to image
+disk_format = raw
# Number of seconds to wait while looping to check the status of a
# volume that is being made available
build_interval = 10
diff --git a/requirements.txt b/requirements.txt
index 877b23c..b15fb92 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -18,6 +18,7 @@
keyring>=1.6.1
testrepository>=0.0.17
oslo.config>=1.1.0
-# Needed for whitebox testing
-SQLAlchemy>=0.7.8,<=0.7.99
eventlet>=0.13.0
+six<1.4.0
+iso8601>=0.1.4
+fixtures>=0.3.14
diff --git a/run_tests.sh b/run_tests.sh
index d672b62..710fbaa 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -10,7 +10,6 @@
echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added."
echo " -u, --update Update the virtual environment with any newer package versions"
echo " -s, --smoke Only run smoke tests"
- echo " -w, --whitebox Only run whitebox tests"
echo " -t, --serial Run testr serially"
echo " -c, --nova-coverage Enable Nova coverage collection"
echo " -C, --config Config file location"
@@ -38,7 +37,7 @@
logging=0
logging_config=etc/logging.conf
-if ! options=$(getopt -o VNnfuswtcphdC:lL: -l virtual-env,no-virtual-env,no-site-packages,force,update,smoke,whitebox,serial,nova-coverage,pep8,help,debug,config:,logging,logging-config: -- "$@")
+if ! options=$(getopt -o VNnfustcphdC:lL: -l virtual-env,no-virtual-env,no-site-packages,force,update,smoke,serial,nova-coverage,pep8,help,debug,config:,logging,logging-config: -- "$@")
then
# parse error
usage
@@ -60,7 +59,6 @@
-C|--config) config_file=$2; shift;;
-p|--pep8) let just_pep8=1;;
-s|--smoke) testrargs="$testrargs smoke";;
- -w|--whitebox) testrargs="$testrargs whitebox";;
-t|--serial) serial=1;;
-l|--logging) logging=1;;
-L|--logging-config) logging_config=$2; shift;;
diff --git a/setup.cfg b/setup.cfg
index 7cfc4ce..a4cf118 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -21,10 +21,6 @@
setup-hooks =
pbr.hooks.setup_hook
-[files]
-scripts =
- bin/tempest
-
[build_sphinx]
all_files = 1
build-dir = doc/build
diff --git a/tempest/README.rst b/tempest/README.rst
index 33021c8..dbac809 100644
--- a/tempest/README.rst
+++ b/tempest/README.rst
@@ -18,7 +18,6 @@
| scenario/ - complex scenario tests
| stress/ - stress tests
| thirdparty/ - 3rd party api tests
-| whitebox/ - white box testing
Each of these directories contains different types of tests. What
belongs in each directory, the rules and examples for good tests, are
@@ -78,11 +77,3 @@
completely legitimate for Tempest to include tests of 3rdparty APIs,
but those should be kept separate from the normal OpenStack
validation.
-
-
-whitebox
---------
-
-Whitebox tests are tests which require access to the database of the
-target OpenStack machine to verify internal state after operations
-are made. White box tests are allowed to use the python clients.
diff --git a/tempest/api/compute/admin/test_aggregates.py b/tempest/api/compute/admin/test_aggregates.py
index 303bc0c..0bb0460 100644
--- a/tempest/api/compute/admin/test_aggregates.py
+++ b/tempest/api/compute/admin/test_aggregates.py
@@ -15,6 +15,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import fixtures
+
from tempest.api.compute import base
from tempest.common.utils.data_utils import rand_name
from tempest import exceptions
@@ -22,6 +24,16 @@
from tempest.test import attr
+class LockFixture(fixtures.Fixture):
+ def __init__(self, name):
+ self.mgr = lockutils.lock(name, 'tempest-', True)
+
+ def setUp(self):
+ super(LockFixture, self).setUp()
+ self.addCleanup(self.mgr.__exit__, None, None, None)
+ self.mgr.__enter__()
+
+
class AggregatesAdminTestJSON(base.BaseComputeAdminTest):
"""
@@ -146,9 +158,9 @@
self.client.get_aggregate, -1)
@attr(type='gate')
- @lockutils.synchronized('availability_zone', 'tempest-', True)
def test_aggregate_add_remove_host(self):
# Add an host to the given aggregate and remove.
+ self.useFixture(LockFixture('availability_zone'))
aggregate_name = rand_name(self.aggregate_name_prefix)
resp, aggregate = self.client.create_aggregate(aggregate_name)
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
@@ -168,9 +180,9 @@
self.assertNotIn(self.host, body['hosts'])
@attr(type='gate')
- @lockutils.synchronized('availability_zone', 'tempest-', True)
def test_aggregate_add_host_list(self):
# Add an host to the given aggregate and list.
+ self.useFixture(LockFixture('availability_zone'))
aggregate_name = rand_name(self.aggregate_name_prefix)
resp, aggregate = self.client.create_aggregate(aggregate_name)
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
@@ -186,9 +198,9 @@
self.assertIn(self.host, agg['hosts'])
@attr(type='gate')
- @lockutils.synchronized('availability_zone', 'tempest-', True)
def test_aggregate_add_host_get_details(self):
# Add an host to the given aggregate and get details.
+ self.useFixture(LockFixture('availability_zone'))
aggregate_name = rand_name(self.aggregate_name_prefix)
resp, aggregate = self.client.create_aggregate(aggregate_name)
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
@@ -201,9 +213,9 @@
self.assertIn(self.host, body['hosts'])
@attr(type='gate')
- @lockutils.synchronized('availability_zone', 'tempest-', True)
def test_aggregate_add_host_create_server_with_az(self):
# Add an host to the given aggregate and create a server.
+ self.useFixture(LockFixture('availability_zone'))
aggregate_name = rand_name(self.aggregate_name_prefix)
az_name = rand_name(self.az_name_prefix)
resp, aggregate = self.client.create_aggregate(aggregate_name, az_name)
@@ -248,9 +260,9 @@
aggregate['id'], self.host)
@attr(type=['negative', 'gate'])
- @lockutils.synchronized('availability_zone', 'tempest-', True)
def test_aggregate_remove_host_as_user(self):
# Regular user is not allowed to remove a host from an aggregate.
+ self.useFixture(LockFixture('availability_zone'))
aggregate_name = rand_name(self.aggregate_name_prefix)
resp, aggregate = self.client.create_aggregate(aggregate_name)
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
diff --git a/tempest/api/compute/admin/test_availability_zone.py b/tempest/api/compute/admin/test_availability_zone.py
index 8a56b89..d1e1be6 100644
--- a/tempest/api/compute/admin/test_availability_zone.py
+++ b/tempest/api/compute/admin/test_availability_zone.py
@@ -51,7 +51,7 @@
@attr(type='gate')
def test_get_availability_zone_list_with_non_admin_user(self):
- # List of availability zone with non admin user
+ # List of availability zone with non-administrator user
resp, availability_zone = \
self.non_adm_client.get_availability_zone_list()
self.assertEqual(200, resp.status)
@@ -59,7 +59,8 @@
@attr(type=['negative', 'gate'])
def test_get_availability_zone_list_detail_with_non_admin_user(self):
- # List of availability zones and available services with non admin user
+ # List of availability zones and available services with
+ # non-administrator user
self.assertRaises(
exceptions.Unauthorized,
self.non_adm_client.get_availability_zone_list_detail)
diff --git a/tempest/api/compute/admin/test_fixed_ips.py b/tempest/api/compute/admin/test_fixed_ips.py
index 895f773..85b03e6 100644
--- a/tempest/api/compute/admin/test_fixed_ips.py
+++ b/tempest/api/compute/admin/test_fixed_ips.py
@@ -21,42 +21,29 @@
from tempest.test import attr
-class FixedIPsBase(base.BaseComputeAdminTest):
- _interface = 'json'
- ip = None
-
- @classmethod
- def setUpClass(cls):
- super(FixedIPsBase, cls).setUpClass()
- if cls.config.service_available.neutron:
- msg = ("%s skipped as neutron is available" % cls.__name__)
- raise cls.skipException(msg)
- # NOTE(maurosr): The idea here is: the server creation is just an
- # auxiliary element to the ip details or reservation, there was no way
- # (at least none in my mind) to get an valid and existing ip except
- # by creating a server and using its ip. So the intention is to create
- # fewer server possible (one) and use it to both: json and xml tests.
- # This decreased time to run both tests, in my test machine, from 53
- # secs to 29 (agains 23 secs when running only json tests)
- if cls.ip is None:
- cls.client = cls.os_adm.fixed_ips_client
- cls.non_admin_client = cls.fixed_ips_client
- resp, server = cls.create_server(wait_until='ACTIVE')
- resp, server = cls.servers_client.get_server(server['id'])
- for ip_set in server['addresses']:
- for ip in server['addresses'][ip_set]:
- if ip['OS-EXT-IPS:type'] == 'fixed':
- cls.ip = ip['addr']
- break
- if cls.ip:
- break
-
-
-class FixedIPsTestJson(FixedIPsBase):
+class FixedIPsTestJson(base.BaseComputeAdminTest):
_interface = 'json'
CONF = config.TempestConfig()
+ @classmethod
+ def setUpClass(cls):
+ super(FixedIPsTestJson, cls).setUpClass()
+ if cls.config.service_available.neutron:
+ msg = ("%s skipped as neutron is available" % cls.__name__)
+ raise cls.skipException(msg)
+ cls.client = cls.os_adm.fixed_ips_client
+ cls.non_admin_client = cls.fixed_ips_client
+ resp, server = cls.create_server(wait_until='ACTIVE')
+ resp, server = cls.servers_client.get_server(server['id'])
+ for ip_set in server['addresses']:
+ for ip in server['addresses'][ip_set]:
+ if ip['OS-EXT-IPS:type'] == 'fixed':
+ cls.ip = ip['addr']
+ break
+ if cls.ip:
+ break
+
@attr(type='gate')
def test_list_fixed_ip_details(self):
resp, fixed_ip = self.client.get_fixed_ip_details(self.ip)
diff --git a/tempest/api/compute/admin/test_flavors_extra_specs.py b/tempest/api/compute/admin/test_flavors_extra_specs.py
index f2f82b5..ace77a6 100644
--- a/tempest/api/compute/admin/test_flavors_extra_specs.py
+++ b/tempest/api/compute/admin/test_flavors_extra_specs.py
@@ -17,6 +17,8 @@
from tempest.api import compute
from tempest.api.compute import base
+from tempest.common.utils.data_utils import rand_int_id
+from tempest.common.utils.data_utils import rand_name
from tempest import exceptions
from tempest.test import attr
@@ -39,12 +41,12 @@
raise cls.skipException(msg)
cls.client = cls.os_adm.flavors_client
- flavor_name = 'test_flavor2'
+ flavor_name = rand_name('test_flavor')
ram = 512
vcpus = 1
disk = 10
ephemeral = 10
- cls.new_flavor_id = 12345
+ cls.new_flavor_id = rand_int_id(start=1000)
swap = 1024
rxtx = 1
# Create a flavor so as to set/get/unset extra specs
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index acf0275..09d9bc0 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -73,6 +73,8 @@
cls.build_interval = cls.config.compute.build_interval
cls.build_timeout = cls.config.compute.build_timeout
cls.ssh_user = cls.config.compute.ssh_user
+ cls.image_ssh_user = cls.config.compute.image_ssh_user
+ cls.image_ssh_password = cls.config.compute.image_ssh_password
cls.image_ref = cls.config.compute.image_ref
cls.image_ref_alt = cls.config.compute.image_ref_alt
cls.flavor_ref = cls.config.compute.flavor_ref
diff --git a/tempest/api/compute/flavors/test_flavors.py b/tempest/api/compute/flavors/test_flavors.py
index 51ce20c..c3ba671 100644
--- a/tempest/api/compute/flavors/test_flavors.py
+++ b/tempest/api/compute/flavors/test_flavors.py
@@ -52,7 +52,7 @@
@attr(type=['negative', 'gate'])
def test_get_non_existant_flavor(self):
- # flavor details are not returned for non existant flavors
+ # flavor details are not returned for non-existent flavors
self.assertRaises(exceptions.NotFound, self.client.get_flavor_details,
999)
@@ -150,7 +150,7 @@
@attr(type=['negative', 'gate'])
def test_get_flavor_details_for_invalid_flavor_id(self):
- # Ensure 404 returned for non-existant flavor ID
+ # Ensure 404 returned for non-existent flavor ID
self.assertRaises(exceptions.NotFound, self.client.get_flavor_details,
9999)
diff --git a/tempest/api/compute/floating_ips/test_list_floating_ips.py b/tempest/api/compute/floating_ips/test_list_floating_ips.py
index e380334..f5baa3c 100644
--- a/tempest/api/compute/floating_ips/test_list_floating_ips.py
+++ b/tempest/api/compute/floating_ips/test_list_floating_ips.py
@@ -80,12 +80,12 @@
@attr(type=['negative', 'gate'])
def test_get_nonexistant_floating_ip_details(self):
# Negative test:Should not be able to GET the details
- # of nonexistant floating IP
+ # of non-existent floating IP
floating_ip_id = []
resp, body = self.client.list_floating_ips()
for i in range(len(body)):
floating_ip_id.append(body[i]['id'])
- # Creating a nonexistant floatingIP id
+ # Creating a non-existent floatingIP id
while True:
non_exist_id = rand_name('999')
if non_exist_id not in floating_ip_id:
diff --git a/tempest/api/compute/images/test_image_metadata.py b/tempest/api/compute/images/test_image_metadata.py
index 52239cd..a769744 100644
--- a/tempest/api/compute/images/test_image_metadata.py
+++ b/tempest/api/compute/images/test_image_metadata.py
@@ -120,20 +120,20 @@
@attr(type=['negative', 'gate'])
def test_update_nonexistant_image_metadata(self):
- # Negative test:An update should not happen for a nonexistant image
+ # Negative test:An update should not happen for a non-existent image
meta = {'key1': 'alt1', 'key2': 'alt2'}
self.assertRaises(exceptions.NotFound,
self.client.update_image_metadata, 999, meta)
@attr(type=['negative', 'gate'])
def test_get_nonexistant_image_metadata_item(self):
- # Negative test: Get on nonexistant image should not happen
+ # Negative test: Get on non-existent image should not happen
self.assertRaises(exceptions.NotFound,
self.client.get_image_metadata_item, 999, 'key2')
@attr(type=['negative', 'gate'])
def test_set_nonexistant_image_metadata(self):
- # Negative test: Metadata should not be set to a nonexistant image
+ # Negative test: Metadata should not be set to a non-existent image
meta = {'key1': 'alt1', 'key2': 'alt2'}
self.assertRaises(exceptions.NotFound, self.client.set_image_metadata,
999, meta)
@@ -149,8 +149,8 @@
@attr(type=['negative', 'gate'])
def test_delete_nonexistant_image_metadata_item(self):
- # Negative test: Shouldnt be able to delete metadata
- # item from nonexistant image
+ # Negative test: Shouldn't be able to delete metadata
+ # item from non-existent image
self.assertRaises(exceptions.NotFound,
self.client.delete_image_metadata_item, 999, 'key1')
diff --git a/tempest/api/compute/images/test_list_image_filters.py b/tempest/api/compute/images/test_list_image_filters.py
index a80f456..e700278 100644
--- a/tempest/api/compute/images/test_list_image_filters.py
+++ b/tempest/api/compute/images/test_list_image_filters.py
@@ -230,7 +230,7 @@
@attr(type=['negative', 'gate'])
def test_get_nonexistant_image(self):
- # Negative test: GET on non existant image should fail
+ # Negative test: GET on non-existent image should fail
self.assertRaises(exceptions.NotFound, self.client.get_image, 999)
diff --git a/tempest/api/compute/keypairs/test_keypairs.py b/tempest/api/compute/keypairs/test_keypairs.py
index 083fbd7..78c547a 100644
--- a/tempest/api/compute/keypairs/test_keypairs.py
+++ b/tempest/api/compute/keypairs/test_keypairs.py
@@ -157,7 +157,7 @@
k_name = rand_name('keypair-')
resp, _ = self.client.create_keypair(k_name)
self.assertEqual(200, resp.status)
- # Now try the same keyname to ceate another key
+ # Now try the same keyname to create another key
self.assertRaises(exceptions.Duplicate, self.client.create_keypair,
k_name)
resp, _ = self.client.delete_keypair(k_name)
diff --git a/tempest/api/compute/security_groups/test_security_group_rules.py b/tempest/api/compute/security_groups/test_security_group_rules.py
index 472b8b4..6071e54 100644
--- a/tempest/api/compute/security_groups/test_security_group_rules.py
+++ b/tempest/api/compute/security_groups/test_security_group_rules.py
@@ -35,7 +35,7 @@
@attr(type='gate')
def test_security_group_rules_create(self):
# Positive test: Creation of Security Group rule
- # should be successfull
+ # should be successful
# Creating a Security Group to add rules to it
s_name = rand_name('securitygroup-')
s_description = rand_name('description-')
@@ -59,7 +59,7 @@
def test_security_group_rules_create_with_optional_arguments(self):
# Positive test: Creation of Security Group rule
# with optional arguments
- # should be successfull
+ # should be successful
secgroup1 = None
secgroup2 = None
diff --git a/tempest/api/compute/security_groups/test_security_groups.py b/tempest/api/compute/security_groups/test_security_groups.py
index 30db206..3e459a2 100644
--- a/tempest/api/compute/security_groups/test_security_groups.py
+++ b/tempest/api/compute/security_groups/test_security_groups.py
@@ -112,12 +112,12 @@
@attr(type=['negative', 'gate'])
def test_security_group_get_nonexistant_group(self):
# Negative test:Should not be able to GET the details
- # of nonexistant Security Group
+ # of non-existent Security Group
security_group_id = []
resp, body = self.client.list_security_groups()
for i in range(len(body)):
security_group_id.append(body[i]['id'])
- # Creating a nonexistant Security Group id
+ # Creating a non-existent Security Group id
while True:
non_exist_id = rand_name('999')
if non_exist_id not in security_group_id:
@@ -201,12 +201,12 @@
"Skipped until the Bug #1182384 is resolved")
@attr(type=['negative', 'gate'])
def test_delete_nonexistant_security_group(self):
- # Negative test:Deletion of a nonexistant Security Group should Fail
+ # Negative test:Deletion of a non-existent Security Group should Fail
security_group_id = []
resp, body = self.client.list_security_groups()
for i in range(len(body)):
security_group_id.append(body[i]['id'])
- # Creating Non Existant Security Group
+ # Creating non-existent Security Group
while True:
non_exist_id = rand_name('999')
if non_exist_id not in security_group_id:
diff --git a/tempest/api/compute/servers/test_server_metadata.py b/tempest/api/compute/servers/test_server_metadata.py
index 45de0d6..9997b97 100644
--- a/tempest/api/compute/servers/test_server_metadata.py
+++ b/tempest/api/compute/servers/test_server_metadata.py
@@ -115,7 +115,7 @@
@attr(type='gate')
def test_get_server_metadata_item(self):
- # The value for a specic metadata key should be returned
+ # The value for a specific metadata key should be returned
resp, meta = self.client.get_server_metadata_item(self.server_id,
'key2')
self.assertTrue('value2', meta['key2'])
@@ -148,13 +148,13 @@
@attr(type=['negative', 'gate'])
def test_get_nonexistant_server_metadata_item(self):
- # Negative test: GET on nonexistant server should not succeed
+ # Negative test: GET on a non-existent server should not succeed
self.assertRaises(exceptions.NotFound,
self.client.get_server_metadata_item, 999, 'test2')
@attr(type=['negative', 'gate'])
def test_list_nonexistant_server_metadata(self):
- # Negative test:List metadata on a non existant server should
+ # Negative test:List metadata on a non-existent server should
# not succeed
self.assertRaises(exceptions.NotFound,
self.client.list_server_metadata, 999)
@@ -171,7 +171,7 @@
@attr(type=['negative', 'gate'])
def test_set_nonexistant_server_metadata(self):
- # Negative test: Set metadata on a non existant server should not
+ # Negative test: Set metadata on a non-existent server should not
# succeed
meta = {'meta1': 'data1'}
self.assertRaises(exceptions.NotFound,
@@ -179,7 +179,7 @@
@attr(type=['negative', 'gate'])
def test_update_nonexistant_server_metadata(self):
- # Negative test: An update should not happen for a nonexistant image
+ # Negative test: An update should not happen for a non-existent image
meta = {'key1': 'value1', 'key2': 'value2'}
self.assertRaises(exceptions.NotFound,
self.client.update_server_metadata, 999, meta)
@@ -195,7 +195,7 @@
@attr(type=['negative', 'gate'])
def test_delete_nonexistant_server_metadata_item(self):
# Negative test: Should not be able to delete metadata item from a
- # nonexistant server
+ # non-existent server
# Delete the metadata item
self.assertRaises(exceptions.NotFound,
diff --git a/tempest/api/compute/servers/test_servers_negative.py b/tempest/api/compute/servers/test_servers_negative.py
index e09a23f..226c40e 100644
--- a/tempest/api/compute/servers/test_servers_negative.py
+++ b/tempest/api/compute/servers/test_servers_negative.py
@@ -154,7 +154,7 @@
@attr(type=['negative', 'gate'])
def test_create_with_non_existant_keypair(self):
- # Pass a non existant keypair while creating a server
+ # Pass a non-existent keypair while creating a server
key_name = rand_name('key')
self.assertRaises(exceptions.BadRequest,
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index b67a5e0..ee1ad9e 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -36,7 +36,7 @@
@classmethod
def setUpClass(cls):
super(AttachVolumeTestJSON, cls).setUpClass()
- cls.device = 'vdb'
+ cls.device = cls.config.compute.volume_device_name
if not cls.config.service_available.cinder:
skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
raise cls.skipException(skip_msg)
@@ -54,7 +54,7 @@
def _create_and_attach(self):
# Start a server and wait for it to become ready
resp, server = self.create_server(wait_until='ACTIVE',
- adminPass='password')
+ adminPass=self.image_ssh_password)
self.server = server
# Record addresses so that we can ssh later
@@ -92,7 +92,7 @@
self.servers_client.wait_for_server_status(server['id'], 'ACTIVE')
linux_client = RemoteClient(server,
- self.ssh_user, server['adminPass'])
+ self.image_ssh_user, server['adminPass'])
partitions = linux_client.get_partitions()
self.assertIn(self.device, partitions)
@@ -106,7 +106,7 @@
self.servers_client.wait_for_server_status(server['id'], 'ACTIVE')
linux_client = RemoteClient(server,
- self.ssh_user, server['adminPass'])
+ self.image_ssh_user, server['adminPass'])
partitions = linux_client.get_partitions()
self.assertNotIn(self.device, partitions)
diff --git a/tempest/api/identity/admin/test_roles.py b/tempest/api/identity/admin/test_roles.py
index cc112cc..ac145c6 100644
--- a/tempest/api/identity/admin/test_roles.py
+++ b/tempest/api/identity/admin/test_roles.py
@@ -56,7 +56,7 @@
@attr(type='gate')
def test_list_roles_by_unauthorized_user(self):
- # Non admin user should not be able to list roles
+ # Non-administrator user should not be able to list roles
self.assertRaises(exceptions.Unauthorized,
self.non_admin_client.list_roles)
@@ -116,7 +116,8 @@
@attr(type='gate')
def test_assign_user_role_by_unauthorized_user(self):
- # Non admin user should not be authorized to assign a role to user
+ # Non-administrator user should not be authorized to
+ # assign a role to user
(user, tenant, role) = self._get_role_params()
self.assertRaises(exceptions.Unauthorized,
self.non_admin_client.assign_user_role,
@@ -174,7 +175,8 @@
@attr(type='gate')
def test_remove_user_role_by_unauthorized_user(self):
- # Non admin user should not be authorized to remove a user's role
+ # Non-administrator user should not be authorized to
+ # remove a user's role
(user, tenant, role) = self._get_role_params()
resp, user_role = self.client.assign_user_role(tenant['id'],
user['id'],
@@ -237,7 +239,8 @@
@attr(type='gate')
def test_list_user_roles_by_unauthorized_user(self):
- # Non admin user should not be authorized to list a user's roles
+ # Non-administrator user should not be authorized to list
+ # a user's roles
(user, tenant, role) = self._get_role_params()
self.client.assign_user_role(tenant['id'], user['id'], role['id'])
self.assertRaises(exceptions.Unauthorized,
diff --git a/tempest/api/identity/admin/test_tenants.py b/tempest/api/identity/admin/test_tenants.py
index e8625db..a61a115 100644
--- a/tempest/api/identity/admin/test_tenants.py
+++ b/tempest/api/identity/admin/test_tenants.py
@@ -26,7 +26,7 @@
@attr(type='gate')
def test_list_tenants_by_unauthorized_user(self):
- # Non-admin user should not be able to list tenants
+ # Non-administrator user should not be able to list tenants
self.assertRaises(exceptions.Unauthorized,
self.non_admin_client.list_tenants)
@@ -63,7 +63,7 @@
@attr(type='gate')
def test_tenant_delete_by_unauthorized_user(self):
- # Non-admin user should not be able to delete a tenant
+ # Non-administrator user should not be able to delete a tenant
tenant_name = rand_name('tenant-')
resp, tenant = self.client.create_tenant(tenant_name)
self.data.tenants.append(tenant)
@@ -164,7 +164,7 @@
@attr(type='gate')
def test_create_tenant_by_unauthorized_user(self):
- # Non-admin user should not be authorized to create a tenant
+ # Non-administrator user should not be authorized to create a tenant
tenant_name = rand_name('tenant-')
self.assertRaises(exceptions.Unauthorized,
self.non_admin_client.create_tenant, tenant_name)
diff --git a/tempest/api/identity/admin/test_users.py b/tempest/api/identity/admin/test_users.py
index 4cfeb45..3455b5d 100644
--- a/tempest/api/identity/admin/test_users.py
+++ b/tempest/api/identity/admin/test_users.py
@@ -48,7 +48,7 @@
@attr(type=['negative', 'gate'])
def test_create_user_by_unauthorized_user(self):
- # Non-admin should not be authorized to create a user
+ # Non-administrator should not be authorized to create a user
self.data.setup_test_tenant()
self.assertRaises(exceptions.Unauthorized,
self.non_admin_client.create_user, self.alt_user,
@@ -115,7 +115,7 @@
@attr(type=['negative', 'gate'])
def test_delete_users_by_unauthorized_user(self):
- # Non admin user should not be authorized to delete a user
+ # Non-administrator user should not be authorized to delete a user
self.data.setup_test_user()
self.assertRaises(exceptions.Unauthorized,
self.non_admin_client.delete_user,
@@ -213,7 +213,7 @@
@attr(type=['negative', 'gate'])
def test_get_users_by_unauthorized_user(self):
- # Non admin user should not be authorized to get user list
+ # Non-administrator user should not be authorized to get user list
self.data.setup_test_user()
self.assertRaises(exceptions.Unauthorized,
self.non_admin_client.get_users)
@@ -301,7 +301,7 @@
@attr(type=['negative', 'gate'])
def test_list_users_with_invalid_tenant(self):
# Should not be able to return a list of all
- # users for a nonexistant tenant
+ # users for a non-existent tenant
# Assign invalid tenant ids
invalid_id = list()
invalid_id.append(rand_name('999'))
diff --git a/tempest/api/identity/admin/v3/test_credentials.py b/tempest/api/identity/admin/v3/test_credentials.py
new file mode 100644
index 0000000..efd2f83
--- /dev/null
+++ b/tempest/api/identity/admin/v3/test_credentials.py
@@ -0,0 +1,120 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.identity import base
+from tempest.common.utils.data_utils import rand_name
+from tempest.test import attr
+
+
+class CredentialsTestJSON(base.BaseIdentityAdminTest):
+ _interface = 'json'
+
+ @classmethod
+ def setUpClass(cls):
+ super(CredentialsTestJSON, cls).setUpClass()
+ cls.projects = list()
+ cls.creds_list = [['project_id', 'user_id', 'id'],
+ ['access', 'secret']]
+ u_name = rand_name('user-')
+ u_desc = '%s description' % u_name
+ u_email = '%s@testmail.tm' % u_name
+ u_password = rand_name('pass-')
+ for i in range(2):
+ resp, cls.project = cls.v3_client.create_project(
+ rand_name('project-'), description=rand_name('project-desc-'))
+ assert resp['status'] == '201', "Expected %s" % resp['status']
+ cls.projects.append(cls.project['id'])
+
+ resp, cls.user_body = cls.v3_client.create_user(
+ u_name, description=u_desc, password=u_password,
+ email=u_email, project_id=cls.projects[0])
+ assert resp['status'] == '201', "Expected: %s" % resp['status']
+
+ @classmethod
+ def tearDownClass(cls):
+ resp, _ = cls.v3_client.delete_user(cls.user_body['id'])
+ assert resp['status'] == '204', "Expected: %s" % resp['status']
+ for p in cls.projects:
+ resp, _ = cls.v3_client.delete_project(p)
+ assert resp['status'] == '204', "Expected: %s" % resp['status']
+ super(CredentialsTestJSON, cls).tearDownClass()
+
+ def _delete_credential(self, cred_id):
+ resp, body = self.creds_client.delete_credential(cred_id)
+ self.assertEqual(resp['status'], '204')
+
+ @attr(type='smoke')
+ def test_credentials_create_get_update_delete(self):
+ keys = [rand_name('Access-'), rand_name('Secret-')]
+ resp, cred = self.creds_client.create_credential(
+ keys[0], keys[1], self.user_body['id'],
+ self.projects[0])
+ self.addCleanup(self._delete_credential, cred['id'])
+ self.assertEqual(resp['status'], '201')
+ for value1 in self.creds_list[0]:
+ self.assertIn(value1, cred)
+ for value2 in self.creds_list[1]:
+ self.assertIn(value2, cred['blob'])
+
+ new_keys = [rand_name('NewAccess-'), rand_name('NewSecret-')]
+ resp, update_body = self.creds_client.update_credential(
+ cred['id'], access_key=new_keys[0], secret_key=new_keys[1],
+ project_id=self.projects[1])
+ self.assertEqual(resp['status'], '200')
+ self.assertEqual(cred['id'], update_body['id'])
+ self.assertEqual(self.projects[1], update_body['project_id'])
+ self.assertEqual(self.user_body['id'], update_body['user_id'])
+ self.assertEqual(update_body['blob']['access'], new_keys[0])
+ self.assertEqual(update_body['blob']['secret'], new_keys[1])
+
+ resp, get_body = self.creds_client.get_credential(cred['id'])
+ self.assertEqual(resp['status'], '200')
+ for value1 in self.creds_list[0]:
+ self.assertEqual(update_body[value1],
+ get_body[value1])
+ for value2 in self.creds_list[1]:
+ self.assertEqual(update_body['blob'][value2],
+ get_body['blob'][value2])
+
+ @attr(type='smoke')
+ def test_credentials_list_delete(self):
+ created_cred_ids = list()
+ fetched_cred_ids = list()
+
+ for i in range(2):
+ resp, cred = self.creds_client.create_credential(
+ rand_name('Access-'), rand_name('Secret-'),
+ self.user_body['id'], self.projects[0])
+ self.assertEqual(resp['status'], '201')
+ created_cred_ids.append(cred['id'])
+ self.addCleanup(self._delete_credential, cred['id'])
+
+ resp, creds = self.creds_client.list_credentials()
+ self.assertEqual(resp['status'], '200')
+
+ for i in creds:
+ fetched_cred_ids.append(i['id'])
+ missing_creds = [c for c in created_cred_ids
+ if c not in fetched_cred_ids]
+ self.assertEqual(0, len(missing_creds),
+ "Failed to find cred %s in fetched list" %
+ ', '.join(m_cred for m_cred
+ in missing_creds))
+
+
+class CredentialsTestXML(CredentialsTestJSON):
+ _interface = 'xml'
diff --git a/tempest/api/identity/admin/v3/test_domains.py b/tempest/api/identity/admin/v3/test_domains.py
index 9136934..2fbef77 100644
--- a/tempest/api/identity/admin/v3/test_domains.py
+++ b/tempest/api/identity/admin/v3/test_domains.py
@@ -25,7 +25,7 @@
_interface = 'json'
def _delete_domain(self, domain_id):
- # It is necessary to disable the domian before deleting,
+ # It is necessary to disable the domain before deleting,
# or else it would result in unauthorized error
_, body = self.v3_client.update_domain(domain_id, enabled=False)
resp, _ = self.v3_client.delete_domain(domain_id)
@@ -39,7 +39,7 @@
for _ in range(3):
_, domain = self.v3_client.create_domain(
rand_name('domain-'), description=rand_name('domain-desc-'))
- # Delete the domian at the end of this method
+ # Delete the domain at the end of this method
self.addCleanup(self._delete_domain, domain['id'])
domain_ids.append(domain['id'])
# List and Verify Domains
diff --git a/tempest/api/identity/admin/v3/test_endpoints.py b/tempest/api/identity/admin/v3/test_endpoints.py
index 9d143ed..02a6f5b 100644
--- a/tempest/api/identity/admin/v3/test_endpoints.py
+++ b/tempest/api/identity/admin/v3/test_endpoints.py
@@ -59,7 +59,7 @@
def test_list_endpoints(self):
# Get a list of endpoints
resp, fetched_endpoints = self.client.list_endpoints()
- # Asserting LIST Endpoint
+ # Asserting LIST endpoints
self.assertEqual(resp['status'], '200')
missing_endpoints =\
[e for e in self.setup_endpoints if e not in fetched_endpoints]
diff --git a/tempest/api/identity/admin/v3/test_roles.py b/tempest/api/identity/admin/v3/test_roles.py
index 980323a..a238c46 100644
--- a/tempest/api/identity/admin/v3/test_roles.py
+++ b/tempest/api/identity/admin/v3/test_roles.py
@@ -54,7 +54,7 @@
resp[1], _ = cls.v3_client.delete_group(cls.group_body['id'])
resp[2], _ = cls.v3_client.delete_user(cls.user_body['id'])
resp[3], _ = cls.v3_client.delete_project(cls.project['id'])
- # NOTE(harika-vakadi): It is necessary to disable the domian
+ # NOTE(harika-vakadi): It is necessary to disable the domain
# before deleting,or else it would result in unauthorized error
cls.v3_client.update_domain(cls.domain['id'], enabled=False)
resp[4], _ = cls.v3_client.delete_domain(cls.domain['id'])
diff --git a/tempest/api/identity/base.py b/tempest/api/identity/base.py
index bfb5372..2a168de 100644
--- a/tempest/api/identity/base.py
+++ b/tempest/api/identity/base.py
@@ -34,6 +34,7 @@
cls.service_client = os.service_client
cls.policy_client = os.policy_client
cls.v3_token = os.token_v3_client
+ cls.creds_client = os.credentials_client
if not cls.client.has_admin_extensions():
raise cls.skipException("Admin extensions disabled")
diff --git a/tempest/api/network/base.py b/tempest/api/network/base.py
index 19c5f84..3ae718c 100644
--- a/tempest/api/network/base.py
+++ b/tempest/api/network/base.py
@@ -56,9 +56,15 @@
cls.networks = []
cls.subnets = []
cls.ports = []
+ cls.pools = []
+ cls.vips = []
@classmethod
def tearDownClass(cls):
+ for vip in cls.vips:
+ cls.client.delete_vip(vip['id'])
+ for pool in cls.pools:
+ cls.client.delete_pool(pool['id'])
for port in cls.ports:
cls.client.delete_port(port['id'])
for subnet in cls.subnets:
@@ -111,3 +117,21 @@
port = body['port']
cls.ports.append(port)
return port
+
+ @classmethod
+ def create_pool(cls, name, lb_method, protocol, subnet):
+ """Wrapper utility that returns a test pool."""
+ resp, body = cls.client.create_pool(name, lb_method, protocol,
+ subnet['id'])
+ pool = body['pool']
+ cls.pools.append(pool)
+ return pool
+
+ @classmethod
+ def create_vip(cls, name, protocol, protocol_port, subnet, pool):
+ """Wrapper utility that returns a test vip."""
+ resp, body = cls.client.create_vip(name, protocol, protocol_port,
+ subnet['id'], pool['id'])
+ vip = body['vip']
+ cls.vips.append(vip)
+ return vip
diff --git a/tempest/api/network/test_load_balancer.py b/tempest/api/network/test_load_balancer.py
new file mode 100644
index 0000000..3880f4f
--- /dev/null
+++ b/tempest/api/network/test_load_balancer.py
@@ -0,0 +1,106 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 OpenStack, LLC
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.network import base
+from tempest.common.utils.data_utils import rand_name
+from tempest.test import attr
+
+
+class LoadBalancerJSON(base.BaseNetworkTest):
+ _interface = 'json'
+
+ """
+ Tests the following operations in the Neutron API using the REST client for
+ Neutron:
+
+ create vIP, and Pool
+ show vIP
+ list vIP
+ update vIP
+ delete vIP
+ update pool
+ delete pool
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ super(LoadBalancerJSON, cls).setUpClass()
+ cls.network = cls.create_network()
+ cls.name = cls.network['name']
+ cls.subnet = cls.create_subnet(cls.network)
+ pool_name = rand_name('pool-')
+ vip_name = rand_name('vip-')
+ cls.pool = cls.create_pool(pool_name, "ROUND_ROBIN",
+ "HTTP", cls.subnet)
+ cls.vip = cls.create_vip(vip_name, "HTTP", 80, cls.subnet, cls.pool)
+
+ @attr(type='smoke')
+ def test_list_vips(self):
+ # Verify the vIP exists in the list of all vIPs
+ resp, body = self.client.list_vips()
+ self.assertEqual('200', resp['status'])
+ vips = body['vips']
+ found = None
+ for n in vips:
+ if (n['id'] == self.vip['id']):
+ found = n['id']
+ msg = "vIPs list doesn't contain created vip"
+ self.assertIsNotNone(found, msg)
+
+ def test_create_update_delete_pool_vip(self):
+ # Creates a vip
+ name = rand_name('vip-')
+ resp, body = self.client.create_pool(rand_name("pool-"),
+ "ROUND_ROBIN", "HTTP",
+ self.subnet['id'])
+ pool = body['pool']
+ resp, body = self.client.create_vip(name, "HTTP", 80,
+ self.subnet['id'], pool['id'])
+ self.assertEqual('201', resp['status'])
+ vip = body['vip']
+ vip_id = vip['id']
+ # Verification of vip update
+ new_name = "New_vip"
+ resp, body = self.client.update_vip(vip_id, new_name)
+ self.assertEqual('200', resp['status'])
+ updated_vip = body['vip']
+ self.assertEqual(updated_vip['name'], new_name)
+ # Verification of vip delete
+ resp, body = self.client.delete_vip(vip['id'])
+ self.assertEqual('204', resp['status'])
+ # Verification of pool update
+ new_name = "New_pool"
+ resp, body = self.client.update_pool(pool['id'], new_name)
+ self.assertEqual('200', resp['status'])
+ updated_pool = body['pool']
+ self.assertEqual(updated_pool['name'], new_name)
+ # Verification of pool delete
+ resp, body = self.client.delete_pool(pool['id'])
+ self.assertEqual('204', resp['status'])
+
+ @attr(type='smoke')
+ def test_show_vip(self):
+ # Verifies the details of a vip
+ resp, body = self.client.show_vip(self.vip['id'])
+ self.assertEqual('200', resp['status'])
+ vip = body['vip']
+ self.assertEqual(self.vip['id'], vip['id'])
+ self.assertEqual(self.vip['name'], vip['name'])
+
+
+class LoadBalancerXML(LoadBalancerJSON):
+ _interface = 'xml'
diff --git a/tempest/api/network/test_networks.py b/tempest/api/network/test_networks.py
index f3d1485..a2b4ab3 100644
--- a/tempest/api/network/test_networks.py
+++ b/tempest/api/network/test_networks.py
@@ -63,18 +63,6 @@
cls.cidr = cls.subnet['cidr']
cls.port = cls.create_port(cls.network)
- def _delete_networks(self, created_networks):
- for n in created_networks:
- resp, body = self.client.delete_network(n['id'])
- self.assertEqual(204, resp.status)
- # Asserting that the networks are not found in the list after deletion
- resp, body = self.client.list_networks()
- networks_list = list()
- for network in body['networks']:
- networks_list.append(network['id'])
- for n in created_networks:
- self.assertNotIn(n['id'], networks_list)
-
@attr(type='smoke')
def test_create_update_delete_network_subnet(self):
# Creates a network
@@ -208,6 +196,81 @@
self.assertRaises(exceptions.NotFound, self.client.show_subnet,
non_exist_id)
+ @attr(type=['negative', 'smoke'])
+ def test_show_non_existent_port(self):
+ non_exist_id = rand_name('port')
+ self.assertRaises(exceptions.NotFound, self.client.show_port,
+ non_exist_id)
+
+
+class NetworksTestXML(NetworksTestJSON):
+ _interface = 'xml'
+
+
+class BulkNetworkOpsJSON(base.BaseNetworkTest):
+ _interface = 'json'
+
+ """
+ Tests the following operations in the Neutron API using the REST client for
+ Neutron:
+
+ bulk network creation
+ bulk subnet creation
+ bulk subnet creation
+ list tenant's networks
+
+ v2.0 of the Neutron API is assumed. It is also assumed that the following
+ options are defined in the [network] section of etc/tempest.conf:
+
+ tenant_network_cidr with a block of cidr's from which smaller blocks
+ can be allocated for tenant networks
+
+ tenant_network_mask_bits with the mask bits to be used to partition the
+ block defined by tenant-network_cidr
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ super(BulkNetworkOpsJSON, cls).setUpClass()
+ cls.network1 = cls.create_network()
+ cls.network2 = cls.create_network()
+
+ def _delete_networks(self, created_networks):
+ for n in created_networks:
+ resp, body = self.client.delete_network(n['id'])
+ self.assertEqual(204, resp.status)
+ # Asserting that the networks are not found in the list after deletion
+ resp, body = self.client.list_networks()
+ networks_list = list()
+ for network in body['networks']:
+ networks_list.append(network['id'])
+ for n in created_networks:
+ self.assertNotIn(n['id'], networks_list)
+
+ def _delete_subnets(self, created_subnets):
+ for n in created_subnets:
+ resp, body = self.client.delete_subnet(n['id'])
+ self.assertEqual(204, resp.status)
+ # Asserting that the subnets are not found in the list after deletion
+ resp, body = self.client.list_subnets()
+ subnets_list = list()
+ for subnet in body['subnets']:
+ subnets_list.append(subnet['id'])
+ for n in created_subnets:
+ self.assertNotIn(n['id'], subnets_list)
+
+ def _delete_ports(self, created_ports):
+ for n in created_ports:
+ resp, body = self.client.delete_port(n['id'])
+ self.assertEqual(204, resp.status)
+ # Asserting that the ports are not found in the list after deletion
+ resp, body = self.client.list_ports()
+ ports_list = list()
+ for port in body['ports']:
+ ports_list.append(port['id'])
+ for n in created_ports:
+ self.assertNotIn(n['id'], ports_list)
+
@attr(type='smoke')
def test_bulk_create_delete_network(self):
# Creates 2 networks in one request
@@ -225,6 +288,74 @@
self.assertIsNotNone(n['id'])
self.assertIn(n['id'], networks_list)
+ @attr(type='smoke')
+ def test_bulk_create_delete_subnet(self):
+ # Creates 2 subnets in one request
+ cidr = netaddr.IPNetwork(self.network_cfg.tenant_network_cidr)
+ mask_bits = self.network_cfg.tenant_network_mask_bits
+ cidrs = []
+ for subnet_cidr in cidr.subnet(mask_bits):
+ cidrs.append(subnet_cidr)
+ names = []
+ networks = [self.network1['id'], self.network2['id']]
+ for i in range(len(networks)):
+ names.append(rand_name('subnet-'))
+ subnet_list = []
+ # TODO(raies): "for IPv6, version list [4, 6] will be used.
+ # and cidr for IPv6 will be of IPv6"
+ ip_version = [4, 4]
+ for i in range(len(names)):
+ p1 = {
+ 'network_id': networks[i],
+ 'cidr': str(cidrs[(i)]),
+ 'name': names[i],
+ 'ip_version': ip_version[i]
+ }
+ subnet_list.append(p1)
+ del subnet_list[1]['name']
+ resp, body = self.client.create_bulk_subnet(subnet_list)
+ created_subnets = body['subnets']
+ self.addCleanup(self._delete_subnets, created_subnets)
+ self.assertEqual('201', resp['status'])
+ # Asserting that the subnets are found in the list after creation
+ resp, body = self.client.list_subnets()
+ subnets_list = list()
+ for subnet in body['subnets']:
+ subnets_list.append(subnet['id'])
+ for n in created_subnets:
+ self.assertIsNotNone(n['id'])
+ self.assertIn(n['id'], subnets_list)
-class NetworksTestXML(NetworksTestJSON):
+ @attr(type='smoke')
+ def test_bulk_create_delete_port(self):
+ # Creates 2 ports in one request
+ names = []
+ networks = [self.network1['id'], self.network2['id']]
+ for i in range(len(networks)):
+ names.append(rand_name('port-'))
+ port_list = []
+ state = [True, False]
+ for i in range(len(names)):
+ p1 = {
+ 'network_id': networks[i],
+ 'name': names[i],
+ 'admin_state_up': state[i],
+ }
+ port_list.append(p1)
+ del port_list[1]['name']
+ resp, body = self.client.create_bulk_port(port_list)
+ created_ports = body['ports']
+ self.addCleanup(self._delete_ports, created_ports)
+ self.assertEqual('201', resp['status'])
+ # Asserting that the ports are found in the list after creation
+ resp, body = self.client.list_ports()
+ ports_list = list()
+ for port in body['ports']:
+ ports_list.append(port['id'])
+ for n in created_ports:
+ self.assertIsNotNone(n['id'])
+ self.assertIn(n['id'], ports_list)
+
+
+class BulkNetworkOpsXML(BulkNetworkOpsJSON):
_interface = 'xml'
diff --git a/tempest/api/network/test_routers.py b/tempest/api/network/test_routers.py
index 9f8c742..8b939fe 100644
--- a/tempest/api/network/test_routers.py
+++ b/tempest/api/network/test_routers.py
@@ -97,7 +97,7 @@
name = rand_name('router-')
resp, create_body = self.client.create_router(name)
self.addCleanup(self.client.delete_router, create_body['router']['id'])
- # Add router interafce with subnet id
+ # Add router interface with subnet id
resp, interface = self.client.add_router_interface_with_subnet_id(
create_body['router']['id'], subnet['id'])
self.assertEqual('200', resp['status'])
diff --git a/tempest/api/network/test_security_groups.py b/tempest/api/network/test_security_groups.py
new file mode 100644
index 0000000..60ca88a
--- /dev/null
+++ b/tempest/api/network/test_security_groups.py
@@ -0,0 +1,127 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.network import base
+from tempest.common.utils.data_utils import rand_name
+from tempest import exceptions
+from tempest.test import attr
+
+
+class SecGroupTest(base.BaseNetworkTest):
+ _interface = 'json'
+
+ @classmethod
+ def setUpClass(cls):
+ super(SecGroupTest, cls).setUpClass()
+
+ def _delete_security_group(self, secgroup_id):
+ resp, _ = self.client.delete_security_group(secgroup_id)
+ self.assertEqual(204, resp.status)
+ # Asserting that the security group is not found in the list
+ # after deletion
+ resp, list_body = self.client.list_security_groups()
+ self.assertEqual('200', resp['status'])
+ secgroup_list = list()
+ for secgroup in list_body['security_groups']:
+ secgroup_list.append(secgroup['id'])
+ self.assertNotIn(secgroup_id, secgroup_list)
+
+ def _delete_security_group_rule(self, rule_id):
+ resp, _ = self.client.delete_security_group_rule(rule_id)
+ self.assertEqual(204, resp.status)
+ # Asserting that the security group is not found in the list
+ # after deletion
+ resp, list_body = self.client.list_security_group_rules()
+ self.assertEqual('200', resp['status'])
+ rules_list = list()
+ for rule in list_body['security_group_rules']:
+ rules_list.append(rule['id'])
+ self.assertNotIn(rule_id, rules_list)
+
+ @attr(type='smoke')
+ def test_list_security_groups(self):
+        # Verify the security group belonging to the tenant exists in list
+ resp, body = self.client.list_security_groups()
+ self.assertEqual('200', resp['status'])
+ security_groups = body['security_groups']
+ found = None
+ for n in security_groups:
+ if (n['name'] == 'default'):
+ found = n['id']
+ msg = "Security-group list doesn't contain default security-group"
+ self.assertIsNotNone(found, msg)
+
+ @attr(type='smoke')
+ def test_create_show_delete_security_group_and_rule(self):
+ # Create a security group
+ name = rand_name('secgroup-')
+ resp, group_create_body = self.client.create_security_group(name)
+ self.assertEqual('201', resp['status'])
+ self.addCleanup(self._delete_security_group,
+ group_create_body['security_group']['id'])
+ self.assertEqual(group_create_body['security_group']['name'], name)
+
+ # Show details of the created security group
+ resp, show_body = self.client.show_security_group(
+ group_create_body['security_group']['id'])
+ self.assertEqual('200', resp['status'])
+ self.assertEqual(show_body['security_group']['name'], name)
+
+ # List security groups and verify if created group is there in response
+ resp, list_body = self.client.list_security_groups()
+ self.assertEqual('200', resp['status'])
+ secgroup_list = list()
+ for secgroup in list_body['security_groups']:
+ secgroup_list.append(secgroup['id'])
+ self.assertIn(group_create_body['security_group']['id'], secgroup_list)
+ # No Update in security group
+ # Create rule
+ resp, rule_create_body = self.client.create_security_group_rule(
+ group_create_body['security_group']['id']
+ )
+ self.assertEqual('201', resp['status'])
+ self.addCleanup(self._delete_security_group_rule,
+ rule_create_body['security_group_rule']['id'])
+ # Show details of the created security rule
+ resp, show_rule_body = self.client.show_security_group_rule(
+ rule_create_body['security_group_rule']['id']
+ )
+ self.assertEqual('200', resp['status'])
+
+ # List rules and verify created rule is in response
+ resp, rule_list_body = self.client.list_security_group_rules()
+ self.assertEqual('200', resp['status'])
+ rule_list = [rule['id']
+ for rule in rule_list_body['security_group_rules']]
+ self.assertIn(rule_create_body['security_group_rule']['id'], rule_list)
+
+ @attr(type=['negative', 'smoke'])
+ def test_show_non_existent_security_group(self):
+ non_exist_id = rand_name('secgroup-')
+ self.assertRaises(exceptions.NotFound, self.client.show_security_group,
+ non_exist_id)
+
+ @attr(type=['negative', 'smoke'])
+ def test_show_non_existent_security_group_rule(self):
+ non_exist_id = rand_name('rule-')
+ self.assertRaises(exceptions.NotFound,
+ self.client.show_security_group_rule,
+ non_exist_id)
+
+
+class SecGroupTestXML(SecGroupTest):
+ _interface = 'xml'
diff --git a/tempest/api/object_storage/base.py b/tempest/api/object_storage/base.py
index e6e8d17..1d16b2f 100644
--- a/tempest/api/object_storage/base.py
+++ b/tempest/api/object_storage/base.py
@@ -73,6 +73,11 @@
cls.data = DataGenerator(cls.identity_admin_client)
@classmethod
+ def tearDownClass(cls):
+ cls.isolated_creds.clear_isolated_creds()
+ super(BaseObjectTest, cls).tearDownClass()
+
+ @classmethod
def _assign_member_role(cls):
primary_user = cls.isolated_creds.get_primary_user()
alt_user = cls.isolated_creds.get_alt_user()
diff --git a/tempest/api/orchestration/stacks/test_neutron_resources.py b/tempest/api/orchestration/stacks/test_neutron_resources.py
index c934020..174c82a 100644
--- a/tempest/api/orchestration/stacks/test_neutron_resources.py
+++ b/tempest/api/orchestration/stacks/test_neutron_resources.py
@@ -147,7 +147,7 @@
@attr(type='slow')
def test_created_network(self):
- """Verifies created netowrk."""
+ """Verifies created network."""
network_id = self.test_resources.get('Network')['physical_resource_id']
resp, body = self.network_client.show_network(network_id)
self.assertEqual('200', resp['status'])
diff --git a/tempest/api/orchestration/stacks/test_server_cfn_init.py b/tempest/api/orchestration/stacks/test_server_cfn_init.py
index ffe8def..41849d0 100644
--- a/tempest/api/orchestration/stacks/test_server_cfn_init.py
+++ b/tempest/api/orchestration/stacks/test_server_cfn_init.py
@@ -158,7 +158,7 @@
resp, body = self.client.get_resource(sid, rid)
self.assertEqual('CREATE_COMPLETE', body['resource_status'])
- # fetch the ip address from servers client, since we can't get it
+ # fetch the IP address from servers client, since we can't get it
# from the stack until stack create is complete
resp, server = self.servers_client.get_server(
body['physical_resource_id'])
diff --git a/tempest/api/volume/test_volumes_actions.py b/tempest/api/volume/test_volumes_actions.py
index 960785d..ad80505 100644
--- a/tempest/api/volume/test_volumes_actions.py
+++ b/tempest/api/volume/test_volumes_actions.py
@@ -18,6 +18,7 @@
from tempest.api.volume.base import BaseVolumeTest
from tempest.common.utils.data_utils import rand_name
from tempest.test import attr
+from tempest.test import services
from tempest.test import stresstest
@@ -55,6 +56,7 @@
@stresstest(class_setup_per='process')
@attr(type='smoke')
+ @services('compute')
def test_attach_detach_volume_to_instance(self):
# Volume is attached and detached successfully from an instance
mountpoint = '/dev/vdc'
@@ -69,6 +71,7 @@
@stresstest(class_setup_per='process')
@attr(type='gate')
+ @services('compute')
def test_get_volume_attachment(self):
# Verify that a volume's attachment information is retrieved
mountpoint = '/dev/vdc'
@@ -93,13 +96,16 @@
self.assertEqual(self.volume['id'], attachment['volume_id'])
@attr(type='gate')
+ @services('image')
def test_volume_upload(self):
# NOTE(gfidente): the volume uploaded in Glance comes from setUpClass,
# it is shared with the other tests. After it is uploaded in Glance,
# there is no way to delete it from Cinder, so we delete it from Glance
# using the Glance image_client and from Cinder via tearDownClass.
image_name = rand_name('Image-')
- resp, body = self.client.upload_volume(self.volume['id'], image_name)
+ resp, body = self.client.upload_volume(self.volume['id'],
+ image_name,
+ self.config.volume.disk_format)
image_id = body["image_id"]
self.addCleanup(self.image_client.delete_image, image_id)
self.assertEqual(202, resp.status)
diff --git a/tempest/api/volume/test_volumes_get.py b/tempest/api/volume/test_volumes_get.py
index 2e90f16..12b03b5 100644
--- a/tempest/api/volume/test_volumes_get.py
+++ b/tempest/api/volume/test_volumes_get.py
@@ -18,6 +18,7 @@
from tempest.api.volume import base
from tempest.common.utils.data_utils import rand_name
from tempest.test import attr
+from tempest.test import services
class VolumesGetTest(base.BaseVolumeTest):
@@ -68,6 +69,10 @@
fetched_volume['metadata'],
'The fetched Volume is different '
'from the created Volume')
+        if 'imageRef' in kwargs:
+            self.assertEqual(fetched_volume['bootable'], True)
+        else:
+            self.assertEqual(fetched_volume['bootable'], False)
@attr(type='gate')
def test_volume_get_metadata_none(self):
@@ -93,6 +98,7 @@
self._volume_create_get_delete()
@attr(type='smoke')
+ @services('image')
def test_volume_create_get_delete_from_image(self):
self._volume_create_get_delete(imageRef=self.config.compute.image_ref)
diff --git a/tempest/api/volume/test_volumes_list.py b/tempest/api/volume/test_volumes_list.py
index 8c39e08..d9c9e48 100644
--- a/tempest/api/volume/test_volumes_list.py
+++ b/tempest/api/volume/test_volumes_list.py
@@ -17,8 +17,11 @@
from tempest.api.volume import base
from tempest.common.utils.data_utils import rand_name
+from tempest.openstack.common import log as logging
from tempest.test import attr
+LOG = logging.getLogger(__name__)
+
class VolumesListTest(base.BaseVolumeTest):
@@ -27,7 +30,7 @@
ensure that the backing file for the volume group that Nova uses
has space for at least 3 1G volumes!
If you are running a Devstack environment, ensure that the
- VOLUME_BACKING_FILE_SIZE is atleast 4G in your localrc
+ VOLUME_BACKING_FILE_SIZE is at least 4G in your localrc
"""
_interface = 'json'
@@ -64,22 +67,17 @@
resp, volume = cls.client.get_volume(volume['id'])
cls.volume_list.append(volume)
cls.volume_id_list.append(volume['id'])
- except Exception:
+ except Exception as exc:
+ LOG.exception(exc)
if cls.volume_list:
# We could not create all the volumes, though we were able
# to create *some* of the volumes. This is typically
# because the backing file size of the volume group is
- # too small. So, here, we clean up whatever we did manage
- # to create and raise a SkipTest
+ # too small.
for volid in cls.volume_id_list:
cls.client.delete_volume(volid)
cls.client.wait_for_resource_deletion(volid)
- msg = ("Failed to create ALL necessary volumes to run "
- "test. This typically means that the backing file "
- "size of the nova-volumes group is too small to "
- "create the 3 volumes needed by this test case")
- raise cls.skipException(msg)
- raise
+ raise exc
@classmethod
def tearDownClass(cls):
diff --git a/tempest/api/volume/test_volumes_negative.py b/tempest/api/volume/test_volumes_negative.py
index e2b15a4..014ab32 100644
--- a/tempest/api/volume/test_volumes_negative.py
+++ b/tempest/api/volume/test_volumes_negative.py
@@ -31,8 +31,8 @@
@attr(type='gate')
def test_volume_get_nonexistant_volume_id(self):
- # Should not be able to get a nonexistant volume
- # Creating a nonexistant volume id
+ # Should not be able to get a non-existent volume
+ # Creating a non-existent volume id
volume_id_list = []
resp, volumes = self.client.list_volumes()
for i in range(len(volumes)):
@@ -41,14 +41,14 @@
non_exist_id = rand_name('999')
if non_exist_id not in volume_id_list:
break
- # Trying to Get a non existant volume
+ # Trying to Get a non-existent volume
self.assertRaises(exceptions.NotFound, self.client.get_volume,
non_exist_id)
@attr(type='gate')
def test_volume_delete_nonexistant_volume_id(self):
- # Should not be able to delete a nonexistant Volume
- # Creating nonexistant volume id
+ # Should not be able to delete a non-existent Volume
+ # Creating non-existent volume id
volume_id_list = []
resp, volumes = self.client.list_volumes()
for i in range(len(volumes)):
@@ -57,7 +57,7 @@
non_exist_id = '12345678-abcd-4321-abcd-123456789098'
if non_exist_id not in volume_id_list:
break
- # Try to Delete a non existant volume
+ # Try to delete a non-existent volume
self.assertRaises(exceptions.NotFound, self.client.delete_volume,
non_exist_id)
diff --git a/tempest/cli/output_parser.py b/tempest/cli/output_parser.py
index f22ec4e..bb3368f 100644
--- a/tempest/cli/output_parser.py
+++ b/tempest/cli/output_parser.py
@@ -158,7 +158,7 @@
def _table_columns(first_table_row):
"""Find column ranges in output line.
- Return list of touples (start,end) for each column
+ Return list of tuples (start,end) for each column
detected by plus (+) characters in delimiter line.
"""
positions = []
diff --git a/tempest/clients.py b/tempest/clients.py
index 48e4939..49b9283 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -71,6 +71,8 @@
VolumesExtensionsClientXML
from tempest.services.identity.json.identity_client import IdentityClientJSON
from tempest.services.identity.json.identity_client import TokenClientJSON
+from tempest.services.identity.v3.json.credentials_client import \
+ CredentialsClientJSON
from tempest.services.identity.v3.json.endpoints_client import \
EndPointClientJSON
from tempest.services.identity.v3.json.identity_client import \
@@ -79,6 +81,8 @@
from tempest.services.identity.v3.json.policy_client import PolicyClientJSON
from tempest.services.identity.v3.json.service_client import \
ServiceClientJSON
+from tempest.services.identity.v3.xml.credentials_client import \
+ CredentialsClientXML
from tempest.services.identity.v3.xml.endpoints_client import EndPointClientXML
from tempest.services.identity.v3.xml.identity_client import \
IdentityV3ClientXML
@@ -252,6 +256,11 @@
"xml": V3TokenClientXML,
}
+CREDENTIALS_CLIENT = {
+ "json": CredentialsClientJSON,
+ "xml": CredentialsClientXML,
+}
+
class Manager(object):
@@ -336,6 +345,8 @@
self.policy_client = POLICY_CLIENT[interface](*client_args)
self.hypervisor_client = HYPERVISOR_CLIENT[interface](*client_args)
self.token_v3_client = V3_TOKEN_CLIENT[interface](*client_args)
+ self.credentials_client = \
+ CREDENTIALS_CLIENT[interface](*client_args)
if client_args_v3_auth:
self.servers_client_v3_auth = SERVERS_CLIENTS[interface](
diff --git a/tempest/common/isolated_creds.py b/tempest/common/isolated_creds.py
index 22e1bd2..d6b4466 100644
--- a/tempest/common/isolated_creds.py
+++ b/tempest/common/isolated_creds.py
@@ -141,10 +141,11 @@
role = None
try:
roles = self._list_roles()
+ admin_role = self.config.identity.admin_role
if self.tempest_client:
- role = next(r for r in roles if r['name'] == 'admin')
+ role = next(r for r in roles if r['name'] == admin_role)
else:
- role = next(r for r in roles if r.name == 'admin')
+ role = next(r for r in roles if r.name == admin_role)
except StopIteration:
msg = "No admin role found"
raise exceptions.NotFound(msg)
diff --git a/tempest/common/utils/linux/remote_client.py b/tempest/common/utils/linux/remote_client.py
index 2cbb74d..0d0e794 100644
--- a/tempest/common/utils/linux/remote_client.py
+++ b/tempest/common/utils/linux/remote_client.py
@@ -52,12 +52,12 @@
return self.ssh_client.test_connection_auth()
def hostname_equals_servername(self, expected_hostname):
- # Get hostname using command "hostname"
+ # Get host name using command "hostname"
actual_hostname = self.ssh_client.exec_command("hostname").rstrip()
return expected_hostname == actual_hostname
def get_files(self, path):
- # Return a list of comma seperated files
+ # Return a list of comma separated files
command = "ls -m " + path
return self.ssh_client.exec_command(command).rstrip('\n').split(', ')
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
new file mode 100644
index 0000000..15569cd
--- /dev/null
+++ b/tempest/common/waiters.py
@@ -0,0 +1,82 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import time
+
+from tempest import config
+from tempest import exceptions
+from tempest.openstack.common import log as logging
+
+CONFIG = config.TempestConfig()
+LOG = logging.getLogger(__name__)
+
+
+# NOTE(afazekas): This function needs to know a token and a subject.
+def wait_for_server_status(client, server_id, status, ready_wait=True):
+ """Waits for a server to reach a given status."""
+
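+    # `client` is expected to expose get_server(), build_interval and
+    # build_timeout, e.g. the JSON or XML compute servers client.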
+ def _get_task_state(body):
+ task_state = body.get('OS-EXT-STS:task_state', None)
+ return task_state
+
+ # NOTE(afazekas): UNKNOWN status possible on ERROR
+ # or in a very early stage.
+ resp, body = client.get_server(server_id)
+ old_status = server_status = body['status']
+ old_task_state = task_state = _get_task_state(body)
+ start_time = int(time.time())
+ while True:
+        # NOTE(afazekas): Now the BUILD status is only reached
+        # during the UNKNOWN->ACTIVE transition.
+ # TODO(afazekas): enumerate and validate the stable status set
+ if status == 'BUILD' and server_status != 'UNKNOWN':
+ return
+ if server_status == status:
+ if ready_wait:
+ if status == 'BUILD':
+ return
+                # NOTE(afazekas): The instance is in a "ready for action"
+                # state when no task is in progress
+                # NOTE(afazekas): Converted to string because of the XML
+                # responses
+ if str(task_state) == "None":
+                    # 3 sec is usually enough without the state API extension
+ time.sleep(CONFIG.compute.ready_wait)
+ return
+ else:
+ return
+
+ time.sleep(client.build_interval)
+ resp, body = client.get_server(server_id)
+ server_status = body['status']
+ task_state = _get_task_state(body)
+ if (server_status != old_status) or (task_state != old_task_state):
+ LOG.info('State transition "%s" ==> "%s" after %d second wait',
+ '/'.join((old_status, str(old_task_state))),
+ '/'.join((server_status, str(task_state))),
+ time.time() - start_time)
+ if server_status == 'ERROR':
+ raise exceptions.BuildErrorException(server_id=server_id)
+
+ timed_out = int(time.time()) - start_time >= client.build_timeout
+
+ if timed_out:
+ message = ('Server %s failed to reach %s status within the '
+ 'required time (%s s).' %
+ (server_id, status, client.build_timeout))
+ message += ' Current status: %s.' % server_status
+ raise exceptions.TimeoutException(message)
+ old_status = server_status
+ old_task_state = task_state
diff --git a/tempest/config.py b/tempest/config.py
index 3b09b5e..b386968 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -51,6 +51,9 @@
cfg.StrOpt('tenant_name',
default='demo',
help="Tenant name to use for Nova API requests."),
+ cfg.StrOpt('admin_role',
+ default='admin',
+ help="Role required to administrate keystone."),
cfg.StrOpt('password',
default='pass',
help="API key to use when authenticating.",
@@ -121,10 +124,17 @@
cfg.StrOpt('image_ssh_user',
default="root",
help="User name used to authenticate to an instance."),
+ cfg.StrOpt('image_ssh_password',
+ default="password",
+ help="Password used to authenticate to an instance."),
cfg.StrOpt('image_alt_ssh_user',
default="root",
help="User name used to authenticate to an instance using "
"the alternate image."),
+ cfg.StrOpt('image_alt_ssh_password',
+ default="password",
+ help="Password used to authenticate to an instance using "
+ "the alternate image."),
cfg.BoolOpt('resize_available',
default=False,
help="Does the test environment support resizing?"),
@@ -167,6 +177,10 @@
default=300,
help="Timeout in seconds to wait for authentication to "
"succeed."),
+    cfg.IntOpt('ready_wait',
+               default=0,
+               help="Additional wait time for clean state, when there is"
+                    " no OS-EXT-STS extension available"),
cfg.IntOpt('ssh_channel_timeout',
default=60,
help="Timeout in seconds to wait for output from ssh "
@@ -196,6 +210,10 @@
cfg.BoolOpt('flavor_extra_enabled',
default=True,
help="If false, skip flavor extra data test"),
+ cfg.StrOpt('volume_device_name',
+ default='vdb',
+ help="Expected device name when a volume is attached to "
+ "an instance")
]
@@ -227,36 +245,6 @@
for opt in ComputeAdminGroup:
conf.register_opt(opt, group='compute-admin')
-
-whitebox_group = cfg.OptGroup(name='whitebox',
- title="Whitebox Options")
-
-WhiteboxGroup = [
- cfg.BoolOpt('whitebox_enabled',
- default=False,
- help="Does the test environment support whitebox tests for "
- "Compute?"),
- cfg.StrOpt('db_uri',
- default=None,
- help="Connection string to the database of Compute service"),
- cfg.StrOpt('source_dir',
- default="/opt/stack/nova",
- help="Path of nova source directory"),
- cfg.StrOpt('config_path',
- default='/etc/nova/nova.conf',
- help="Path of nova configuration file"),
- cfg.StrOpt('bin_dir',
- default="/usr/local/bin/",
- help="Directory containing nova binaries such as nova-manage"),
-]
-
-
-def register_whitebox_opts(conf):
- conf.register_group(whitebox_group)
- for opt in WhiteboxGroup:
- conf.register_opt(opt, group='whitebox')
-
-
image_group = cfg.OptGroup(name='image',
title="Image Service Options")
@@ -342,6 +330,9 @@
cfg.StrOpt('vendor_name',
default='Open Source',
help='Backend vendor to target when creating volume types'),
+ cfg.StrOpt('disk_format',
+ default='raw',
+ help='Disk format to use when copying a volume to image'),
]
@@ -639,7 +630,6 @@
register_compute_opts(cfg.CONF)
register_identity_opts(cfg.CONF)
- register_whitebox_opts(cfg.CONF)
register_image_opts(cfg.CONF)
register_network_opts(cfg.CONF)
register_volume_opts(cfg.CONF)
@@ -652,7 +642,6 @@
register_scenario_opts(cfg.CONF)
register_service_available_opts(cfg.CONF)
self.compute = cfg.CONF.compute
- self.whitebox = cfg.CONF.whitebox
self.identity = cfg.CONF.identity
self.images = cfg.CONF.image
self.network = cfg.CONF.network
diff --git a/tempest/exceptions.py b/tempest/exceptions.py
index 62bd8cf..924ebc9 100644
--- a/tempest/exceptions.py
+++ b/tempest/exceptions.py
@@ -65,6 +65,10 @@
message = 'Unauthorized'
+class InvalidServiceTag(RestClientException):
+ message = "Invalid service tag"
+
+
class TimeoutException(TempestException):
message = "Request timed out"
diff --git a/tempest/hacking/checks.py b/tempest/hacking/checks.py
index 8cfd548..aa97211 100644
--- a/tempest/hacking/checks.py
+++ b/tempest/hacking/checks.py
@@ -22,6 +22,8 @@
SKIP_DECORATOR_RE = re.compile(r'\s*@testtools.skip\((.*)\)')
SKIP_STR_RE = re.compile(r'.*Bug #\d+.*')
PYTHON_CLIENT_RE = re.compile('import (%s)client' % '|'.join(PYTHON_CLIENTS))
+TEST_DEFINITION = re.compile(r'^\s*def test.*')
+SCENARIO_DECORATOR = re.compile(r'\s*@.*services\(')
def skip_bugs(physical_line):
@@ -53,6 +55,21 @@
" in tempest/api/* tests"))
+def scenario_tests_need_service_tags(physical_line, filename,
+ previous_logical):
+ """Check that scenario tests have service tags
+
+ T104: Scenario tests require a services decorator
+ """
+
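+    # For example, a `def test_*` line in tempest/scenario whose preceding
+    # logical line is not a @services(...) decorator is reported.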
+ if 'tempest/scenario' in filename:
+ if TEST_DEFINITION.match(physical_line):
+ if not SCENARIO_DECORATOR.match(previous_logical):
+ return (physical_line.find('def'),
+ "T104: Scenario tests require a service decorator")
+
+
def factory(register):
register(skip_bugs)
register(import_no_clients_in_api)
+ register(scenario_tests_need_service_tags)
diff --git a/tempest/openstack/common/excutils.py b/tempest/openstack/common/excutils.py
index 81aad14..db37660 100644
--- a/tempest/openstack/common/excutils.py
+++ b/tempest/openstack/common/excutils.py
@@ -24,6 +24,8 @@
import time
import traceback
+import six
+
from tempest.openstack.common.gettextutils import _ # noqa
@@ -65,7 +67,7 @@
self.tb))
return False
if self.reraise:
- raise self.type_, self.value, self.tb
+ six.reraise(self.type_, self.value, self.tb)
def forever_retry_uncaught_exceptions(infunc):
@@ -77,7 +79,8 @@
try:
return infunc(*args, **kwargs)
except Exception as exc:
- if exc.message == last_exc_message:
+ this_exc_message = unicode(exc)
+ if this_exc_message == last_exc_message:
exc_count += 1
else:
exc_count = 1
@@ -85,12 +88,12 @@
# the exception message changes
cur_time = int(time.time())
if (cur_time - last_log_time > 60 or
- exc.message != last_exc_message):
+ this_exc_message != last_exc_message):
logging.exception(
_('Unexpected exception occurred %d time(s)... '
'retrying.') % exc_count)
last_log_time = cur_time
- last_exc_message = exc.message
+ last_exc_message = this_exc_message
exc_count = 0
# This should be a very rare event. In case it isn't, do
# a sleep.
diff --git a/tempest/openstack/common/fileutils.py b/tempest/openstack/common/fileutils.py
index d2e3d3e..6cf68ba 100644
--- a/tempest/openstack/common/fileutils.py
+++ b/tempest/openstack/common/fileutils.py
@@ -69,33 +69,34 @@
return (reloaded, cache_info['data'])
-def delete_if_exists(path):
+def delete_if_exists(path, remove=os.unlink):
"""Delete a file, but ignore file not found error.
:param path: File to delete
+ :param remove: Optional function to remove passed path
"""
try:
- os.unlink(path)
+ remove(path)
except OSError as e:
- if e.errno == errno.ENOENT:
- return
- else:
+ if e.errno != errno.ENOENT:
raise
@contextlib.contextmanager
-def remove_path_on_error(path):
+def remove_path_on_error(path, remove=delete_if_exists):
"""Protect code that wants to operate on PATH atomically.
Any exception will cause PATH to be removed.
:param path: File to work with
+ :param remove: Optional function to remove passed path
"""
+
try:
yield
except Exception:
with excutils.save_and_reraise_exception():
- delete_if_exists(path)
+ remove(path)
def file_open(*args, **kwargs):
diff --git a/tempest/openstack/common/gettextutils.py b/tempest/openstack/common/gettextutils.py
index 8594937..cbf570a 100644
--- a/tempest/openstack/common/gettextutils.py
+++ b/tempest/openstack/common/gettextutils.py
@@ -1,8 +1,8 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Red Hat, Inc.
-# All Rights Reserved.
# Copyright 2013 IBM Corp.
+# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -26,22 +26,44 @@
import copy
import gettext
-import logging.handlers
+import logging
import os
import re
-import UserString
+try:
+ import UserString as _userString
+except ImportError:
+ import collections as _userString
+from babel import localedata
import six
_localedir = os.environ.get('tempest'.upper() + '_LOCALEDIR')
_t = gettext.translation('tempest', localedir=_localedir, fallback=True)
+_AVAILABLE_LANGUAGES = {}
+USE_LAZY = False
+
+
+def enable_lazy():
+ """Convenience function for configuring _() to use lazy gettext
+
+ Call this at the start of execution to enable the gettextutils._
+ function to use lazy gettext functionality. This is useful if
+ your project is importing _ directly instead of using the
+ gettextutils.install() way of importing the _ function.
+ """
+ global USE_LAZY
+ USE_LAZY = True
+
def _(msg):
- return _t.ugettext(msg)
+ if USE_LAZY:
+ return Message(msg, 'tempest')
+ else:
+ return _t.ugettext(msg)
-def install(domain):
+def install(domain, lazy=False):
"""Install a _() function using the given translation domain.
Given a translation domain, install a _() function using gettext's
@@ -51,44 +73,48 @@
overriding the default localedir (e.g. /usr/share/locale) using
a translation-domain-specific environment variable (e.g.
NOVA_LOCALEDIR).
+
+ :param domain: the translation domain
+ :param lazy: indicates whether or not to install the lazy _() function.
+ The lazy _() introduces a way to do deferred translation
+ of messages by installing a _ that builds Message objects,
+ instead of strings, which can then be lazily translated into
+ any available locale.
"""
- gettext.install(domain,
- localedir=os.environ.get(domain.upper() + '_LOCALEDIR'),
- unicode=True)
+ if lazy:
+ # NOTE(mrodden): Lazy gettext functionality.
+ #
+ # The following introduces a deferred way to do translations on
+ # messages in OpenStack. We override the standard _() function
+ # and % (format string) operation to build Message objects that can
+ # later be translated when we have more information.
+ #
+ # Also included below is an example LocaleHandler that translates
+ # Messages to an associated locale, effectively allowing many logs,
+ # each with their own locale.
+
+ def _lazy_gettext(msg):
+ """Create and return a Message object.
+
+ Lazy gettext function for a given domain, it is a factory method
+ for a project/module to get a lazy gettext function for its own
+ translation domain (i.e. nova, glance, cinder, etc.)
+
+ Message encapsulates a string so that we can translate
+ it later when needed.
+ """
+ return Message(msg, domain)
+
+ import __builtin__
+ __builtin__.__dict__['_'] = _lazy_gettext
+ else:
+ localedir = '%s_LOCALEDIR' % domain.upper()
+ gettext.install(domain,
+ localedir=os.environ.get(localedir),
+ unicode=True)
-"""
-Lazy gettext functionality.
-
-The following is an attempt to introduce a deferred way
-to do translations on messages in OpenStack. We attempt to
-override the standard _() function and % (format string) operation
-to build Message objects that can later be translated when we have
-more information. Also included is an example LogHandler that
-translates Messages to an associated locale, effectively allowing
-many logs, each with their own locale.
-"""
-
-
-def get_lazy_gettext(domain):
- """Assemble and return a lazy gettext function for a given domain.
-
- Factory method for a project/module to get a lazy gettext function
- for its own translation domain (i.e. nova, glance, cinder, etc.)
- """
-
- def _lazy_gettext(msg):
- """Create and return a Message object.
-
- Message encapsulates a string so that we can translate it later when
- needed.
- """
- return Message(msg, domain)
-
- return _lazy_gettext
-
-
-class Message(UserString.UserString, object):
+class Message(_userString.UserString, object):
"""Class used to encapsulate translatable messages."""
def __init__(self, msg, domain):
# _msg is the gettext msgid and should never change
@@ -130,7 +156,7 @@
# look for %(blah) fields in string;
# ignore %% and deal with the
# case where % is first character on the line
- keys = re.findall('(?:[^%]|^)%\((\w*)\)[a-z]', full_msg)
+ keys = re.findall('(?:[^%]|^)?%\((\w*)\)[a-z]', full_msg)
# if we don't find any %(blah) blocks but have a %s
if not keys and re.findall('(?:[^%]|^)%[a-z]', full_msg):
@@ -229,7 +255,47 @@
if name in ops:
return getattr(self.data, name)
else:
- return UserString.UserString.__getattribute__(self, name)
+ return _userString.UserString.__getattribute__(self, name)
+
+
+def get_available_languages(domain):
+ """Lists the available languages for the given translation domain.
+
+ :param domain: the domain to get languages for
+ """
+ if domain in _AVAILABLE_LANGUAGES:
+ return copy.copy(_AVAILABLE_LANGUAGES[domain])
+
+ localedir = '%s_LOCALEDIR' % domain.upper()
+ find = lambda x: gettext.find(domain,
+ localedir=os.environ.get(localedir),
+ languages=[x])
+
+ # NOTE(mrodden): en_US should always be available (and first in case
+ # order matters) since our in-line message strings are en_US
+ language_list = ['en_US']
+ # NOTE(luisg): Babel <1.0 used a function called list(), which was
+ # renamed to locale_identifiers() in >=1.0, the requirements master list
+ # requires >=0.9.6, uncapped, so defensively work with both. We can remove
+    # this check when the master list updates to >=1.0, and all projects update
+ list_identifiers = (getattr(localedata, 'list', None) or
+ getattr(localedata, 'locale_identifiers'))
+ locale_identifiers = list_identifiers()
+ for i in locale_identifiers:
+ if find(i) is not None:
+ language_list.append(i)
+ _AVAILABLE_LANGUAGES[domain] = language_list
+ return copy.copy(language_list)
+
+
+def get_localized_message(message, user_locale):
+ """Gets a localized version of the given message in the given locale."""
+ if isinstance(message, Message):
+ if user_locale:
+ message.locale = user_locale
+ return unicode(message)
+ else:
+ return message
class LocaleHandler(logging.Handler):
diff --git a/tempest/openstack/common/jsonutils.py b/tempest/openstack/common/jsonutils.py
index bd43e59..c568a06 100644
--- a/tempest/openstack/common/jsonutils.py
+++ b/tempest/openstack/common/jsonutils.py
@@ -38,14 +38,18 @@
import inspect
import itertools
import json
-import types
-import xmlrpclib
+try:
+ import xmlrpclib
+except ImportError:
+ # NOTE(jd): xmlrpclib is not shipped with Python 3
+ xmlrpclib = None
-import netaddr
import six
+from tempest.openstack.common import importutils
from tempest.openstack.common import timeutils
+netaddr = importutils.try_import("netaddr")
_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
inspect.isfunction, inspect.isgeneratorfunction,
@@ -53,7 +57,8 @@
inspect.iscode, inspect.isbuiltin, inspect.isroutine,
inspect.isabstract]
-_simple_types = (types.NoneType, int, basestring, bool, float, long)
+_simple_types = (six.string_types + six.integer_types
+ + (type(None), bool, float))
def to_primitive(value, convert_instances=False, convert_datetime=True,
@@ -125,7 +130,7 @@
# It's not clear why xmlrpclib created their own DateTime type, but
# for our purposes, make it a datetime type which is explicitly
# handled
- if isinstance(value, xmlrpclib.DateTime):
+ if xmlrpclib and isinstance(value, xmlrpclib.DateTime):
value = datetime.datetime(*tuple(value.timetuple())[:6])
if convert_datetime and isinstance(value, datetime.datetime):
@@ -138,7 +143,7 @@
# Likely an instance of something. Watch for cycles.
# Ignore class member vars.
return recursive(value.__dict__, level=level + 1)
- elif isinstance(value, netaddr.IPAddress):
+ elif netaddr and isinstance(value, netaddr.IPAddress):
return six.text_type(value)
else:
if any(test(value) for test in _nasty_type_tests):
diff --git a/tempest/openstack/common/local.py b/tempest/openstack/common/local.py
index f1bfc82..e82f17d 100644
--- a/tempest/openstack/common/local.py
+++ b/tempest/openstack/common/local.py
@@ -15,16 +15,15 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Greenthread local storage of variables using weak references"""
+"""Local storage of variables using weak references"""
+import threading
import weakref
-from eventlet import corolocal
-
-class WeakLocal(corolocal.local):
+class WeakLocal(threading.local):
def __getattribute__(self, attr):
- rval = corolocal.local.__getattribute__(self, attr)
+ rval = super(WeakLocal, self).__getattribute__(attr)
if rval:
# NOTE(mikal): this bit is confusing. What is stored is a weak
# reference, not the value itself. We therefore need to lookup
@@ -34,7 +33,7 @@
def __setattr__(self, attr, value):
value = weakref.ref(value)
- return corolocal.local.__setattr__(self, attr, value)
+ return super(WeakLocal, self).__setattr__(attr, value)
# NOTE(mikal): the name "store" should be deprecated in the future
@@ -45,4 +44,4 @@
# "strong" store will hold a reference to the object so that it never falls out
# of scope.
weak_store = WeakLocal()
-strong_store = corolocal.local
+strong_store = threading.local()
diff --git a/tempest/openstack/common/lockutils.py b/tempest/openstack/common/lockutils.py
index 3ff1a7a..0abd1a7 100644
--- a/tempest/openstack/common/lockutils.py
+++ b/tempest/openstack/common/lockutils.py
@@ -20,10 +20,10 @@
import errno
import functools
import os
+import threading
import time
import weakref
-from eventlet import semaphore
from oslo.config import cfg
from tempest.openstack.common import fileutils
@@ -137,7 +137,8 @@
def lock(name, lock_file_prefix=None, external=False, lock_path=None):
"""Context based lock
- This function yields a `semaphore.Semaphore` instance unless external is
+ This function yields a `threading.Semaphore` instance (if we don't use
+ eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is
True, in which case, it'll yield an InterProcessLock instance.
:param lock_file_prefix: The lock_file_prefix argument is used to provide
@@ -155,7 +156,7 @@
# NOTE(soren): If we ever go natively threaded, this will be racy.
# See http://stackoverflow.com/questions/5390569/dyn
# amically-allocating-and-destroying-mutexes
- sem = _semaphores.get(name, semaphore.Semaphore())
+ sem = _semaphores.get(name, threading.Semaphore())
if name not in _semaphores:
# this check is not racy - we're already holding ref locally
# so GC won't remove the item and there was no IO switch
diff --git a/tempest/openstack/common/timeutils.py b/tempest/openstack/common/timeutils.py
index bd60489..60f02bc 100644
--- a/tempest/openstack/common/timeutils.py
+++ b/tempest/openstack/common/timeutils.py
@@ -21,6 +21,7 @@
import calendar
import datetime
+import time
import iso8601
import six
@@ -49,9 +50,9 @@
try:
return iso8601.parse_date(timestr)
except iso8601.ParseError as e:
- raise ValueError(e.message)
+ raise ValueError(unicode(e))
except TypeError as e:
- raise ValueError(e.message)
+ raise ValueError(unicode(e))
def strtime(at=None, fmt=PERFECT_TIME_FORMAT):
@@ -90,6 +91,11 @@
def utcnow_ts():
"""Timestamp version of our utcnow function."""
+ if utcnow.override_time is None:
+ # NOTE(kgriffs): This is several times faster
+ # than going through calendar.timegm(...)
+ return int(time.time())
+
return calendar.timegm(utcnow().timetuple())
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 7681f04..21c37b9 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -16,6 +16,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import logging
import os
import subprocess
@@ -28,6 +29,7 @@
from neutronclient.common import exceptions as exc
import neutronclient.v2_0.client
import novaclient.client
+from novaclient import exceptions as nova_exceptions
from tempest.api.network import common as net_common
from tempest.common import isolated_creds
@@ -36,11 +38,18 @@
from tempest.common.utils.linux.remote_client import RemoteClient
from tempest import exceptions
import tempest.manager
-from tempest.openstack.common import log as logging
+from tempest.openstack.common import log
import tempest.test
-LOG = logging.getLogger(__name__)
+LOG = log.getLogger(__name__)
+
+# NOTE(afazekas): Workaround for the stdout logging
+LOG_nova_client = logging.getLogger('novaclient.client')
+LOG_nova_client.addHandler(log.NullHandler())
+
+LOG_cinder_client = logging.getLogger('cinderclient.client')
+LOG_cinder_client.addHandler(log.NullHandler())
class OfficialClientManager(tempest.manager.Manager):
@@ -88,7 +97,8 @@
*client_args,
service_type=service_type,
no_cache=True,
- insecure=dscv)
+ insecure=dscv,
+ http_log_debug=True)
def _get_image_client(self):
token = self.identity_client.auth_token
@@ -104,7 +114,8 @@
username,
password,
tenant_name,
- auth_url)
+ auth_url,
+ http_log_debug=True)
def _get_orchestration_client(self, username=None, password=None,
tenant_name=None):
@@ -276,27 +287,57 @@
expected status to show. At any time, if the returned
status of the thing is ERROR, fail out.
"""
+ self._status_timeout(things, thing_id, expected_status=expected_status)
+
+ def delete_timeout(self, things, thing_id):
+ """
+ Given a thing, do a loop, sleeping
+ for a configurable amount of time, checking for the
+ deleted status to show. At any time, if the returned
+ status of the thing is ERROR, fail out.
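+
+        e.g. delete_timeout(self.compute_client.servers, server.id)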
+ """
+ self._status_timeout(things,
+ thing_id,
+ allow_notfound=True)
+
+ def _status_timeout(self,
+ things,
+ thing_id,
+ expected_status=None,
+ allow_notfound=False):
+
+ log_status = expected_status if expected_status else ''
+ if allow_notfound:
+ log_status += ' or NotFound' if log_status != '' else 'NotFound'
+
def check_status():
# python-novaclient has resources available to its client
# that all implement a get() method taking an identifier
# for the singular resource to retrieve.
- thing = things.get(thing_id)
+ try:
+ thing = things.get(thing_id)
+ except nova_exceptions.NotFound:
+ if allow_notfound:
+ return True
+ else:
+ raise
+
new_status = thing.status
if new_status == 'ERROR':
message = "%s failed to get to expected status. \
In ERROR state." % (thing)
raise exceptions.BuildErrorException(message)
- elif new_status == expected_status:
+ elif new_status == expected_status and expected_status is not None:
return True # All good.
LOG.debug("Waiting for %s to get to %s status. "
"Currently in %s status",
- thing, expected_status, new_status)
+ thing, log_status, new_status)
if not tempest.test.call_until_true(
check_status,
self.config.compute.build_timeout,
self.config.compute.build_interval):
message = "Timed out waiting for thing %s \
- to become %s" % (thing_id, expected_status)
+ to become %s" % (thing_id, log_status)
raise exceptions.TimeoutException(message)
def create_loginable_secgroup_rule(self, client=None, secgroup_id=None):
@@ -361,7 +402,7 @@
client = self.volume_client
if name is None:
name = rand_name('scenario-volume-')
- LOG.debug("Creating a volume (size :%s, name: %s)", size, name)
+ LOG.debug("Creating a volume (size: %s, name: %s)", size, name)
volume = client.volumes.create(size=size, display_name=name,
snapshot_id=snapshot_id,
imageRef=imageRef)
diff --git a/tempest/scenario/orchestration/test_autoscaling.py b/tempest/scenario/orchestration/test_autoscaling.py
index 78025ee..1a4d802 100644
--- a/tempest/scenario/orchestration/test_autoscaling.py
+++ b/tempest/scenario/orchestration/test_autoscaling.py
@@ -12,10 +12,12 @@
# License for the specific language governing permissions and limitations
# under the License.
+import time
+
from tempest.scenario import manager
from tempest.test import attr
from tempest.test import call_until_true
-import time
+from tempest.test import services
class AutoScalingTest(manager.OrchestrationScenarioTest):
@@ -58,6 +60,7 @@
self.set_resource('stack', self.stack)
@attr(type='slow')
+ @services('orchestration', 'compute')
def test_scale_up_then_down(self):
self.assign_keypair()
@@ -82,7 +85,7 @@
def server_count():
# the number of servers is the number of resources
- # in the nexted stack
+ # in the nested stack
self.server_count = len(
self.client.resources.list(nested_stack_id))
return self.server_count
diff --git a/tempest/scenario/test_dashboard_basic_ops.py b/tempest/scenario/test_dashboard_basic_ops.py
index 9a45572..1081a3e 100644
--- a/tempest/scenario/test_dashboard_basic_ops.py
+++ b/tempest/scenario/test_dashboard_basic_ops.py
@@ -20,6 +20,7 @@
from lxml import html
from tempest.scenario import manager
+from tempest.test import services
class TestDashboardBasicOps(manager.OfficialClientTest):
@@ -66,6 +67,7 @@
response = self.opener.open(self.config.dashboard.dashboard_url)
self.assertIn('Overview', response.read())
+ @services('dashboard')
def test_basic_scenario(self):
self.check_login_page()
self.user_login()
diff --git a/tempest/scenario/test_large_ops.py b/tempest/scenario/test_large_ops.py
index 39b1e10..33b7adc 100644
--- a/tempest/scenario/test_large_ops.py
+++ b/tempest/scenario/test_large_ops.py
@@ -18,6 +18,7 @@
from tempest.common.utils.data_utils import rand_name
from tempest.openstack.common import log as logging
from tempest.scenario import manager
+from tempest.test import services
LOG = logging.getLogger(__name__)
@@ -96,6 +97,7 @@
self.addCleanup(delete, self.servers)
self._wait_for_server_status('ACTIVE')
+ @services('compute', 'image')
def test_large_ops_scenario(self):
if self.config.scenario.large_ops_number < 1:
return
diff --git a/tempest/scenario/test_minimum_basic.py b/tempest/scenario/test_minimum_basic.py
index 5cddde2..ce4d1bd 100644
--- a/tempest/scenario/test_minimum_basic.py
+++ b/tempest/scenario/test_minimum_basic.py
@@ -18,6 +18,7 @@
from tempest.common.utils.data_utils import rand_name
from tempest.openstack.common import log as logging
from tempest.scenario import manager
+from tempest.test import services
LOG = logging.getLogger(__name__)
@@ -145,6 +146,7 @@
volume = self.volume_client.volumes.get(self.volume.id)
self.assertEqual('available', volume.status)
+ @services('compute', 'volume', 'image', 'network')
def test_minimum_basic_scenario(self):
self.glance_image_create()
self.nova_keypair_add()
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index 70939f6..9d7086c 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -19,8 +19,12 @@
from tempest.api.network import common as net_common
from tempest.common.utils.data_utils import rand_name
from tempest import config
+from tempest.openstack.common import log as logging
from tempest.scenario import manager
from tempest.test import attr
+from tempest.test import services
+
+LOG = logging.getLogger(__name__)
class TestNetworkBasicOps(manager.NetworkScenarioTest):
@@ -58,7 +62,7 @@
Tempest host. A public network is assumed to be reachable from
the Tempest host, and it should be possible to associate a public
('floating') IP address with a tenant ('fixed') IP address to
- faciliate external connectivity to a potentially unroutable
+ facilitate external connectivity to a potentially unroutable
tenant IP address.
This test suite can be configured to test network connectivity to
@@ -158,18 +162,15 @@
self.set_resource(name, router)
return router
- @attr(type='smoke')
- def test_001_create_keypairs(self):
+ def _create_keypairs(self):
self.keypairs[self.tenant_id] = self.create_keypair(
name=rand_name('keypair-smoke-'))
- @attr(type='smoke')
- def test_002_create_security_groups(self):
+ def _create_security_groups(self):
self.security_groups[self.tenant_id] = self._create_security_group(
self.compute_client)
- @attr(type='smoke')
- def test_003_create_networks(self):
+ def _create_networks(self):
network = self._create_network(self.tenant_id)
router = self._get_router(self.tenant_id)
subnet = self._create_subnet(network)
@@ -178,8 +179,7 @@
self.subnets.append(subnet)
self.routers.append(router)
- @attr(type='smoke')
- def test_004_check_networks(self):
+ def _check_networks(self):
# Checks that we see the newly created network/subnet/router via
# checking the result of list_[networks,routers,subnets]
seen_nets = self._list_networks()
@@ -202,10 +202,7 @@
self.assertIn(myrouter.name, seen_router_names)
self.assertIn(myrouter.id, seen_router_ids)
- @attr(type='smoke')
- def test_005_create_servers(self):
- if not (self.keypairs or self.security_groups or self.networks):
- raise self.skipTest('Necessary resources have not been defined')
+ def _create_servers(self):
for i, network in enumerate(self.networks):
tenant_id = network.tenant_id
name = rand_name('server-smoke-%d-' % i)
@@ -222,13 +219,11 @@
create_kwargs=create_kwargs)
self.servers.append(server)
- @attr(type='smoke')
- def test_006_check_tenant_network_connectivity(self):
+ def _check_tenant_network_connectivity(self):
if not self.config.network.tenant_networks_reachable:
msg = 'Tenant networks not configured to be reachable.'
- raise self.skipTest(msg)
- if not self.servers:
- raise self.skipTest("No VM's have been created")
+ LOG.info(msg)
+ return
# The target login is assumed to have been configured for
# key-based authentication by cloud-init.
ssh_login = self.config.compute.image_ssh_user
@@ -239,22 +234,14 @@
self._check_vm_connectivity(ip_address, ssh_login,
private_key)
- @attr(type='smoke')
- def test_007_assign_floating_ips(self):
+ def _assign_floating_ips(self):
public_network_id = self.config.network.public_network_id
- if not public_network_id:
- raise self.skipTest('Public network not configured')
- if not self.servers:
- raise self.skipTest("No VM's have been created")
for server in self.servers:
floating_ip = self._create_floating_ip(server, public_network_id)
self.floating_ips.setdefault(server, [])
self.floating_ips[server].append(floating_ip)
- @attr(type='smoke')
- def test_008_check_public_network_connectivity(self):
- if not self.floating_ips:
- raise self.skipTest('No floating ips have been allocated.')
+ def _check_public_network_connectivity(self):
# The target login is assumed to have been configured for
# key-based authentication by cloud-init.
ssh_login = self.config.compute.image_ssh_user
@@ -263,3 +250,15 @@
for floating_ip in floating_ips:
ip_address = floating_ip.floating_ip_address
self._check_vm_connectivity(ip_address, ssh_login, private_key)
+
+ @attr(type='smoke')
+ @services('compute', 'network')
+ def test_network_basic_ops(self):
+ self._create_keypairs()
+ self._create_security_groups()
+ self._create_networks()
+ self._check_networks()
+ self._create_servers()
+ self._check_tenant_network_connectivity()
+ self._assign_floating_ips()
+ self._check_public_network_connectivity()
diff --git a/tempest/scenario/test_network_quotas.py b/tempest/scenario/test_network_quotas.py
index 267aff6..3268066 100644
--- a/tempest/scenario/test_network_quotas.py
+++ b/tempest/scenario/test_network_quotas.py
@@ -16,7 +16,9 @@
# under the License.
from neutronclient.common import exceptions as exc
+
from tempest.scenario.manager import NetworkScenarioTest
+from tempest.test import services
MAX_REASONABLE_ITERATIONS = 51 # more than enough. Default for port is 50.
@@ -41,6 +43,7 @@
cls.subnets = []
cls.ports = []
+ @services('network')
def test_create_network_until_quota_hit(self):
hit_limit = False
for n in xrange(MAX_REASONABLE_ITERATIONS):
@@ -55,6 +58,7 @@
break
self.assertTrue(hit_limit, "Failed: Did not hit quota limit !")
+ @services('network')
def test_create_subnet_until_quota_hit(self):
if not self.networks:
self.networks.append(
@@ -73,6 +77,7 @@
break
self.assertTrue(hit_limit, "Failed: Did not hit quota limit !")
+ @services('network')
def test_create_ports_until_quota_hit(self):
if not self.networks:
self.networks.append(
diff --git a/tempest/scenario/test_server_advanced_ops.py b/tempest/scenario/test_server_advanced_ops.py
index 8ee740e..cf72cd4 100644
--- a/tempest/scenario/test_server_advanced_ops.py
+++ b/tempest/scenario/test_server_advanced_ops.py
@@ -18,6 +18,7 @@
from tempest.common.utils.data_utils import rand_name
from tempest.openstack.common import log as logging
from tempest.scenario import manager
+from tempest.test import services
LOG = logging.getLogger(__name__)
@@ -45,6 +46,7 @@
msg = "Skipping test - flavor_ref and flavor_ref_alt are identical"
raise cls.skipException(msg)
+ @services('compute')
def test_resize_server_confirm(self):
# We create an instance for use in this test
i_name = rand_name('instance')
@@ -73,6 +75,7 @@
self.status_timeout(
self.compute_client.servers, instance_id, 'ACTIVE')
+ @services('compute')
def test_server_sequence_suspend_resume(self):
# We create an instance for use in this test
i_name = rand_name('instance')
diff --git a/tempest/scenario/test_server_basic_ops.py b/tempest/scenario/test_server_basic_ops.py
index 8e14b06..04204eb 100644
--- a/tempest/scenario/test_server_basic_ops.py
+++ b/tempest/scenario/test_server_basic_ops.py
@@ -18,6 +18,7 @@
from tempest.common.utils.data_utils import rand_name
from tempest.openstack.common import log as logging
from tempest.scenario import manager
+from tempest.test import services
LOG = logging.getLogger(__name__)
@@ -100,6 +101,7 @@
instance.delete()
self.remove_resource('instance')
+ @services('compute', 'network')
def test_server_basicops(self):
self.add_keypair()
self.create_security_group()
diff --git a/tempest/scenario/test_snapshot_pattern.py b/tempest/scenario/test_snapshot_pattern.py
index 003c264..8c2cc76 100644
--- a/tempest/scenario/test_snapshot_pattern.py
+++ b/tempest/scenario/test_snapshot_pattern.py
@@ -16,6 +16,7 @@
# under the License.
from tempest.scenario import manager
+from tempest.test import services
class TestSnapshotPattern(manager.OfficialClientTest):
@@ -61,6 +62,7 @@
def _set_floating_ip_to_server(self, server, floating_ip):
server.add_floating_ip(floating_ip)
+ @services('compute', 'network', 'image')
def test_snapshot_pattern(self):
# prepare for booting a instance
self._add_keypair()
diff --git a/tempest/scenario/test_stamp_pattern.py b/tempest/scenario/test_stamp_pattern.py
index 5af4bb2..c5a4aaf 100644
--- a/tempest/scenario/test_stamp_pattern.py
+++ b/tempest/scenario/test_stamp_pattern.py
@@ -144,6 +144,7 @@
self.assertEqual(self.timestamp, got_timestamp)
@testtools.skip("Skipped until the Bug #1205344 is resolved.")
+ @tempest.test.services('compute', 'network', 'volume', 'image')
def test_stamp_pattern(self):
# prepare for booting a instance
self._add_keypair()
diff --git a/tempest/scenario/test_volume_boot_pattern.py b/tempest/scenario/test_volume_boot_pattern.py
new file mode 100644
index 0000000..3572166
--- /dev/null
+++ b/tempest/scenario/test_volume_boot_pattern.py
@@ -0,0 +1,163 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.common.utils.data_utils import rand_name
+from tempest.scenario import manager
+from tempest.test import services
+
+
+class TestVolumeBootPattern(manager.OfficialClientTest):
+
+ """
+ This test case attempts to reproduce the following steps:
+
+    * Create a bootable volume in Cinder from a Glance image
+ * Boot an instance from the bootable volume
+ * Write content to the volume
+    * Delete the instance and boot a new instance from the volume
+ * Check written content in the instance
+ * Create a volume snapshot while the instance is running
+ * Boot an additional instance from the new snapshot based volume
+ * Check written content in the instance booted from snapshot
+ """
+
+ def _create_volume_from_image(self):
+ img_uuid = self.config.compute.image_ref
+ vol_name = rand_name('volume-origin')
+ return self.create_volume(name=vol_name, imageRef=img_uuid)
+
+ def _boot_instance_from_volume(self, vol_id, keypair):
+ # NOTE(gfidente): the syntax for block_device_mapping is
+ # dev_name=id:type:size:delete_on_terminate
+ # where type needs to be "snap" if the server is booted
+ # from a snapshot, size instead can be safely left empty
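+        # e.g. '<snapshot_id>:snap::0' (illustrative) would boot from a
+        # snapshot-backed volume; here an empty type means a plain volume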
+ bd_map = {
+ 'vda': vol_id + ':::0'
+ }
+ create_kwargs = {
+ 'block_device_mapping': bd_map,
+ 'key_name': keypair.name
+ }
+ return self.create_server(self.compute_client,
+ create_kwargs=create_kwargs)
+
+ def _create_snapshot_from_volume(self, vol_id):
+ volume_snapshots = self.volume_client.volume_snapshots
+ snap_name = rand_name('snapshot')
+ snap = volume_snapshots.create(volume_id=vol_id,
+ force=True,
+ display_name=snap_name)
+ self.set_resource(snap.id, snap)
+ self.status_timeout(volume_snapshots,
+ snap.id,
+ 'available')
+ return snap
+
+ def _create_volume_from_snapshot(self, snap_id):
+ vol_name = rand_name('volume')
+ return self.create_volume(name=vol_name, snapshot_id=snap_id)
+
+ def _stop_instances(self, instances):
+ # NOTE(gfidente): two loops so we do not wait for the status twice
+ for i in instances:
+ self.compute_client.servers.stop(i)
+ for i in instances:
+ self.status_timeout(self.compute_client.servers,
+ i.id,
+ 'SHUTOFF')
+
+ def _detach_volumes(self, volumes):
+ # NOTE(gfidente): two loops so we do not wait for the status twice
+ for v in volumes:
+ self.volume_client.volumes.detach(v)
+ for v in volumes:
+ self.status_timeout(self.volume_client.volumes,
+ v.id,
+ 'available')
+
+ def _ssh_to_server(self, server, keypair):
+ if self.config.compute.use_floatingip_for_ssh:
+ floating_ip = self.compute_client.floating_ips.create()
+ fip_name = rand_name('scenario-fip')
+ self.set_resource(fip_name, floating_ip)
+ server.add_floating_ip(floating_ip)
+ ip = floating_ip.ip
+ else:
+ network_name_for_ssh = self.config.compute.network_for_ssh
+ ip = server.networks[network_name_for_ssh][0]
+
+ client = self.get_remote_client(ip,
+ private_key=keypair.private_key)
+ return client.ssh_client
+
+ def _get_content(self, ssh_client):
+ return ssh_client.exec_command('cat /tmp/text')
+
+ def _write_text(self, ssh_client):
+ text = rand_name('text-')
+ ssh_client.exec_command('echo "%s" > /tmp/text; sync' % (text))
+
+ return self._get_content(ssh_client)
+
+ def _delete_server(self, server):
+ self.compute_client.servers.delete(server)
+ self.delete_timeout(self.compute_client.servers, server.id)
+
+ def _check_content_of_written_file(self, ssh_client, expected):
+ actual = self._get_content(ssh_client)
+ self.assertEqual(expected, actual)
+
+ @services('compute', 'volume', 'image')
+ def test_volume_boot_pattern(self):
+ keypair = self.create_keypair()
+ self.create_loginable_secgroup_rule()
+
+ # create an instance from volume
+ volume_origin = self._create_volume_from_image()
+ instance_1st = self._boot_instance_from_volume(volume_origin.id,
+ keypair)
+
+ # write content to volume on instance
+ ssh_client_for_instance_1st = self._ssh_to_server(instance_1st,
+ keypair)
+ text = self._write_text(ssh_client_for_instance_1st)
+
+ # delete instance
+ self._delete_server(instance_1st)
+
+ # create a 2nd instance from volume
+ instance_2nd = self._boot_instance_from_volume(volume_origin.id,
+ keypair)
+
+ # check the content of written file
+ ssh_client_for_instance_2nd = self._ssh_to_server(instance_2nd,
+ keypair)
+ self._check_content_of_written_file(ssh_client_for_instance_2nd, text)
+
+ # snapshot a volume
+ snapshot = self._create_snapshot_from_volume(volume_origin.id)
+
+ # create a 3rd instance from snapshot
+ volume = self._create_volume_from_snapshot(snapshot.id)
+ instance_from_snapshot = self._boot_instance_from_volume(volume.id,
+ keypair)
+
+ # check the content of written file
+ ssh_client = self._ssh_to_server(instance_from_snapshot, keypair)
+ self._check_content_of_written_file(ssh_client, text)
+
+ # NOTE(gfidente): ensure resources are in clean state for
+ # deletion operations to succeed
+ self._stop_instances([instance_2nd, instance_from_snapshot])
+ self._detach_volumes([volume_origin, volume])
diff --git a/tempest/scenario/test_volume_snapshot_pattern.py b/tempest/scenario/test_volume_snapshot_pattern.py
deleted file mode 100644
index d873d30..0000000
--- a/tempest/scenario/test_volume_snapshot_pattern.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.common.utils.data_utils import rand_name
-from tempest.scenario import manager
-
-
-class TestVolumeSnapshotPattern(manager.OfficialClientTest):
-
- """
- This test case attempts to reproduce the following steps:
-
- * Create in Cinder some bootable volume importing a Glance image
- * Boot an instance from the bootable volume
- * Create a volume snapshot while the instance is running
- * Boot an additional instance from the new snapshot based volume
- """
-
- def _create_volume_from_image(self):
- img_uuid = self.config.compute.image_ref
- vol_name = rand_name('volume-origin')
- return self.create_volume(name=vol_name, imageRef=img_uuid)
-
- def _boot_instance_from_volume(self, vol_id):
- # NOTE(gfidente): the syntax for block_device_mapping is
- # dev_name=id:type:size:delete_on_terminate
- # where type needs to be "snap" if the server is booted
- # from a snapshot, size instead can be safely left empty
- bd_map = {
- 'vda': vol_id + ':::0'
- }
- create_kwargs = {
- 'block_device_mapping': bd_map
- }
- return self.create_server(self.compute_client,
- create_kwargs=create_kwargs)
-
- def _create_snapshot_from_volume(self, vol_id):
- volume_snapshots = self.volume_client.volume_snapshots
- snap_name = rand_name('snapshot')
- snap = volume_snapshots.create(volume_id=vol_id,
- force=True,
- display_name=snap_name)
- self.set_resource(snap.id, snap)
- self.status_timeout(volume_snapshots,
- snap.id,
- 'available')
- return snap
-
- def _create_volume_from_snapshot(self, snap_id):
- vol_name = rand_name('volume')
- return self.create_volume(name=vol_name, snapshot_id=snap_id)
-
- def _stop_instances(self, instances):
- # NOTE(gfidente): two loops so we do not wait for the status twice
- for i in instances:
- self.compute_client.servers.stop(i)
- for i in instances:
- self.status_timeout(self.compute_client.servers,
- i.id,
- 'SHUTOFF')
-
- def _detach_volumes(self, volumes):
- # NOTE(gfidente): two loops so we do not wait for the status twice
- for v in volumes:
- self.volume_client.volumes.detach(v)
- for v in volumes:
- self.status_timeout(self.volume_client.volumes,
- v.id,
- 'available')
-
- def test_volume_snapshot_pattern(self):
- volume_origin = self._create_volume_from_image()
- i_origin = self._boot_instance_from_volume(volume_origin.id)
- snapshot = self._create_snapshot_from_volume(volume_origin.id)
- volume = self._create_volume_from_snapshot(snapshot.id)
- i = self._boot_instance_from_volume(volume.id)
- # NOTE(gfidente): ensure resources are in clean state for
- # deletion operations to succeed
- self._stop_instances([i_origin, i])
- self._detach_volumes([volume_origin, volume])
diff --git a/tempest/services/compute/json/servers_client.py b/tempest/services/compute/json/servers_client.py
index 6906610..1f2daec 100644
--- a/tempest/services/compute/json/servers_client.py
+++ b/tempest/services/compute/json/servers_client.py
@@ -21,6 +21,7 @@
import urllib
from tempest.common.rest_client import RestClient
+from tempest.common import waiters
from tempest import exceptions
@@ -152,26 +153,7 @@
def wait_for_server_status(self, server_id, status):
"""Waits for a server to reach a given status."""
- resp, body = self.get_server(server_id)
- server_status = body['status']
- start = int(time.time())
-
- while(server_status != status):
- time.sleep(self.build_interval)
- resp, body = self.get_server(server_id)
- server_status = body['status']
-
- if server_status == 'ERROR':
- raise exceptions.BuildErrorException(server_id=server_id)
-
- timed_out = int(time.time()) - start >= self.build_timeout
-
- if server_status != status and timed_out:
- message = ('Server %s failed to reach %s status within the '
- 'required time (%s s).' %
- (server_id, status, self.build_timeout))
- message += ' Current status: %s.' % server_status
- raise exceptions.TimeoutException(message)
+ return waiters.wait_for_server_status(self, server_id, status)
def wait_for_server_termination(self, server_id, ignore_error=False):
"""Waits for server to reach termination."""
diff --git a/tempest/services/compute/xml/servers_client.py b/tempest/services/compute/xml/servers_client.py
index 5c7a629..bf72bdc 100644
--- a/tempest/services/compute/xml/servers_client.py
+++ b/tempest/services/compute/xml/servers_client.py
@@ -22,6 +22,7 @@
from lxml import etree
from tempest.common.rest_client import RestClientXML
+from tempest.common import waiters
from tempest import exceptions
from tempest.openstack.common import log as logging
from tempest.services.compute.xml.common import Document
@@ -46,9 +47,14 @@
# expanded xml namespace.
type_ns_prefix = ('{http://docs.openstack.org/compute/ext/extended_ips/'
'api/v1.1}type')
+ mac_ns_prefix = ('{http://docs.openstack.org/compute/ext/extended_ips_mac'
+ '/api/v1.1}mac_addr')
+
if type_ns_prefix in ip:
- ip['OS-EXT-IPS:type'] = ip[type_ns_prefix]
- ip.pop(type_ns_prefix)
+ ip['OS-EXT-IPS:type'] = ip.pop(type_ns_prefix)
+
+ if mac_ns_prefix in ip:
+ ip['OS-EXT-IPS-MAC:mac_addr'] = ip.pop(mac_ns_prefix)
return ip
@@ -101,11 +107,35 @@
json['addresses'] = json_addresses
else:
json = xml_to_json(xml_dom)
- diskConfig = '{http://docs.openstack.org/compute/ext/disk_config/api/v1.1'\
- '}diskConfig'
+ diskConfig = ('{http://docs.openstack.org'
+ '/compute/ext/disk_config/api/v1.1}diskConfig')
+ terminated_at = ('{http://docs.openstack.org/'
+ 'compute/ext/server_usage/api/v1.1}terminated_at')
+ launched_at = ('{http://docs.openstack.org'
+ '/compute/ext/server_usage/api/v1.1}launched_at')
+ power_state = ('{http://docs.openstack.org'
+ '/compute/ext/extended_status/api/v1.1}power_state')
+ availability_zone = ('{http://docs.openstack.org'
+ '/compute/ext/extended_availability_zone/api/v2}'
+ 'availability_zone')
+ vm_state = ('{http://docs.openstack.org'
+ '/compute/ext/extended_status/api/v1.1}vm_state')
+ task_state = ('{http://docs.openstack.org'
+ '/compute/ext/extended_status/api/v1.1}task_state')
if diskConfig in json:
- json['OS-DCF:diskConfig'] = json[diskConfig]
- del json[diskConfig]
+ json['OS-DCF:diskConfig'] = json.pop(diskConfig)
+ if terminated_at in json:
+ json['OS-SRV-USG:terminated_at'] = json.pop(terminated_at)
+ if launched_at in json:
+ json['OS-SRV-USG:launched_at'] = json.pop(launched_at)
+ if power_state in json:
+ json['OS-EXT-STS:power_state'] = json.pop(power_state)
+ if availability_zone in json:
+ json['OS-EXT-AZ:availability_zone'] = json.pop(availability_zone)
+ if vm_state in json:
+ json['OS-EXT-STS:vm_state'] = json.pop(vm_state)
+ if task_state in json:
+ json['OS-EXT-STS:task_state'] = json.pop(task_state)
return json
@@ -307,26 +337,7 @@
def wait_for_server_status(self, server_id, status):
"""Waits for a server to reach a given status."""
- resp, body = self.get_server(server_id)
- server_status = body['status']
- start = int(time.time())
-
- while(server_status != status):
- time.sleep(self.build_interval)
- resp, body = self.get_server(server_id)
- server_status = body['status']
-
- if server_status == 'ERROR':
- raise exceptions.BuildErrorException(server_id=server_id)
-
- timed_out = int(time.time()) - start >= self.build_timeout
-
- if server_status != status and timed_out:
- message = ('Server %s failed to reach %s status within the '
- 'required time (%s s).' %
- (server_id, status, self.build_timeout))
- message += ' Current status: %s.' % server_status
- raise exceptions.TimeoutException(message)
+ return waiters.wait_for_server_status(self, server_id, status)
def wait_for_server_termination(self, server_id, ignore_error=False):
"""Waits for server to reach termination."""
diff --git a/tempest/services/identity/v3/json/credentials_client.py b/tempest/services/identity/v3/json/credentials_client.py
new file mode 100644
index 0000000..c3f788a
--- /dev/null
+++ b/tempest/services/identity/v3/json/credentials_client.py
@@ -0,0 +1,97 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+from urlparse import urlparse
+
+from tempest.common.rest_client import RestClient
+
+
+class CredentialsClientJSON(RestClient):
+
+ def __init__(self, config, username, password, auth_url, tenant_name=None):
+ super(CredentialsClientJSON, self).__init__(config, username, password,
+ auth_url, tenant_name)
+ self.service = self.config.identity.catalog_type
+ self.endpoint_url = 'adminURL'
+
+ def request(self, method, url, headers=None, body=None, wait=None):
+ """Overriding the existing HTTP request in super class rest_client."""
+ self._set_auth()
+ self.base_url = self.base_url.replace(urlparse(self.base_url).path,
+ "/v3")
+ return super(CredentialsClientJSON, self).request(method, url,
+ headers=headers,
+ body=body)
+
+ def create_credential(self, access_key, secret_key, user_id, project_id):
+ """Creates a credential."""
+ blob = "{\"access\": \"%s\", \"secret\": \"%s\"}" % (
+ access_key, secret_key)
+ post_body = {
+ "blob": blob,
+ "project_id": project_id,
+ "type": "ec2",
+ "user_id": user_id
+ }
+ post_body = json.dumps({'credential': post_body})
+ resp, body = self.post('credentials', post_body,
+ self.headers)
+ body = json.loads(body)
+ body['credential']['blob'] = json.loads(body['credential']['blob'])
+ return resp, body['credential']
+
+ def update_credential(self, credential_id, **kwargs):
+ """Updates a credential."""
+ resp, body = self.get_credential(credential_id)
+ cred_type = kwargs.get('type', body['type'])
+ access_key = kwargs.get('access_key', body['blob']['access'])
+ secret_key = kwargs.get('secret_key', body['blob']['secret'])
+ project_id = kwargs.get('project_id', body['project_id'])
+ user_id = kwargs.get('user_id', body['user_id'])
+ blob = "{\"access\": \"%s\", \"secret\": \"%s\"}" % (
+ access_key, secret_key)
+ post_body = {
+ "blob": blob,
+ "project_id": project_id,
+ "type": cred_type,
+ "user_id": user_id
+ }
+ post_body = json.dumps({'credential': post_body})
+ resp, body = self.patch('credentials/%s' % credential_id, post_body,
+ self.headers)
+ body = json.loads(body)
+ body['credential']['blob'] = json.loads(body['credential']['blob'])
+ return resp, body['credential']
+
+ def get_credential(self, credential_id):
+ """To GET Details of a credential."""
+ resp, body = self.get('credentials/%s' % credential_id)
+ body = json.loads(body)
+ body['credential']['blob'] = json.loads(body['credential']['blob'])
+ return resp, body['credential']
+
+ def list_credentials(self):
+ """Lists out all the available credentials."""
+ resp, body = self.get('credentials')
+ body = json.loads(body)
+ return resp, body['credentials']
+
+ def delete_credential(self, credential_id):
+ """Deletes a credential."""
+ resp, body = self.delete('credentials/%s' % credential_id)
+ return resp, body
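
A short usage sketch for the new JSON credentials client follows. The configuration object, admin credentials and the user/project ids are assumed to come from the usual test setup, and the returned body is assumed to carry the credential 'id' as Keystone v3 responses do; only the method calls themselves come from the patch.

    creds_client = CredentialsClientJSON(config, admin_user, admin_password,
                                         auth_url, tenant_name=admin_tenant)

    # Create an EC2-style credential for a user in a project.
    resp, cred = creds_client.create_credential('access-key', 'secret-key',
                                                user_id, project_id)

    # Rotate only the secret; the other fields are re-read from the server.
    resp, cred = creds_client.update_credential(cred['id'],
                                                secret_key='new-secret-key')

    # The blob is decoded for the caller, so the secret is directly visible.
    resp, cred = creds_client.get_credential(cred['id'])
    assert cred['blob']['secret'] == 'new-secret-key'

    creds_client.delete_credential(cred['id'])
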
diff --git a/tempest/services/identity/v3/xml/credentials_client.py b/tempest/services/identity/v3/xml/credentials_client.py
new file mode 100644
index 0000000..dc0ade1
--- /dev/null
+++ b/tempest/services/identity/v3/xml/credentials_client.py
@@ -0,0 +1,121 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+from urlparse import urlparse
+
+from lxml import etree
+
+from tempest.common.rest_client import RestClientXML
+from tempest.services.compute.xml.common import Document
+from tempest.services.compute.xml.common import Element
+from tempest.services.compute.xml.common import Text
+from tempest.services.compute.xml.common import xml_to_json
+
+
+XMLNS = "http://docs.openstack.org/identity/api/v3"
+
+
+class CredentialsClientXML(RestClientXML):
+
+ def __init__(self, config, username, password, auth_url, tenant_name=None):
+ super(CredentialsClientXML, self).__init__(config, username, password,
+ auth_url, tenant_name)
+ self.service = self.config.identity.catalog_type
+ self.endpoint_url = 'adminURL'
+
+ def request(self, method, url, headers=None, body=None, wait=None):
+ """Overriding the existing HTTP request in super class rest_client."""
+ self._set_auth()
+ self.base_url = self.base_url.replace(urlparse(self.base_url).path,
+ "/v3")
+ return super(CredentialsClientXML, self).request(method, url,
+ headers=headers,
+ body=body)
+
+ def _parse_body(self, body):
+ data = xml_to_json(body)
+ return data
+
+ def _parse_creds(self, node):
+ array = []
+ for child in node.getchildren():
+ tag_list = child.tag.split('}', 1)
+ if tag_list[1] == "credential":
+ array.append(xml_to_json(child))
+ return array
+
+ def create_credential(self, access_key, secret_key, user_id, project_id):
+ """Creates a credential."""
+ cred_type = 'ec2'
+ access = ""access": "%s"" % access_key
+ secret = ""secret": "%s"" % secret_key
+ blob = Element('blob',
+ xmlns=XMLNS)
+ blob.append(Text("{%s , %s}"
+ % (access, secret)))
+ credential = Element('credential', project_id=project_id,
+ type=cred_type, user_id=user_id)
+ credential.append(blob)
+ resp, body = self.post('credentials', str(Document(credential)),
+ self.headers)
+ body = self._parse_body(etree.fromstring(body))
+ body['blob'] = json.loads(body['blob'])
+ return resp, body
+
+ def update_credential(self, credential_id, **kwargs):
+ """Updates a credential."""
+ resp, body = self.get_credential(credential_id)
+ cred_type = kwargs.get('type', body['type'])
+ access_key = kwargs.get('access_key', body['blob']['access'])
+ secret_key = kwargs.get('secret_key', body['blob']['secret'])
+ project_id = kwargs.get('project_id', body['project_id'])
+ user_id = kwargs.get('user_id', body['user_id'])
+ access = ""access": "%s"" % access_key
+ secret = ""secret": "%s"" % secret_key
+ blob = Element('blob',
+ xmlns=XMLNS)
+ blob.append(Text("{%s , %s}"
+ % (access, secret)))
+ credential = Element('credential', project_id=project_id,
+ type=cred_type, user_id=user_id)
+ credential.append(blob)
+ resp, body = self.patch('credentials/%s' % credential_id,
+ str(Document(credential)),
+ self.headers)
+ body = self._parse_body(etree.fromstring(body))
+ body['blob'] = json.loads(body['blob'])
+ return resp, body
+
+ def get_credential(self, credential_id):
+ """To GET Details of a credential."""
+ resp, body = self.get('credentials/%s' % credential_id, self.headers)
+ body = self._parse_body(etree.fromstring(body))
+ body['blob'] = json.loads(body['blob'])
+ return resp, body
+
+ def list_credentials(self):
+ """Lists out all the available credentials."""
+ resp, body = self.get('credentials', self.headers)
+ body = self._parse_creds(etree.fromstring(body))
+ return resp, body
+
+ def delete_credential(self, credential_id):
+ """Deletes a credential."""
+ resp, body = self.delete('credentials/%s' % credential_id,
+ self.headers)
+ return resp, body
diff --git a/tempest/services/network/json/network_client.py b/tempest/services/network/json/network_client.py
index ef12a00..bc0a6cf 100644
--- a/tempest/services/network/json/network_client.py
+++ b/tempest/services/network/json/network_client.py
@@ -242,7 +242,7 @@
update_body['admin_state_up'] = kwargs.get(
'admin_state_up', body['router']['admin_state_up'])
# Must uncomment/modify these lines once LP question#233187 is solved
- #update_body['external_gateway_info'] = kwargs.get(
+ # update_body['external_gateway_info'] = kwargs.get(
# 'external_gateway_info', body['router']['external_gateway_info'])
update_body = dict(router=update_body)
update_body = json.dumps(update_body)
@@ -296,18 +296,55 @@
body = json.loads(body)
return resp, body
+ def list_security_groups(self):
+ uri = '%s/security-groups' % (self.uri_prefix)
+ resp, body = self.get(uri, self.headers)
+ body = json.loads(body)
+ return resp, body
+
+ def delete_security_group(self, secgroup_id):
+ uri = '%s/security-groups/%s' % (self.uri_prefix, secgroup_id)
+ resp, body = self.delete(uri, self.headers)
+ return resp, body
+
+ def create_security_group(self, name, **kwargs):
+ post_body = {
+ 'security_group': {
+ 'name': name,
+ }
+ }
+ for key, value in kwargs.iteritems():
+ post_body['security_group'][str(key)] = value
+ body = json.dumps(post_body)
+ uri = '%s/security-groups' % (self.uri_prefix)
+ resp, body = self.post(uri, headers=self.headers, body=body)
+ body = json.loads(body)
+ return resp, body
+
def show_floating_ip(self, floating_ip_id):
uri = '%s/floatingips/%s' % (self.uri_prefix, floating_ip_id)
resp, body = self.get(uri, self.headers)
body = json.loads(body)
return resp, body
+ def show_security_group(self, secgroup_id):
+ uri = '%s/security-groups/%s' % (self.uri_prefix, secgroup_id)
+ resp, body = self.get(uri, self.headers)
+ body = json.loads(body)
+ return resp, body
+
def list_floating_ips(self):
uri = '%s/floatingips' % (self.uri_prefix)
resp, body = self.get(uri, self.headers)
body = json.loads(body)
return resp, body
+ def list_security_group_rules(self):
+ uri = '%s/security-group-rules' % (self.uri_prefix)
+ resp, body = self.get(uri, self.headers)
+ body = json.loads(body)
+ return resp, body
+
def delete_floating_ip(self, floating_ip_id):
uri = '%s/floatingips/%s' % (self.uri_prefix, floating_ip_id)
resp, body = self.delete(uri, self.headers)
@@ -321,3 +358,123 @@
resp, body = self.put(uri, headers=self.headers, body=body)
body = json.loads(body)
return resp, body
+
+ def create_security_group_rule(self, secgroup_id,
+ direction='ingress', **kwargs):
+ post_body = {
+ 'security_group_rule': {
+ 'direction': direction,
+ 'security_group_id': secgroup_id
+ }
+ }
+ for key, value in kwargs.iteritems():
+ post_body['security_group_rule'][str(key)] = value
+ body = json.dumps(post_body)
+ uri = '%s/security-group-rules' % (self.uri_prefix)
+ resp, body = self.post(uri, headers=self.headers, body=body)
+ body = json.loads(body)
+ return resp, body
+
+ def create_bulk_subnet(self, subnet_list):
+ post_body = {'subnets': subnet_list}
+ body = json.dumps(post_body)
+ uri = '%s/subnets' % (self.uri_prefix)
+ resp, body = self.post(uri, headers=self.headers, body=body)
+ body = json.loads(body)
+ return resp, body
+
+ def delete_security_group_rule(self, rule_id):
+ uri = '%s/security-group-rules/%s' % (self.uri_prefix, rule_id)
+ resp, body = self.delete(uri, self.headers)
+ return resp, body
+
+ def show_security_group_rule(self, rule_id):
+ uri = '%s/security-group-rules/%s' % (self.uri_prefix, rule_id)
+ resp, body = self.get(uri, self.headers)
+ body = json.loads(body)
+ return resp, body
+
+ def create_bulk_port(self, port_list):
+ post_body = {'ports': port_list}
+ body = json.dumps(post_body)
+ uri = '%s/ports' % (self.uri_prefix)
+ resp, body = self.post(uri, headers=self.headers, body=body)
+ body = json.loads(body)
+ return resp, body
+
+ def list_vips(self):
+ uri = '%s/lb/vips' % (self.uri_prefix)
+ resp, body = self.get(uri, self.headers)
+ body = json.loads(body)
+ return resp, body
+
+ def create_vip(self, name, protocol, protocol_port, subnet_id, pool_id):
+ post_body = {
+ "vip": {
+ "protocol": protocol,
+ "name": name,
+ "subnet_id": subnet_id,
+ "pool_id": pool_id,
+ "protocol_port": protocol_port
+ }
+ }
+ body = json.dumps(post_body)
+ uri = '%s/lb/vips' % (self.uri_prefix)
+ resp, body = self.post(uri, headers=self.headers, body=body)
+ body = json.loads(body)
+ return resp, body
+
+ def create_pool(self, name, lb_method, protocol, subnet_id):
+ post_body = {
+ "pool": {
+ "protocol": protocol,
+ "name": name,
+ "subnet_id": subnet_id,
+ "lb_method": lb_method
+ }
+ }
+ body = json.dumps(post_body)
+ uri = '%s/lb/pools' % (self.uri_prefix)
+ resp, body = self.post(uri, headers=self.headers, body=body)
+ body = json.loads(body)
+ return resp, body
+
+ def show_vip(self, uuid):
+ uri = '%s/lb/vips/%s' % (self.uri_prefix, uuid)
+ resp, body = self.get(uri, self.headers)
+ body = json.loads(body)
+ return resp, body
+
+ def delete_vip(self, uuid):
+ uri = '%s/lb/vips/%s' % (self.uri_prefix, uuid)
+ resp, body = self.delete(uri, self.headers)
+ return resp, body
+
+ def delete_pool(self, uuid):
+ uri = '%s/lb/pools/%s' % (self.uri_prefix, uuid)
+ resp, body = self.delete(uri, self.headers)
+ return resp, body
+
+ def update_vip(self, vip_id, new_name):
+ put_body = {
+ "vip": {
+ "name": new_name,
+ }
+ }
+ body = json.dumps(put_body)
+ uri = '%s/lb/vips/%s' % (self.uri_prefix, vip_id)
+ resp, body = self.put(uri, body=body, headers=self.headers)
+ body = json.loads(body)
+ return resp, body
+
+ def update_pool(self, pool_id, new_name):
+ put_body = {
+ "pool": {
+ "name": new_name,
+ }
+ }
+ body = json.dumps(put_body)
+ uri = '%s/lb/pools/%s' % (self.uri_prefix, pool_id)
+ resp, body = self.put(uri, body=body, headers=self.headers)
+ body = json.loads(body)
+ return resp, body
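
The additions above give the JSON network client basic security group, bulk creation and load-balancer coverage. A hedged usage sketch of the security group helpers follows; 'client' is assumed to be an authenticated NetworkClientJSON, and the rule attributes shown are standard Neutron fields passed through **kwargs.

    resp, body = client.create_security_group('tempest-secgroup',
                                               description='created by tempest')
    secgroup_id = body['security_group']['id']

    # Allow inbound SSH on the new group.
    resp, rule = client.create_security_group_rule(secgroup_id,
                                                   direction='ingress',
                                                   protocol='tcp',
                                                   port_range_min=22,
                                                   port_range_max=22)

    resp, rules = client.list_security_group_rules()

    client.delete_security_group_rule(rule['security_group_rule']['id'])
    client.delete_security_group(secgroup_id)
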
diff --git a/tempest/services/network/xml/network_client.py b/tempest/services/network/xml/network_client.py
index d4fb656..6881479 100755
--- a/tempest/services/network/xml/network_client.py
+++ b/tempest/services/network/xml/network_client.py
@@ -161,6 +161,176 @@
body = _root_tag_fetcher_and_xml_to_json_parse(body)
return resp, body
+ def create_security_group(self, name):
+ uri = '%s/security-groups' % (self.uri_prefix)
+ post_body = Element("security_group")
+ p2 = Element("name", name)
+ post_body.append(p2)
+ resp, body = self.post(uri, str(Document(post_body)), self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def list_security_groups(self):
+ url = '%s/security-groups' % (self.uri_prefix)
+ resp, body = self.get(url, self.headers)
+ secgroups = self._parse_array(etree.fromstring(body))
+ secgroups = {"security_groups": secgroups}
+ return resp, secgroups
+
+ def delete_security_group(self, secgroup_id):
+ uri = '%s/security-groups/%s' % (self.uri_prefix, str(secgroup_id))
+ return self.delete(uri, self.headers)
+
+ def show_security_group(self, secgroup_id):
+ uri = '%s/security-groups/%s' % (self.uri_prefix, str(secgroup_id))
+ resp, body = self.get(uri, self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def list_security_group_rules(self):
+ url = '%s/security-group-rules' % (self.uri_prefix)
+ resp, body = self.get(url, self.headers)
+ rules = self._parse_array(etree.fromstring(body))
+ rules = {"security_group_rules": rules}
+ return resp, rules
+
+ def create_security_group_rule(self, secgroup_id,
+ direction='ingress', **kwargs):
+ uri = '%s/security-group-rules' % (self.uri_prefix)
+ rule = Element("security_group_rule")
+ p1 = Element('security_group_id', secgroup_id)
+ p2 = Element('direction', direction)
+ rule.append(p1)
+ rule.append(p2)
+ for key, val in kwargs.items():
+ key = Element(key, val)
+ rule.append(key)
+ resp, body = self.post(uri, str(Document(rule)), self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def delete_security_group_rule(self, rule_id):
+ uri = '%s/security-group-rules/%s' % (self.uri_prefix, str(rule_id))
+ return self.delete(uri, self.headers)
+
+ def show_security_group_rule(self, rule_id):
+ uri = '%s/security-group-rules/%s' % (self.uri_prefix, str(rule_id))
+ resp, body = self.get(uri, self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def create_bulk_subnet(self, subnet_list):
+ uri = '%s/subnets' % (self.uri_prefix)
+ post_body = Element("subnets")
+ for i in range(len(subnet_list)):
+ v = subnet_list[i]
+ p1 = Element("subnet")
+ for k, kv in v.iteritems():
+ p2 = Element(k, kv)
+ p1.append(p2)
+ post_body.append(p1)
+ resp, body = self.post(uri, str(Document(post_body)), self.headers)
+ subnets = self._parse_array(etree.fromstring(body))
+ subnets = {"subnets": subnets}
+ return resp, subnets
+
+ def create_bulk_port(self, port_list):
+ uri = '%s/ports' % (self.uri_prefix)
+ post_body = Element("ports")
+ for i in range(len(port_list)):
+ v = port_list[i]
+ p1 = Element("port")
+ for k, kv in v.iteritems():
+ p2 = Element(k, kv)
+ p1.append(p2)
+ post_body.append(p1)
+ resp, body = self.post(uri, str(Document(post_body)), self.headers)
+ ports = self._parse_array(etree.fromstring(body))
+ ports = {"ports": ports}
+ return resp, ports
+
+ def list_vips(self):
+ url = '%s/lb/vips' % (self.uri_prefix)
+ resp, body = self.get(url, self.headers)
+ vips = self._parse_array(etree.fromstring(body))
+ vips = {"vips": vips}
+ return resp, vips
+
+ def create_vip(self, name, protocol, protocol_port, subnet_id, pool_id):
+ uri = '%s/lb/vips' % (self.uri_prefix)
+ post_body = Element("vip")
+ p1 = Element("name", name)
+ p2 = Element("protocol", protocol)
+ p3 = Element("protocol_port", protocol_port)
+ p4 = Element("subnet_id", subnet_id)
+ p5 = Element("pool_id", pool_id)
+ post_body.append(p1)
+ post_body.append(p2)
+ post_body.append(p3)
+ post_body.append(p4)
+ post_body.append(p5)
+ resp, body = self.post(uri, str(Document(post_body)), self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def delete_vip(self, vip_id):
+ uri = '%s/lb/vips/%s' % (self.uri_prefix, str(vip_id))
+ return self.delete(uri, self.headers)
+
+ def show_vip(self, vip_id):
+ uri = '%s/lb/vips/%s' % (self.uri_prefix, str(vip_id))
+ resp, body = self.get(uri, self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def update_vip(self, vip_id, new_name):
+ uri = '%s/lb/vips/%s' % (self.uri_prefix, str(vip_id))
+ put_body = Element("vip")
+ p2 = Element("name", new_name)
+ put_body.append(p2)
+ resp, body = self.put(uri, str(Document(put_body)), self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def list_pools(self):
+ url = '%s/lb/pools' % (self.uri_prefix)
+ resp, body = self.get(url, self.headers)
+ pools = self._parse_array(etree.fromstring(body))
+ pools = {"pools": pools}
+ return resp, pools
+
+ def create_pool(self, name, lb_method, protocol, subnet_id):
+ uri = '%s/lb/pools' % (self.uri_prefix)
+ post_body = Element("pool")
+ p1 = Element("lb_method", lb_method)
+ p2 = Element("protocol", protocol)
+ p3 = Element("subnet_id", subnet_id)
+ post_body.append(p1)
+ post_body.append(p2)
+ post_body.append(p3)
+ resp, body = self.post(uri, str(Document(post_body)), self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def delete_pool(self, pool_id):
+ uri = '%s/lb/pools/%s' % (self.uri_prefix, str(pool_id))
+ return self.delete(uri, self.headers)
+
+ def show_pool(self, pool_id):
+ uri = '%s/lb/pools/%s' % (self.uri_prefix, str(pool_id))
+ resp, body = self.get(uri, self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def update_pool(self, pool_id, new_name):
+ uri = '%s/lb/pools/%s' % (self.uri_prefix, str(pool_id))
+ put_body = Element("pool")
+ p2 = Element("name", new_name)
+ put_body.append(p2)
+ resp, body = self.put(uri, str(Document(put_body)), self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
def _root_tag_fetcher_and_xml_to_json_parse(xml_returned_body):
body = ET.fromstring(xml_returned_body)
diff --git a/tempest/services/object_storage/container_client.py b/tempest/services/object_storage/container_client.py
index dd5f3ec..75f7a33 100644
--- a/tempest/services/object_storage/container_client.py
+++ b/tempest/services/object_storage/container_client.py
@@ -35,7 +35,7 @@
metadata_prefix='X-Container-Meta-'):
"""
Creates a container, with optional metadata passed in as a
- dictonary
+ dictionary
"""
url = str(container_name)
headers = {}
@@ -92,9 +92,9 @@
"""
Returns complete list of all objects in the container, even if
item count is beyond 10,000 item listing limit.
- Does not require any paramaters aside from container name.
+ Does not require any parameters aside from container name.
"""
- # TODO(dwalleck): Rewite using json format to avoid newlines at end of
+ # TODO(dwalleck): Rewrite using json format to avoid newlines at end of
# obj names. Set limit to API limit - 1 (max returned items = 9999)
limit = 9999
if params is not None:
diff --git a/tempest/services/object_storage/object_client.py b/tempest/services/object_storage/object_client.py
index 1c97869..c605a45 100644
--- a/tempest/services/object_storage/object_client.py
+++ b/tempest/services/object_storage/object_client.py
@@ -126,7 +126,7 @@
return resp, body
def get_object_using_temp_url(self, container, object_name, expires, key):
- """Retrieve object's data using temp URL."""
+ """Retrieve object's data using temporary URL."""
self._set_auth()
method = 'GET'
diff --git a/tempest/services/volume/json/volumes_client.py b/tempest/services/volume/json/volumes_client.py
index 2ae73b1..c35452e 100644
--- a/tempest/services/volume/json/volumes_client.py
+++ b/tempest/services/volume/json/volumes_client.py
@@ -89,10 +89,11 @@
"""Deletes the Specified Volume."""
return self.delete("volumes/%s" % str(volume_id))
- def upload_volume(self, volume_id, image_name):
+ def upload_volume(self, volume_id, image_name, disk_format):
"""Uploads a volume in Glance."""
post_body = {
'image_name': image_name,
+ 'disk_format': disk_format
}
post_body = json.dumps({'os-volume_upload_image': post_body})
url = 'volumes/%s/action' % (volume_id)
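
With this change upload_volume requires the image disk format, so existing callers need an extra argument. A minimal call sketch, with the client and volume variables assumed from the surrounding test:

    # 'raw' could equally be another Glance disk format such as 'qcow2'.
    resp, body = volumes_client.upload_volume(volume['id'],
                                              'tempest-exported-image',
                                              'raw')
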
diff --git a/tempest/services/volume/xml/snapshots_client.py b/tempest/services/volume/xml/snapshots_client.py
index 51c46da..3596017 100644
--- a/tempest/services/volume/xml/snapshots_client.py
+++ b/tempest/services/volume/xml/snapshots_client.py
@@ -81,7 +81,7 @@
display_name: Optional snapshot Name.
display_description: User friendly snapshot description.
"""
- # NOTE(afazekas): it should use the volume namaspace
+ # NOTE(afazekas): it should use the volume namespace
snapshot = Element("snapshot", xmlns=XMLNS_11, volume_id=volume_id)
for key, value in kwargs.items():
snapshot.add_attr(key, value)
diff --git a/tempest/services/volume/xml/volumes_client.py b/tempest/services/volume/xml/volumes_client.py
index 936e036..9fa7a1e 100644
--- a/tempest/services/volume/xml/volumes_client.py
+++ b/tempest/services/volume/xml/volumes_client.py
@@ -60,6 +60,21 @@
"""Return the element 'attachment' from input volumes."""
return volume['attachments']['attachment']
+ def _check_if_bootable(self, volume):
+ """
+ Check if the volume is bootable and convert the value of
+ 'bootable' from a string to a boolean.
+ """
+ if volume['bootable'] == 'True':
+ volume['bootable'] = True
+ elif volume['bootable'] == 'False':
+ volume['bootable'] = False
+ else:
+ raise ValueError(
+ 'bootable flag is supposed to be either True or False, '
+ 'it is %s' % volume['bootable'])
+ return volume
+
def list_volumes(self, params=None):
"""List all the volumes created."""
url = 'volumes'
@@ -72,6 +87,8 @@
volumes = []
if body is not None:
volumes += [self._parse_volume(vol) for vol in list(body)]
+ for v in volumes:
+ v = self._check_if_bootable(v)
return resp, volumes
def list_volumes_with_detail(self, params=None):
@@ -86,14 +103,17 @@
volumes = []
if body is not None:
volumes += [self._parse_volume(vol) for vol in list(body)]
+ for v in volumes:
+ v = self._check_if_bootable(v)
return resp, volumes
def get_volume(self, volume_id):
"""Returns the details of a single volume."""
url = "volumes/%s" % str(volume_id)
resp, body = self.get(url, self.headers)
- body = etree.fromstring(body)
- return resp, self._parse_volume(body)
+ body = self._parse_volume(etree.fromstring(body))
+ body = self._check_if_bootable(body)
+ return resp, body
def create_volume(self, size, **kwargs):
"""Creates a new Volume.
@@ -183,10 +203,11 @@
body = xml_to_json(etree.fromstring(body))
return resp, body
- def upload_volume(self, volume_id, image_name):
+ def upload_volume(self, volume_id, image_name, disk_format):
"""Uploads a volume in Glance."""
post_body = Element("os-volume_upload_image",
- image_name=image_name)
+ image_name=image_name,
+ disk_format=disk_format)
url = 'volumes/%s/action' % str(volume_id)
resp, body = self.post(url, str(Document(post_body)), self.headers)
volume = xml_to_json(etree.fromstring(body))
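
The new _check_if_bootable helper normalizes the string 'bootable' flag that the XML volume API returns. An illustrative sketch of its effect (the client instance is assumed):

    vol = volumes_client._check_if_bootable({'bootable': 'True'})
    # vol['bootable'] is now the boolean True rather than the string 'True'

    volumes_client._check_if_bootable({'bootable': 'maybe'})
    # raises ValueError, since only 'True' or 'False' are accepted
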
diff --git a/tempest/stress/stressaction.py b/tempest/stress/stressaction.py
index 28251af..45a628d 100644
--- a/tempest/stress/stressaction.py
+++ b/tempest/stress/stressaction.py
@@ -42,7 +42,7 @@
def setUp(self, **kwargs):
"""This method is called before the run method
- to help the test initiatlize any structures.
+ to help the test initialize any structures.
kwargs contains arguments passed in from the
configuration json file.
@@ -59,7 +59,7 @@
def execute(self, shared_statistic):
"""This is the main execution entry point called
by the driver. We register a signal handler to
- allow us to gracefull tearDown, and then exit.
+ allow us to tearDown gracefully, and then exit.
We also keep track of how many runs we do.
"""
signal.signal(signal.SIGHUP, self._shutdown_handler)
diff --git a/tempest/test.py b/tempest/test.py
index decae94..24c4489 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -26,6 +26,7 @@
from tempest import clients
from tempest import config
+from tempest import exceptions
from tempest.openstack.common import log as logging
LOG = logging.getLogger(__name__)
@@ -57,6 +58,25 @@
return decorator
+def services(*args, **kwargs):
+ """A decorator used to set an attr for each service used in a test case
+
+ This decorator applies a testtools attr for each service that gets
+ exercised by a test case.
+ """
+ valid_service_list = ['compute', 'image', 'volume', 'orchestration',
+ 'network', 'identity', 'object', 'dashboard']
+
+ def decorator(f):
+ for service in args:
+ if service not in valid_service_list:
+ raise exceptions.InvalidServiceTag('%s is not a valid service'
+ % service)
+ attr(type=list(args))(f)
+ return f
+ return decorator
+
+
def stresstest(*args, **kwargs):
"""Add stress test decorator
diff --git a/tempest/whitebox/README.rst b/tempest/whitebox/README.rst
deleted file mode 100644
index 0e45421..0000000
--- a/tempest/whitebox/README.rst
+++ /dev/null
@@ -1,46 +0,0 @@
-Tempest Guide to Whitebox tests
-===============================
-
-
-What are these tests?
----------------------
-
-When you hit the OpenStack API, this causes internal state changes in
-the system. This might be database transitions, vm modifications,
-other deep state changes which aren't really accessible from the
-OpenStack API. These side effects are sometimes important to
-validate.
-
-White box testing is an approach there. In white box testing you are
-given database access to the environment, and can verify internal
-record changes after an API call.
-
-This is an optional part of testing, and requires extra setup, but can
-be useful for validating Tempest internals.
-
-
-Why are these tests in tempest?
--------------------------------
-
-Especially when it comes to something like VM state changing, which is
-a coordination of numerous running daemons, and a functioning VM, it's
-very difficult to get a realistic test like this in unit tests.
-
-
-Scope of these tests
---------------------
-
-White box tests should be limitted to tests where black box testing
-(using the OpenStack API to verify results) isn't sufficient.
-
-As these poke at internals of OpenStack, it should also be realized
-that these tests are very tightly coupled to current implementation of
-OpenStack. They will need to be maintained agressively to keep up with
-internals changes in OpenStack projects.
-
-
-Example of a good test
-----------------------
-
-Pushing VMs through a series of state transitions, and ensuring along
-the way the database state transitions match what's expected.
diff --git a/tempest/whitebox/__init__.py b/tempest/whitebox/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/tempest/whitebox/__init__.py
+++ /dev/null
diff --git a/tempest/whitebox/manager.py b/tempest/whitebox/manager.py
deleted file mode 100644
index 3b1b107..0000000
--- a/tempest/whitebox/manager.py
+++ /dev/null
@@ -1,146 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack, LLC
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import os
-import shlex
-import subprocess
-import sys
-
-from sqlalchemy import create_engine, MetaData
-
-from tempest.common.ssh import Client
-from tempest.common.utils.data_utils import rand_name
-from tempest import exceptions
-from tempest.openstack.common import log as logging
-from tempest.scenario import manager
-
-LOG = logging.getLogger(__name__)
-
-
-class WhiteboxTest(object):
-
- """
- Base test case class mixin for "whitebox tests"
-
- Whitebox tests are tests that have the following characteristics:
-
- * Test common and advanced operations against a set of servers
- * Use a client that it is possible to send random or bad data with
- * SSH into either a host or a guest in order to validate server state
- * May execute SQL queries directly against internal databases to verify
- the state of data records
- """
- pass
-
-
-class ComputeWhiteboxTest(manager.OfficialClientTest):
-
- """
- Base smoke test case class for OpenStack Compute API (Nova)
- """
-
- @classmethod
- def setUpClass(cls):
- super(ComputeWhiteboxTest, cls).setUpClass()
- if not cls.config.whitebox.whitebox_enabled:
- msg = "Whitebox testing disabled"
- raise cls.skipException(msg)
-
- # Add some convenience attributes that tests use...
- cls.nova_dir = cls.config.whitebox.source_dir
- cls.compute_bin_dir = cls.config.whitebox.bin_dir
- cls.compute_config_path = cls.config.whitebox.config_path
- cls.build_interval = cls.config.compute.build_interval
- cls.build_timeout = cls.config.compute.build_timeout
- cls.ssh_user = cls.config.compute.ssh_user
- cls.image_ref = cls.config.compute.image_ref
- cls.image_ref_alt = cls.config.compute.image_ref_alt
- cls.flavor_ref = cls.config.compute.flavor_ref
- cls.flavor_ref_alt = cls.config.compute.flavor_ref_alt
-
- # NOTE(afazekas): Mimics the helper method used in the api tests
- @classmethod
- def create_server(cls, **kwargs):
- flavor_ref = cls.config.compute.flavor_ref
- image_ref = cls.config.compute.image_ref
- name = rand_name(cls.__name__ + "-instance")
- if 'name' in kwargs:
- name = kwargs.pop('name')
- flavor = kwargs.get('flavor', flavor_ref)
- image_id = kwargs.get('image_id', image_ref)
-
- server = cls.compute_client.servers.create(
- name, image_id, flavor, **kwargs)
-
- if 'wait_until' in kwargs:
- cls.status_timeout(cls.compute_client.servers, server.id,
- server['id'], kwargs['wait_until'])
-
- server = cls.compute_client.servers.get(server.id)
- cls.set_resource(name, server)
- return server
-
- @classmethod
- def get_db_handle_and_meta(cls, database='nova'):
- """Return a connection handle and metadata of an OpenStack database."""
- engine_args = {"echo": False,
- "convert_unicode": True,
- "pool_recycle": 3600
- }
-
- try:
- engine = create_engine(cls.config.whitebox.db_uri, **engine_args)
- connection = engine.connect()
- meta = MetaData()
- meta.reflect(bind=engine)
-
- except Exception as e:
- raise exceptions.SQLException(message=e)
-
- return connection, meta
-
- def nova_manage(self, category, action, params):
- """Executes nova-manage command for the given action."""
-
- nova_manage_path = os.path.join(self.compute_bin_dir, 'nova-manage')
- cmd = ' '.join([nova_manage_path, category, action, params])
-
- if self.deploy_mode == 'devstack-local':
- if not os.path.isdir(self.nova_dir):
- sys.exit("Cannot find Nova source directory: %s" %
- self.nova_dir)
-
- cmd = shlex.split(cmd)
- result = subprocess.Popen(cmd, stdout=subprocess.PIPE)
-
- # TODO(rohitk): Need to define host connection parameters in config
- else:
- client = self.get_ssh_connection(self.config.whitebox.api_host,
- self.config.whitebox.api_user,
- self.config.whitebox.api_passwd)
- result = client.exec_command(cmd)
-
- return result
-
- def get_ssh_connection(self, host, username, password):
- """Create an SSH connection object to a host."""
- ssh_timeout = self.config.compute.ssh_timeout
- ssh_client = Client(host, username, password, ssh_timeout)
- if not ssh_client.test_connection_auth():
- raise exceptions.SSHTimeout()
- else:
- return ssh_client
diff --git a/tempest/whitebox/test_images_whitebox.py b/tempest/whitebox/test_images_whitebox.py
deleted file mode 100644
index 06dcd7f..0000000
--- a/tempest/whitebox/test_images_whitebox.py
+++ /dev/null
@@ -1,163 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack, LLC
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.common.utils.data_utils import rand_name
-from tempest.openstack.common import log as logging
-from tempest.whitebox import manager
-
-from novaclient import exceptions
-
-LOG = logging.getLogger(__name__)
-
-
-class ImagesWhiteboxTest(manager.ComputeWhiteboxTest):
- _interface = 'json'
-
- @classmethod
- def setUpClass(cls):
- super(ImagesWhiteboxTest, cls).setUpClass()
- cls.create_image = cls.compute_client.servers.create_image
- cls.connection, cls.meta = cls.get_db_handle_and_meta()
- cls.shared_server = cls.create_server()
- cls.image_ids = []
-
- @classmethod
- def tearDownClass(cls):
- """Delete images and server after a test is executed."""
- for image_id in cls.image_ids:
- cls.client.delete_image(image_id)
- cls.image_ids.remove(image_id)
- super(ImagesWhiteboxTest, cls).tearDownClass()
-
- @classmethod
- def update_state(self, server_id, vm_state, task_state, deleted=0):
- """Update states of an instance in database for validation."""
- if not task_state:
- task_state = "NULL"
-
- instances = self.meta.tables['instances']
- stmt = instances.update().where(instances.c.uuid == server_id).values(
- deleted=deleted,
- vm_state=vm_state,
- task_state=task_state)
-
- self.connection.execute(stmt, autocommit=True)
-
- def _test_create_image_409_base(self, vm_state, task_state, deleted=0):
- """Base method for create image tests based on vm and task states."""
- try:
- self.update_state(self.shared_server.id, vm_state,
- task_state, deleted)
-
- image_name = rand_name('snap-')
- self.assertRaises(exceptions.Conflict,
- self.create_image,
- self.shared_server.id, image_name)
- except Exception:
- LOG.error("Should not allow create image when vm_state=%s and "
- "task_state=%s" % (vm_state, task_state))
- raise
- finally:
- self.update_state(self.shared_server.id, 'active', None)
-
- def test_create_image_when_vm_eq_building_task_eq_scheduling(self):
- # 409 error when instance states are building,scheduling
- self._test_create_image_409_base("building", "scheduling")
-
- def test_create_image_when_vm_eq_building_task_eq_networking(self):
- # 409 error when instance states are building,networking
- self._test_create_image_409_base("building", "networking")
-
- def test_create_image_when_vm_eq_building_task_eq_bdm(self):
- # 409 error when instance states are building,block_device_mapping
- self._test_create_image_409_base("building", "block_device_mapping")
-
- def test_create_image_when_vm_eq_building_task_eq_spawning(self):
- # 409 error when instance states are building,spawning
- self._test_create_image_409_base("building", "spawning")
-
- def test_create_image_when_vm_eq_active_task_eq_image_backup(self):
- # 409 error when instance states are active,image_backup
- self._test_create_image_409_base("active", "image_backup")
-
- def test_create_image_when_vm_eq_resized_task_eq_resize_prep(self):
- # 409 error when instance states are resized,resize_prep
- self._test_create_image_409_base("resized", "resize_prep")
-
- def test_create_image_when_vm_eq_resized_task_eq_resize_migrating(self):
- # 409 error when instance states are resized,resize_migrating
- self._test_create_image_409_base("resized", "resize_migrating")
-
- def test_create_image_when_vm_eq_resized_task_eq_resize_migrated(self):
- # 409 error when instance states are resized,resize_migrated
- self._test_create_image_409_base("resized", "resize_migrated")
-
- def test_create_image_when_vm_eq_resized_task_eq_resize_finish(self):
- # 409 error when instance states are resized,resize_finish
- self._test_create_image_409_base("resized", "resize_finish")
-
- def test_create_image_when_vm_eq_resized_task_eq_resize_reverting(self):
- # 409 error when instance states are resized,resize_reverting
- self._test_create_image_409_base("resized", "resize_reverting")
-
- def test_create_image_when_vm_eq_resized_task_eq_resize_confirming(self):
- # 409 error when instance states are resized,resize_confirming
- self._test_create_image_409_base("resized", "resize_confirming")
-
- def test_create_image_when_vm_eq_active_task_eq_resize_verify(self):
- # 409 error when instance states are active,resize_verify
- self._test_create_image_409_base("active", "resize_verify")
-
- def test_create_image_when_vm_eq_active_task_eq_updating_password(self):
- # 409 error when instance states are active,updating_password
- self._test_create_image_409_base("active", "updating_password")
-
- def test_create_image_when_vm_eq_active_task_eq_rebuilding(self):
- # 409 error when instance states are active,rebuilding
- self._test_create_image_409_base("active", "rebuilding")
-
- def test_create_image_when_vm_eq_active_task_eq_rebooting(self):
- # 409 error when instance states are active,rebooting
- self._test_create_image_409_base("active", "rebooting")
-
- def test_create_image_when_vm_eq_building_task_eq_deleting(self):
- # 409 error when instance states are building,deleting
- self._test_create_image_409_base("building", "deleting")
-
- def test_create_image_when_vm_eq_active_task_eq_deleting(self):
- # 409 error when instance states are active,deleting
- self._test_create_image_409_base("active", "deleting")
-
- def test_create_image_when_vm_eq_error_task_eq_building(self):
- # 409 error when instance states are error,building
- self._test_create_image_409_base("error", "building")
-
- def test_create_image_when_vm_eq_error_task_eq_none(self):
- # 409 error when instance states are error,None
- self._test_create_image_409_base("error", None)
-
- def test_create_image_when_vm_eq_deleted_task_eq_none(self):
- # 409 error when instance states are deleted,None
- self._test_create_image_409_base("deleted", None)
-
- def test_create_image_when_vm_eq_resized_task_eq_none(self):
- # 409 error when instance states are resized,None
- self._test_create_image_409_base("resized", None)
-
- def test_create_image_when_vm_eq_error_task_eq_resize_prep(self):
- # 409 error when instance states are error,resize_prep
- self._test_create_image_409_base("error", "resize_prep")
diff --git a/tempest/whitebox/test_servers_whitebox.py b/tempest/whitebox/test_servers_whitebox.py
deleted file mode 100644
index b6c888c..0000000
--- a/tempest/whitebox/test_servers_whitebox.py
+++ /dev/null
@@ -1,184 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack, LLC
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.api.identity.base import BaseIdentityAdminTest
-from tempest import exceptions
-from tempest.openstack.common import log as logging
-from tempest.whitebox import manager
-
-LOG = logging.getLogger(__name__)
-
-
-class ServersWhiteboxTest(manager.ComputeWhiteboxTest):
- _interface = 'json'
-
- @classmethod
- def setUpClass(cls):
- super(ServersWhiteboxTest, cls).setUpClass()
- # NOTE(afazekas): Strange relationship
- BaseIdentityAdminTest.setUpClass()
- cls.client = cls.servers_client
- cls.img_client = cls.images_client
- cls.admin_client = BaseIdentityAdminTest.client
-
- cls.connection, cls.meta = cls.get_db_handle_and_meta()
-
- resp, tenants = cls.admin_client.list_tenants()
- cls.tenant_id = [
- tnt['id']
- for tnt in tenants if tnt['name'] == cls.config.compute.tenant_name
- ][0]
-
- cls.shared_server = cls.create_server()
-
- def tearDown(cls):
- for server in cls.servers:
- try:
- cls.client.delete_server(server['id'])
- except exceptions.NotFound:
- continue
-
- def update_state(self, server_id, vm_state, task_state, deleted=0):
- """Update states of an instance in database for validation."""
- if not task_state:
- task_state = 'NULL'
-
- instances = self.meta.tables['instances']
- stmt = instances.update().where(instances.c.uuid == server_id).values(
- deleted=deleted,
- vm_state=vm_state,
- task_state=task_state)
- self.connection.execute(stmt, autocommit=True)
-
- def _test_delete_server_base(self, vm_state, task_state):
- """
- Base method for delete server tests based on vm and task states.
- Validates for successful server termination.
- """
- server = self.create_server()
- self.update_state(server['id'], vm_state, task_state)
-
- resp, body = self.client.delete_server(server['id'])
- self.assertEqual('204', resp['status'])
- self.client.wait_for_server_termination(server['id'],
- ignore_error=True)
-
- instances = self.meta.tables['instances']
- stmt = instances.select().where(instances.c.uuid == server['id'])
- result = self.connection.execute(stmt).first()
-
- self.assertEqual(True, result.deleted > 0)
- self.assertEqual('deleted', result.vm_state)
- self.assertEqual(None, result.task_state)
-
- def _test_delete_server_403_base(self, vm_state, task_state):
- """
- Base method for delete server tests based on vm and task states.
- Validates for 403 error code.
- """
- try:
- self.update_state(self.shared_server['id'], vm_state, task_state)
-
- self.assertRaises(exceptions.Unauthorized,
- self.client.delete_server,
- self.shared_server['id'])
- except Exception:
- LOG.error("Should not allow delete server when vm_state=%s and "
- "task_state=%s" % (vm_state, task_state))
- raise
- finally:
- self.update_state(self.shared_server['id'], 'active', None)
-
- def test_delete_server_when_vm_eq_building_task_eq_networking(self):
- # Delete server when instance states are building,networking
- self._test_delete_server_base('building', 'networking')
-
- def test_delete_server_when_vm_eq_building_task_eq_bdm(self):
- # Delete server when instance states are building,block device mapping
- self._test_delete_server_base('building', 'block_device_mapping')
-
- def test_delete_server_when_vm_eq_building_task_eq_spawning(self):
- # Delete server when instance states are building,spawning
- self._test_delete_server_base('building', 'spawning')
-
- def test_delete_server_when_vm_eq_active_task_eq_image_backup(self):
- # Delete server when instance states are active,image_backup
- self._test_delete_server_base('active', 'image_backup')
-
- def test_delete_server_when_vm_eq_active_task_eq_rebuilding(self):
- # Delete server when instance states are active,rebuilding
- self._test_delete_server_base('active', 'rebuilding')
-
- def test_delete_server_when_vm_eq_error_task_eq_spawning(self):
- # Delete server when instance states are error,spawning
- self._test_delete_server_base('error', 'spawning')
-
- def test_delete_server_when_vm_eq_resized_task_eq_resize_prep(self):
- # Delete server when instance states are resized,resize_prep
- self._test_delete_server_403_base('resized', 'resize_prep')
-
- def test_delete_server_when_vm_eq_resized_task_eq_resize_migrating(self):
- # Delete server when instance states are resized,resize_migrating
- self._test_delete_server_403_base('resized', 'resize_migrating')
-
- def test_delete_server_when_vm_eq_resized_task_eq_resize_migrated(self):
- # Delete server when instance states are resized,resize_migrated
- self._test_delete_server_403_base('resized', 'resize_migrated')
-
- def test_delete_server_when_vm_eq_resized_task_eq_resize_finish(self):
- # Delete server when instance states are resized,resize_finish
- self._test_delete_server_403_base('resized', 'resize_finish')
-
- def test_delete_server_when_vm_eq_resized_task_eq_resize_reverting(self):
- # Delete server when instance states are resized,resize_reverting
- self._test_delete_server_403_base('resized', 'resize_reverting')
-
- def test_delete_server_when_vm_eq_resized_task_eq_resize_confirming(self):
- # Delete server when instance states are resized,resize_confirming
- self._test_delete_server_403_base('resized', 'resize_confirming')
-
- def test_delete_server_when_vm_eq_active_task_eq_resize_verify(self):
- # Delete server when instance states are active,resize_verify
- self._test_delete_server_base('active', 'resize_verify')
-
- def test_delete_server_when_vm_eq_active_task_eq_rebooting(self):
- # Delete server when instance states are active,rebooting
- self._test_delete_server_base('active', 'rebooting')
-
- def test_delete_server_when_vm_eq_building_task_eq_deleting(self):
- # Delete server when instance states are building,deleting
- self._test_delete_server_base('building', 'deleting')
-
- def test_delete_server_when_vm_eq_active_task_eq_deleting(self):
- # Delete server when instance states are active,deleting
- self._test_delete_server_base('active', 'deleting')
-
- def test_delete_server_when_vm_eq_error_task_eq_none(self):
- # Delete server when instance states are error,None
- self._test_delete_server_base('error', None)
-
- def test_delete_server_when_vm_eq_resized_task_eq_none(self):
- # Delete server when instance states are resized,None
- self._test_delete_server_403_base('resized', None)
-
- def test_delete_server_when_vm_eq_error_task_eq_resize_prep(self):
- # Delete server when instance states are error,resize_prep
- self._test_delete_server_base('error', 'resize_prep')
-
- def test_delete_server_when_vm_eq_error_task_eq_error(self):
- # Delete server when instance states are error,error
- self._test_delete_server_base('error', 'error')
diff --git a/tools/install_venv_common.py b/tools/install_venv_common.py
index f428c1e..0999e2c 100644
--- a/tools/install_venv_common.py
+++ b/tools/install_venv_common.py
@@ -114,9 +114,10 @@
print('Installing dependencies with pip (this can take a while)...')
# First things first, make sure our venv has the latest pip and
- # setuptools.
- self.pip_install('pip>=1.3')
+ # setuptools and pbr
+ self.pip_install('pip>=1.4')
self.pip_install('setuptools')
+ self.pip_install('pbr')
self.pip_install('-r', self.requirements)
self.pip_install('-r', self.test_requirements)
@@ -201,12 +202,13 @@
RHEL: https://bugzilla.redhat.com/958868
"""
- # Install "patch" program if it's not there
- if not self.check_pkg('patch'):
- self.die("Please install 'patch'.")
+ if os.path.exists('contrib/redhat-eventlet.patch'):
+ # Install "patch" program if it's not there
+ if not self.check_pkg('patch'):
+ self.die("Please install 'patch'.")
- # Apply the eventlet patch
- self.apply_patch(os.path.join(self.venv, 'lib', self.py_version,
- 'site-packages',
- 'eventlet/green/subprocess.py'),
- 'contrib/redhat-eventlet.patch')
+ # Apply the eventlet patch
+ self.apply_patch(os.path.join(self.venv, 'lib', self.py_version,
+ 'site-packages',
+ 'eventlet/green/subprocess.py'),
+ 'contrib/redhat-eventlet.patch')