Merge "added an api test for security_groups"
diff --git a/HACKING.rst b/HACKING.rst
index 03e7dc3..5153fe1 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -20,6 +20,7 @@
- [T102] Cannot import OpenStack python clients in tempest/api tests
- [T103] tempest/tests is deprecated
+- [T104] Scenario tests require a services decorator
Test Data/Configuration
-----------------------
@@ -96,6 +97,24 @@
credentials management, testresources and so on. These facilities, MUST be able
to work even if just one ``test_method`` selected for execution.
+Service Tagging
+---------------
+Service tagging is used to specify which services are exercised by a particular
+test method. You specify the services with the tempest.test.services decorator.
+For example:
+
+@services('compute', 'image')
+
+Valid service tag names are the same as the list of directories in tempest.api
+that have tests.
+
+For scenario tests, having a service tag is required. For the api tests, service
+tags are only needed if the test method makes an api call (either directly or
+indirectly through another service) that differs from the parent directory
+name. For example, any test that makes an api call to a service other than nova
+in tempest.api.compute would require a service tag for those services; however,
+they do not need to be tagged as compute.
+
Guidelines
----------
- Do not submit changesets with only testcases which are skipped as
diff --git a/bin/tempest b/bin/tempest
deleted file mode 100755
index 87ba6d5..0000000
--- a/bin/tempest
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/usr/bin/env bash
-
-function usage {
- echo "Usage: $0 [OPTION]..."
- echo "Run Tempest test suite"
- echo ""
- echo " -s, --smoke Only run smoke tests"
- echo " -w, --whitebox Only run whitebox tests"
- echo " -h, --help Print this usage message"
- echo " -d. --debug Debug this script -- set -o xtrace"
- exit
-}
-
-function process_option {
- case "$1" in
- -h|--help) usage;;
- -d|--debug) set -o xtrace;;
- -s|--smoke) noseargs="$noseargs --attr=type=smoke";;
- -w|--whitebox) noseargs="$noseargs --attr=type=whitebox";;
- *) noseargs="$noseargs $1"
- esac
-}
-
-noseargs=""
-
-export NOSE_WITH_OPENSTACK=1
-export NOSE_OPENSTACK_COLOR=1
-export NOSE_OPENSTACK_RED=15.00
-export NOSE_OPENSTACK_YELLOW=3.00
-export NOSE_OPENSTACK_SHOW_ELAPSED=1
-export NOSE_OPENSTACK_STDOUT=1
-
-for arg in "$@"; do
- process_option $arg
-done
-
-
-# only add tempest default if we don't specify a test
-if [[ "x$noseargs" =~ "tempest" ]]; then
- noseargs="$noseargs"
-else
- noseargs="$noseargs tempest"
-fi
-
-
-function run_tests {
- $NOSETESTS
-}
-
-NOSETESTS="nosetests $noseargs"
-
-run_tests || exit
diff --git a/doc/source/field_guide/whitebox.rst b/doc/source/field_guide/whitebox.rst
deleted file mode 120000
index 47f6069..0000000
--- a/doc/source/field_guide/whitebox.rst
+++ /dev/null
@@ -1 +0,0 @@
-../../../tempest/whitebox/README.rst
\ No newline at end of file
diff --git a/doc/source/index.rst b/doc/source/index.rst
index f70cdd1..1c32b9c 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -31,7 +31,6 @@
field_guide/scenario
field_guide/stress
field_guide/thirdparty
- field_guide/whitebox
field_guide/unit_tests
------------------
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index 8d96858..2f07a19 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -161,29 +161,6 @@
# Expected first device name when a volume is attached to an instance
volume_device_name = vdb
-[whitebox]
-# Whitebox options for compute. Whitebox options enable the
-# whitebox test cases, which look at internal Nova database state,
-# SSH into VMs to check instance state, etc.
-
-# Should we run whitebox tests for Compute?
-whitebox_enabled = true
-
-# Path of nova source directory
-source_dir = /opt/stack/nova
-
-# Path of nova configuration file
-config_path = /etc/nova/nova.conf
-
-# Directory containing nova binaries such as nova-manage
-bin_dir = /usr/local/bin
-
-# Connection string to the database of Compute service
-db_uri = mysql://nova:secret@localhost/nova
-
-# Path to a private key file for SSH access to remote hosts
-path_to_private_key = /home/user/.ssh/id_rsa
-
[compute-admin]
# This should be the username of a user WITH administrative privileges
# If not defined the admin user from the identity section will be used
diff --git a/requirements.txt b/requirements.txt
index 877b23c..ab48ec5 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -18,6 +18,4 @@
keyring>=1.6.1
testrepository>=0.0.17
oslo.config>=1.1.0
-# Needed for whitebox testing
-SQLAlchemy>=0.7.8,<=0.7.99
eventlet>=0.13.0
diff --git a/run_tests.sh b/run_tests.sh
index d672b62..710fbaa 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -10,7 +10,6 @@
echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added."
echo " -u, --update Update the virtual environment with any newer package versions"
echo " -s, --smoke Only run smoke tests"
- echo " -w, --whitebox Only run whitebox tests"
echo " -t, --serial Run testr serially"
echo " -c, --nova-coverage Enable Nova coverage collection"
echo " -C, --config Config file location"
@@ -38,7 +37,7 @@
logging=0
logging_config=etc/logging.conf
-if ! options=$(getopt -o VNnfuswtcphdC:lL: -l virtual-env,no-virtual-env,no-site-packages,force,update,smoke,whitebox,serial,nova-coverage,pep8,help,debug,config:,logging,logging-config: -- "$@")
+if ! options=$(getopt -o VNnfustcphdC:lL: -l virtual-env,no-virtual-env,no-site-packages,force,update,smoke,serial,nova-coverage,pep8,help,debug,config:,logging,logging-config: -- "$@")
then
# parse error
usage
@@ -60,7 +59,6 @@
-C|--config) config_file=$2; shift;;
-p|--pep8) let just_pep8=1;;
-s|--smoke) testrargs="$testrargs smoke";;
- -w|--whitebox) testrargs="$testrargs whitebox";;
-t|--serial) serial=1;;
-l|--logging) logging=1;;
-L|--logging-config) logging_config=$2; shift;;
diff --git a/setup.cfg b/setup.cfg
index 7cfc4ce..a4cf118 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -21,10 +21,6 @@
setup-hooks =
pbr.hooks.setup_hook
-[files]
-scripts =
- bin/tempest
-
[build_sphinx]
all_files = 1
build-dir = doc/build
diff --git a/tempest/README.rst b/tempest/README.rst
index 33021c8..dbac809 100644
--- a/tempest/README.rst
+++ b/tempest/README.rst
@@ -18,7 +18,6 @@
| scenario/ - complex scenario tests
| stress/ - stress tests
| thirdparty/ - 3rd party api tests
-| whitebox/ - white box testing
Each of these directories contains different types of tests. What
belongs in each directory, the rules and examples for good tests, are
@@ -78,11 +77,3 @@
completely legitimate for Tempest to include tests of 3rdparty APIs,
but those should be kept separate from the normal OpenStack
validation.
-
-
-whitebox
---------
-
-Whitebox tests are tests which require access to the database of the
-target OpenStack machine to verify internal state after operations
-are made. White box tests are allowed to use the python clients.
diff --git a/tempest/api/compute/admin/test_aggregates.py b/tempest/api/compute/admin/test_aggregates.py
index 303bc0c..0bb0460 100644
--- a/tempest/api/compute/admin/test_aggregates.py
+++ b/tempest/api/compute/admin/test_aggregates.py
@@ -15,6 +15,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import fixtures
+
from tempest.api.compute import base
from tempest.common.utils.data_utils import rand_name
from tempest import exceptions
@@ -22,6 +24,16 @@
from tempest.test import attr
+class LockFixture(fixtures.Fixture):
+ def __init__(self, name):
+ self.mgr = lockutils.lock(name, 'tempest-', True)
+
+ def setUp(self):
+ super(LockFixture, self).setUp()
+ self.addCleanup(self.mgr.__exit__, None, None, None)
+ self.mgr.__enter__()
+
+
class AggregatesAdminTestJSON(base.BaseComputeAdminTest):
"""
@@ -146,9 +158,9 @@
self.client.get_aggregate, -1)
@attr(type='gate')
- @lockutils.synchronized('availability_zone', 'tempest-', True)
def test_aggregate_add_remove_host(self):
# Add an host to the given aggregate and remove.
+ self.useFixture(LockFixture('availability_zone'))
aggregate_name = rand_name(self.aggregate_name_prefix)
resp, aggregate = self.client.create_aggregate(aggregate_name)
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
@@ -168,9 +180,9 @@
self.assertNotIn(self.host, body['hosts'])
@attr(type='gate')
- @lockutils.synchronized('availability_zone', 'tempest-', True)
def test_aggregate_add_host_list(self):
# Add an host to the given aggregate and list.
+ self.useFixture(LockFixture('availability_zone'))
aggregate_name = rand_name(self.aggregate_name_prefix)
resp, aggregate = self.client.create_aggregate(aggregate_name)
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
@@ -186,9 +198,9 @@
self.assertIn(self.host, agg['hosts'])
@attr(type='gate')
- @lockutils.synchronized('availability_zone', 'tempest-', True)
def test_aggregate_add_host_get_details(self):
# Add an host to the given aggregate and get details.
+ self.useFixture(LockFixture('availability_zone'))
aggregate_name = rand_name(self.aggregate_name_prefix)
resp, aggregate = self.client.create_aggregate(aggregate_name)
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
@@ -201,9 +213,9 @@
self.assertIn(self.host, body['hosts'])
@attr(type='gate')
- @lockutils.synchronized('availability_zone', 'tempest-', True)
def test_aggregate_add_host_create_server_with_az(self):
# Add an host to the given aggregate and create a server.
+ self.useFixture(LockFixture('availability_zone'))
aggregate_name = rand_name(self.aggregate_name_prefix)
az_name = rand_name(self.az_name_prefix)
resp, aggregate = self.client.create_aggregate(aggregate_name, az_name)
@@ -248,9 +260,9 @@
aggregate['id'], self.host)
@attr(type=['negative', 'gate'])
- @lockutils.synchronized('availability_zone', 'tempest-', True)
def test_aggregate_remove_host_as_user(self):
# Regular user is not allowed to remove a host from an aggregate.
+ self.useFixture(LockFixture('availability_zone'))
aggregate_name = rand_name(self.aggregate_name_prefix)
resp, aggregate = self.client.create_aggregate(aggregate_name)
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
diff --git a/tempest/api/compute/admin/test_flavors_extra_specs.py b/tempest/api/compute/admin/test_flavors_extra_specs.py
index f2f82b5..ace77a6 100644
--- a/tempest/api/compute/admin/test_flavors_extra_specs.py
+++ b/tempest/api/compute/admin/test_flavors_extra_specs.py
@@ -17,6 +17,8 @@
from tempest.api import compute
from tempest.api.compute import base
+from tempest.common.utils.data_utils import rand_int_id
+from tempest.common.utils.data_utils import rand_name
from tempest import exceptions
from tempest.test import attr
@@ -39,12 +41,12 @@
raise cls.skipException(msg)
cls.client = cls.os_adm.flavors_client
- flavor_name = 'test_flavor2'
+ flavor_name = rand_name('test_flavor')
ram = 512
vcpus = 1
disk = 10
ephemeral = 10
- cls.new_flavor_id = 12345
+ cls.new_flavor_id = rand_int_id(start=1000)
swap = 1024
rxtx = 1
# Create a flavor so as to set/get/unset extra specs
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index 5c1ad0d..ee1ad9e 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -26,7 +26,6 @@
class AttachVolumeTestJSON(base.BaseComputeTest):
_interface = 'json'
run_ssh = tempest.config.TempestConfig().compute.run_ssh
- device = tempest.config.TempestConfig().compute.volume_device_name
def __init__(self, *args, **kwargs):
super(AttachVolumeTestJSON, self).__init__(*args, **kwargs)
@@ -37,7 +36,7 @@
@classmethod
def setUpClass(cls):
super(AttachVolumeTestJSON, cls).setUpClass()
-
+ cls.device = cls.config.compute.volume_device_name
if not cls.config.service_available.cinder:
skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
raise cls.skipException(skip_msg)
diff --git a/tempest/api/volume/test_volumes_actions.py b/tempest/api/volume/test_volumes_actions.py
index 960785d..766a2c7 100644
--- a/tempest/api/volume/test_volumes_actions.py
+++ b/tempest/api/volume/test_volumes_actions.py
@@ -18,6 +18,7 @@
from tempest.api.volume.base import BaseVolumeTest
from tempest.common.utils.data_utils import rand_name
from tempest.test import attr
+from tempest.test import services
from tempest.test import stresstest
@@ -55,6 +56,7 @@
@stresstest(class_setup_per='process')
@attr(type='smoke')
+ @services('compute')
def test_attach_detach_volume_to_instance(self):
# Volume is attached and detached successfully from an instance
mountpoint = '/dev/vdc'
@@ -69,6 +71,7 @@
@stresstest(class_setup_per='process')
@attr(type='gate')
+ @services('compute')
def test_get_volume_attachment(self):
# Verify that a volume's attachment information is retrieved
mountpoint = '/dev/vdc'
@@ -93,6 +96,7 @@
self.assertEqual(self.volume['id'], attachment['volume_id'])
@attr(type='gate')
+ @services('image')
def test_volume_upload(self):
# NOTE(gfidente): the volume uploaded in Glance comes from setUpClass,
# it is shared with the other tests. After it is uploaded in Glance,
diff --git a/tempest/api/volume/test_volumes_get.py b/tempest/api/volume/test_volumes_get.py
index 2e90f16..f7f428c 100644
--- a/tempest/api/volume/test_volumes_get.py
+++ b/tempest/api/volume/test_volumes_get.py
@@ -18,6 +18,7 @@
from tempest.api.volume import base
from tempest.common.utils.data_utils import rand_name
from tempest.test import attr
+from tempest.test import services
class VolumesGetTest(base.BaseVolumeTest):
@@ -93,6 +94,7 @@
self._volume_create_get_delete()
@attr(type='smoke')
+ @services('image')
def test_volume_create_get_delete_from_image(self):
self._volume_create_get_delete(imageRef=self.config.compute.image_ref)
diff --git a/tempest/config.py b/tempest/config.py
index 7245b10..acb0e8d 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -238,36 +238,6 @@
for opt in ComputeAdminGroup:
conf.register_opt(opt, group='compute-admin')
-
-whitebox_group = cfg.OptGroup(name='whitebox',
- title="Whitebox Options")
-
-WhiteboxGroup = [
- cfg.BoolOpt('whitebox_enabled',
- default=False,
- help="Does the test environment support whitebox tests for "
- "Compute?"),
- cfg.StrOpt('db_uri',
- default=None,
- help="Connection string to the database of Compute service"),
- cfg.StrOpt('source_dir',
- default="/opt/stack/nova",
- help="Path of nova source directory"),
- cfg.StrOpt('config_path',
- default='/etc/nova/nova.conf',
- help="Path of nova configuration file"),
- cfg.StrOpt('bin_dir',
- default="/usr/local/bin/",
- help="Directory containing nova binaries such as nova-manage"),
-]
-
-
-def register_whitebox_opts(conf):
- conf.register_group(whitebox_group)
- for opt in WhiteboxGroup:
- conf.register_opt(opt, group='whitebox')
-
-
image_group = cfg.OptGroup(name='image',
title="Image Service Options")
@@ -650,7 +620,6 @@
register_compute_opts(cfg.CONF)
register_identity_opts(cfg.CONF)
- register_whitebox_opts(cfg.CONF)
register_image_opts(cfg.CONF)
register_network_opts(cfg.CONF)
register_volume_opts(cfg.CONF)
@@ -663,7 +632,6 @@
register_scenario_opts(cfg.CONF)
register_service_available_opts(cfg.CONF)
self.compute = cfg.CONF.compute
- self.whitebox = cfg.CONF.whitebox
self.identity = cfg.CONF.identity
self.images = cfg.CONF.image
self.network = cfg.CONF.network
diff --git a/tempest/exceptions.py b/tempest/exceptions.py
index 62bd8cf..924ebc9 100644
--- a/tempest/exceptions.py
+++ b/tempest/exceptions.py
@@ -65,6 +65,10 @@
message = 'Unauthorized'
+class InvalidServiceTag(RestClientException):
+ message = "Invalid service tag"
+
+
class TimeoutException(TempestException):
message = "Request timed out"
diff --git a/tempest/hacking/checks.py b/tempest/hacking/checks.py
index 8cfd548..aa97211 100644
--- a/tempest/hacking/checks.py
+++ b/tempest/hacking/checks.py
@@ -22,6 +22,8 @@
SKIP_DECORATOR_RE = re.compile(r'\s*@testtools.skip\((.*)\)')
SKIP_STR_RE = re.compile(r'.*Bug #\d+.*')
PYTHON_CLIENT_RE = re.compile('import (%s)client' % '|'.join(PYTHON_CLIENTS))
+TEST_DEFINITION = re.compile(r'^\s*def test.*')
+SCENARIO_DECORATOR = re.compile(r'\s*@.*services\(')
def skip_bugs(physical_line):
@@ -53,6 +55,21 @@
" in tempest/api/* tests"))
+def scenario_tests_need_service_tags(physical_line, filename,
+ previous_logical):
+ """Check that scenario tests have service tags
+
+ T104: Scenario tests require a services decorator
+ """
+
+ if 'tempest/scenario' in filename:
+ if TEST_DEFINITION.match(physical_line):
+ if not SCENARIO_DECORATOR.match(previous_logical):
+ return (physical_line.find('def'),
+ "T104: Scenario tests require a service decorator")
+
+
def factory(register):
register(skip_bugs)
register(import_no_clients_in_api)
+ register(scenario_tests_need_service_tags)
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 90b3fca..d3c2a18 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -28,6 +28,7 @@
from neutronclient.common import exceptions as exc
import neutronclient.v2_0.client
import novaclient.client
+from novaclient import exceptions as nova_exceptions
from tempest.api.network import common as net_common
from tempest.common import isolated_creds
@@ -276,27 +277,57 @@
expected status to show. At any time, if the returned
status of the thing is ERROR, fail out.
"""
+ self._status_timeout(things, thing_id, expected_status=expected_status)
+
+ def delete_timeout(self, things, thing_id):
+ """
+ Given a thing, do a loop, sleeping
+ for a configurable amount of time, checking for the
+ deleted status to show. At any time, if the returned
+ status of the thing is ERROR, fail out.
+ """
+ self._status_timeout(things,
+ thing_id,
+ allow_notfound=True)
+
+ def _status_timeout(self,
+ things,
+ thing_id,
+ expected_status=None,
+ allow_notfound=False):
+
+ log_status = expected_status if expected_status else ''
+ if allow_notfound:
+ log_status += ' or NotFound' if log_status != '' else 'NotFound'
+
def check_status():
# python-novaclient has resources available to its client
# that all implement a get() method taking an identifier
# for the singular resource to retrieve.
- thing = things.get(thing_id)
+ try:
+ thing = things.get(thing_id)
+ except nova_exceptions.NotFound:
+ if allow_notfound:
+ return True
+ else:
+ raise
+
new_status = thing.status
if new_status == 'ERROR':
message = "%s failed to get to expected status. \
In ERROR state." % (thing)
raise exceptions.BuildErrorException(message)
- elif new_status == expected_status:
+ elif new_status == expected_status and expected_status is not None:
return True # All good.
LOG.debug("Waiting for %s to get to %s status. "
"Currently in %s status",
- thing, expected_status, new_status)
+ thing, log_status, new_status)
if not tempest.test.call_until_true(
check_status,
self.config.compute.build_timeout,
self.config.compute.build_interval):
message = "Timed out waiting for thing %s \
- to become %s" % (thing_id, expected_status)
+ to become %s" % (thing_id, log_status)
raise exceptions.TimeoutException(message)
def create_loginable_secgroup_rule(self, client=None, secgroup_id=None):
diff --git a/tempest/scenario/orchestration/test_autoscaling.py b/tempest/scenario/orchestration/test_autoscaling.py
index 78025ee..88f2ebd 100644
--- a/tempest/scenario/orchestration/test_autoscaling.py
+++ b/tempest/scenario/orchestration/test_autoscaling.py
@@ -12,10 +12,12 @@
# License for the specific language governing permissions and limitations
# under the License.
+import time
+
from tempest.scenario import manager
from tempest.test import attr
from tempest.test import call_until_true
-import time
+from tempest.test import services
class AutoScalingTest(manager.OrchestrationScenarioTest):
@@ -58,6 +60,7 @@
self.set_resource('stack', self.stack)
@attr(type='slow')
+ @services('orchestration', 'compute')
def test_scale_up_then_down(self):
self.assign_keypair()
diff --git a/tempest/scenario/test_dashboard_basic_ops.py b/tempest/scenario/test_dashboard_basic_ops.py
index 9a45572..1081a3e 100644
--- a/tempest/scenario/test_dashboard_basic_ops.py
+++ b/tempest/scenario/test_dashboard_basic_ops.py
@@ -20,6 +20,7 @@
from lxml import html
from tempest.scenario import manager
+from tempest.test import services
class TestDashboardBasicOps(manager.OfficialClientTest):
@@ -66,6 +67,7 @@
response = self.opener.open(self.config.dashboard.dashboard_url)
self.assertIn('Overview', response.read())
+ @services('dashboard')
def test_basic_scenario(self):
self.check_login_page()
self.user_login()
diff --git a/tempest/scenario/test_large_ops.py b/tempest/scenario/test_large_ops.py
index 39b1e10..33b7adc 100644
--- a/tempest/scenario/test_large_ops.py
+++ b/tempest/scenario/test_large_ops.py
@@ -18,6 +18,7 @@
from tempest.common.utils.data_utils import rand_name
from tempest.openstack.common import log as logging
from tempest.scenario import manager
+from tempest.test import services
LOG = logging.getLogger(__name__)
@@ -96,6 +97,7 @@
self.addCleanup(delete, self.servers)
self._wait_for_server_status('ACTIVE')
+ @services('compute', 'image')
def test_large_ops_scenario(self):
if self.config.scenario.large_ops_number < 1:
return
diff --git a/tempest/scenario/test_minimum_basic.py b/tempest/scenario/test_minimum_basic.py
index 5cddde2..ce4d1bd 100644
--- a/tempest/scenario/test_minimum_basic.py
+++ b/tempest/scenario/test_minimum_basic.py
@@ -18,6 +18,7 @@
from tempest.common.utils.data_utils import rand_name
from tempest.openstack.common import log as logging
from tempest.scenario import manager
+from tempest.test import services
LOG = logging.getLogger(__name__)
@@ -145,6 +146,7 @@
volume = self.volume_client.volumes.get(self.volume.id)
self.assertEqual('available', volume.status)
+ @services('compute', 'volume', 'image', 'network')
def test_minimum_basic_scenario(self):
self.glance_image_create()
self.nova_keypair_add()
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index 70939f6..662e919 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -19,8 +19,12 @@
from tempest.api.network import common as net_common
from tempest.common.utils.data_utils import rand_name
from tempest import config
+from tempest.openstack.common import log as logging
from tempest.scenario import manager
from tempest.test import attr
+from tempest.test import services
+
+LOG = logging.getLogger(__name__)
class TestNetworkBasicOps(manager.NetworkScenarioTest):
@@ -158,18 +162,15 @@
self.set_resource(name, router)
return router
- @attr(type='smoke')
- def test_001_create_keypairs(self):
+ def _create_keypairs(self):
self.keypairs[self.tenant_id] = self.create_keypair(
name=rand_name('keypair-smoke-'))
- @attr(type='smoke')
- def test_002_create_security_groups(self):
+ def _create_security_groups(self):
self.security_groups[self.tenant_id] = self._create_security_group(
self.compute_client)
- @attr(type='smoke')
- def test_003_create_networks(self):
+ def _create_networks(self):
network = self._create_network(self.tenant_id)
router = self._get_router(self.tenant_id)
subnet = self._create_subnet(network)
@@ -178,8 +179,7 @@
self.subnets.append(subnet)
self.routers.append(router)
- @attr(type='smoke')
- def test_004_check_networks(self):
+ def _check_networks(self):
# Checks that we see the newly created network/subnet/router via
# checking the result of list_[networks,routers,subnets]
seen_nets = self._list_networks()
@@ -202,10 +202,7 @@
self.assertIn(myrouter.name, seen_router_names)
self.assertIn(myrouter.id, seen_router_ids)
- @attr(type='smoke')
- def test_005_create_servers(self):
- if not (self.keypairs or self.security_groups or self.networks):
- raise self.skipTest('Necessary resources have not been defined')
+ def _create_servers(self):
for i, network in enumerate(self.networks):
tenant_id = network.tenant_id
name = rand_name('server-smoke-%d-' % i)
@@ -222,13 +219,11 @@
create_kwargs=create_kwargs)
self.servers.append(server)
- @attr(type='smoke')
- def test_006_check_tenant_network_connectivity(self):
+ def _check_tenant_network_connectivity(self):
if not self.config.network.tenant_networks_reachable:
msg = 'Tenant networks not configured to be reachable.'
- raise self.skipTest(msg)
- if not self.servers:
- raise self.skipTest("No VM's have been created")
+ LOG.info(msg)
+ return
# The target login is assumed to have been configured for
# key-based authentication by cloud-init.
ssh_login = self.config.compute.image_ssh_user
@@ -239,22 +234,14 @@
self._check_vm_connectivity(ip_address, ssh_login,
private_key)
- @attr(type='smoke')
- def test_007_assign_floating_ips(self):
+ def _assign_floating_ips(self):
public_network_id = self.config.network.public_network_id
- if not public_network_id:
- raise self.skipTest('Public network not configured')
- if not self.servers:
- raise self.skipTest("No VM's have been created")
for server in self.servers:
floating_ip = self._create_floating_ip(server, public_network_id)
self.floating_ips.setdefault(server, [])
self.floating_ips[server].append(floating_ip)
- @attr(type='smoke')
- def test_008_check_public_network_connectivity(self):
- if not self.floating_ips:
- raise self.skipTest('No floating ips have been allocated.')
+ def _check_public_network_connectivity(self):
# The target login is assumed to have been configured for
# key-based authentication by cloud-init.
ssh_login = self.config.compute.image_ssh_user
@@ -263,3 +250,15 @@
for floating_ip in floating_ips:
ip_address = floating_ip.floating_ip_address
self._check_vm_connectivity(ip_address, ssh_login, private_key)
+
+ @attr(type='smoke')
+ @services('compute', 'network')
+ def test_network_basic_ops(self):
+ self._create_keypairs()
+ self._create_security_groups()
+ self._create_networks()
+ self._check_networks()
+ self._create_servers()
+ self._check_tenant_network_connectivity()
+ self._assign_floating_ips()
+ self._check_public_network_connectivity()
diff --git a/tempest/scenario/test_network_quotas.py b/tempest/scenario/test_network_quotas.py
index 267aff6..3268066 100644
--- a/tempest/scenario/test_network_quotas.py
+++ b/tempest/scenario/test_network_quotas.py
@@ -16,7 +16,9 @@
# under the License.
from neutronclient.common import exceptions as exc
+
from tempest.scenario.manager import NetworkScenarioTest
+from tempest.test import services
MAX_REASONABLE_ITERATIONS = 51 # more than enough. Default for port is 50.
@@ -41,6 +43,7 @@
cls.subnets = []
cls.ports = []
+ @services('network')
def test_create_network_until_quota_hit(self):
hit_limit = False
for n in xrange(MAX_REASONABLE_ITERATIONS):
@@ -55,6 +58,7 @@
break
self.assertTrue(hit_limit, "Failed: Did not hit quota limit !")
+ @services('network')
def test_create_subnet_until_quota_hit(self):
if not self.networks:
self.networks.append(
@@ -73,6 +77,7 @@
break
self.assertTrue(hit_limit, "Failed: Did not hit quota limit !")
+ @services('network')
def test_create_ports_until_quota_hit(self):
if not self.networks:
self.networks.append(
diff --git a/tempest/scenario/test_server_advanced_ops.py b/tempest/scenario/test_server_advanced_ops.py
index 8ee740e..cf72cd4 100644
--- a/tempest/scenario/test_server_advanced_ops.py
+++ b/tempest/scenario/test_server_advanced_ops.py
@@ -18,6 +18,7 @@
from tempest.common.utils.data_utils import rand_name
from tempest.openstack.common import log as logging
from tempest.scenario import manager
+from tempest.test import services
LOG = logging.getLogger(__name__)
@@ -45,6 +46,7 @@
msg = "Skipping test - flavor_ref and flavor_ref_alt are identical"
raise cls.skipException(msg)
+ @services('compute')
def test_resize_server_confirm(self):
# We create an instance for use in this test
i_name = rand_name('instance')
@@ -73,6 +75,7 @@
self.status_timeout(
self.compute_client.servers, instance_id, 'ACTIVE')
+ @services('compute')
def test_server_sequence_suspend_resume(self):
# We create an instance for use in this test
i_name = rand_name('instance')
diff --git a/tempest/scenario/test_server_basic_ops.py b/tempest/scenario/test_server_basic_ops.py
index 8e14b06..04204eb 100644
--- a/tempest/scenario/test_server_basic_ops.py
+++ b/tempest/scenario/test_server_basic_ops.py
@@ -18,6 +18,7 @@
from tempest.common.utils.data_utils import rand_name
from tempest.openstack.common import log as logging
from tempest.scenario import manager
+from tempest.test import services
LOG = logging.getLogger(__name__)
@@ -100,6 +101,7 @@
instance.delete()
self.remove_resource('instance')
+ @services('compute', 'network')
def test_server_basicops(self):
self.add_keypair()
self.create_security_group()
diff --git a/tempest/scenario/test_snapshot_pattern.py b/tempest/scenario/test_snapshot_pattern.py
index 003c264..8c2cc76 100644
--- a/tempest/scenario/test_snapshot_pattern.py
+++ b/tempest/scenario/test_snapshot_pattern.py
@@ -16,6 +16,7 @@
# under the License.
from tempest.scenario import manager
+from tempest.test import services
class TestSnapshotPattern(manager.OfficialClientTest):
@@ -61,6 +62,7 @@
def _set_floating_ip_to_server(self, server, floating_ip):
server.add_floating_ip(floating_ip)
+ @services('compute', 'network', 'image')
def test_snapshot_pattern(self):
# prepare for booting a instance
self._add_keypair()
diff --git a/tempest/scenario/test_stamp_pattern.py b/tempest/scenario/test_stamp_pattern.py
index 5af4bb2..c5a4aaf 100644
--- a/tempest/scenario/test_stamp_pattern.py
+++ b/tempest/scenario/test_stamp_pattern.py
@@ -144,6 +144,7 @@
self.assertEqual(self.timestamp, got_timestamp)
@testtools.skip("Skipped until the Bug #1205344 is resolved.")
+ @tempest.test.services('compute', 'network', 'volume', 'image')
def test_stamp_pattern(self):
# prepare for booting a instance
self._add_keypair()
diff --git a/tempest/scenario/test_volume_boot_pattern.py b/tempest/scenario/test_volume_boot_pattern.py
new file mode 100644
index 0000000..3572166
--- /dev/null
+++ b/tempest/scenario/test_volume_boot_pattern.py
@@ -0,0 +1,163 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.common.utils.data_utils import rand_name
+from tempest.scenario import manager
+from tempest.test import services
+
+
+class TestVolumeBootPattern(manager.OfficialClientTest):
+
+ """
+ This test case attempts to reproduce the following steps:
+
+ * Create in Cinder some bootable volume importing a Glance image
+ * Boot an instance from the bootable volume
+ * Write content to the volume
+ * Delete an instance and Boot a new instance from the volume
+ * Check written content in the instance
+ * Create a volume snapshot while the instance is running
+ * Boot an additional instance from the new snapshot based volume
+ * Check written content in the instance booted from snapshot
+ """
+
+ def _create_volume_from_image(self):
+ img_uuid = self.config.compute.image_ref
+ vol_name = rand_name('volume-origin')
+ return self.create_volume(name=vol_name, imageRef=img_uuid)
+
+ def _boot_instance_from_volume(self, vol_id, keypair):
+ # NOTE(gfidente): the syntax for block_device_mapping is
+ # dev_name=id:type:size:delete_on_terminate
+ # where type needs to be "snap" if the server is booted
+ # from a snapshot, size instead can be safely left empty
+ bd_map = {
+ 'vda': vol_id + ':::0'
+ }
+ create_kwargs = {
+ 'block_device_mapping': bd_map,
+ 'key_name': keypair.name
+ }
+ return self.create_server(self.compute_client,
+ create_kwargs=create_kwargs)
+
+ def _create_snapshot_from_volume(self, vol_id):
+ volume_snapshots = self.volume_client.volume_snapshots
+ snap_name = rand_name('snapshot')
+ snap = volume_snapshots.create(volume_id=vol_id,
+ force=True,
+ display_name=snap_name)
+ self.set_resource(snap.id, snap)
+ self.status_timeout(volume_snapshots,
+ snap.id,
+ 'available')
+ return snap
+
+ def _create_volume_from_snapshot(self, snap_id):
+ vol_name = rand_name('volume')
+ return self.create_volume(name=vol_name, snapshot_id=snap_id)
+
+ def _stop_instances(self, instances):
+ # NOTE(gfidente): two loops so we do not wait for the status twice
+ for i in instances:
+ self.compute_client.servers.stop(i)
+ for i in instances:
+ self.status_timeout(self.compute_client.servers,
+ i.id,
+ 'SHUTOFF')
+
+ def _detach_volumes(self, volumes):
+ # NOTE(gfidente): two loops so we do not wait for the status twice
+ for v in volumes:
+ self.volume_client.volumes.detach(v)
+ for v in volumes:
+ self.status_timeout(self.volume_client.volumes,
+ v.id,
+ 'available')
+
+ def _ssh_to_server(self, server, keypair):
+ if self.config.compute.use_floatingip_for_ssh:
+ floating_ip = self.compute_client.floating_ips.create()
+ fip_name = rand_name('scenario-fip')
+ self.set_resource(fip_name, floating_ip)
+ server.add_floating_ip(floating_ip)
+ ip = floating_ip.ip
+ else:
+ network_name_for_ssh = self.config.compute.network_for_ssh
+ ip = server.networks[network_name_for_ssh][0]
+
+ client = self.get_remote_client(ip,
+ private_key=keypair.private_key)
+ return client.ssh_client
+
+ def _get_content(self, ssh_client):
+ return ssh_client.exec_command('cat /tmp/text')
+
+ def _write_text(self, ssh_client):
+ text = rand_name('text-')
+ ssh_client.exec_command('echo "%s" > /tmp/text; sync' % (text))
+
+ return self._get_content(ssh_client)
+
+ def _delete_server(self, server):
+ self.compute_client.servers.delete(server)
+ self.delete_timeout(self.compute_client.servers, server.id)
+
+ def _check_content_of_written_file(self, ssh_client, expected):
+ actual = self._get_content(ssh_client)
+ self.assertEqual(expected, actual)
+
+ @services('compute', 'volume', 'image')
+ def test_volume_boot_pattern(self):
+ keypair = self.create_keypair()
+ self.create_loginable_secgroup_rule()
+
+ # create an instance from volume
+ volume_origin = self._create_volume_from_image()
+ instance_1st = self._boot_instance_from_volume(volume_origin.id,
+ keypair)
+
+ # write content to volume on instance
+ ssh_client_for_instance_1st = self._ssh_to_server(instance_1st,
+ keypair)
+ text = self._write_text(ssh_client_for_instance_1st)
+
+ # delete instance
+ self._delete_server(instance_1st)
+
+ # create a 2nd instance from volume
+ instance_2nd = self._boot_instance_from_volume(volume_origin.id,
+ keypair)
+
+ # check the content of written file
+ ssh_client_for_instance_2nd = self._ssh_to_server(instance_2nd,
+ keypair)
+ self._check_content_of_written_file(ssh_client_for_instance_2nd, text)
+
+ # snapshot a volume
+ snapshot = self._create_snapshot_from_volume(volume_origin.id)
+
+ # create a 3rd instance from snapshot
+ volume = self._create_volume_from_snapshot(snapshot.id)
+ instance_from_snapshot = self._boot_instance_from_volume(volume.id,
+ keypair)
+
+ # check the content of written file
+ ssh_client = self._ssh_to_server(instance_from_snapshot, keypair)
+ self._check_content_of_written_file(ssh_client, text)
+
+ # NOTE(gfidente): ensure resources are in clean state for
+ # deletion operations to succeed
+ self._stop_instances([instance_2nd, instance_from_snapshot])
+ self._detach_volumes([volume_origin, volume])
diff --git a/tempest/scenario/test_volume_snapshot_pattern.py b/tempest/scenario/test_volume_snapshot_pattern.py
deleted file mode 100644
index d873d30..0000000
--- a/tempest/scenario/test_volume_snapshot_pattern.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.common.utils.data_utils import rand_name
-from tempest.scenario import manager
-
-
-class TestVolumeSnapshotPattern(manager.OfficialClientTest):
-
- """
- This test case attempts to reproduce the following steps:
-
- * Create in Cinder some bootable volume importing a Glance image
- * Boot an instance from the bootable volume
- * Create a volume snapshot while the instance is running
- * Boot an additional instance from the new snapshot based volume
- """
-
- def _create_volume_from_image(self):
- img_uuid = self.config.compute.image_ref
- vol_name = rand_name('volume-origin')
- return self.create_volume(name=vol_name, imageRef=img_uuid)
-
- def _boot_instance_from_volume(self, vol_id):
- # NOTE(gfidente): the syntax for block_device_mapping is
- # dev_name=id:type:size:delete_on_terminate
- # where type needs to be "snap" if the server is booted
- # from a snapshot, size instead can be safely left empty
- bd_map = {
- 'vda': vol_id + ':::0'
- }
- create_kwargs = {
- 'block_device_mapping': bd_map
- }
- return self.create_server(self.compute_client,
- create_kwargs=create_kwargs)
-
- def _create_snapshot_from_volume(self, vol_id):
- volume_snapshots = self.volume_client.volume_snapshots
- snap_name = rand_name('snapshot')
- snap = volume_snapshots.create(volume_id=vol_id,
- force=True,
- display_name=snap_name)
- self.set_resource(snap.id, snap)
- self.status_timeout(volume_snapshots,
- snap.id,
- 'available')
- return snap
-
- def _create_volume_from_snapshot(self, snap_id):
- vol_name = rand_name('volume')
- return self.create_volume(name=vol_name, snapshot_id=snap_id)
-
- def _stop_instances(self, instances):
- # NOTE(gfidente): two loops so we do not wait for the status twice
- for i in instances:
- self.compute_client.servers.stop(i)
- for i in instances:
- self.status_timeout(self.compute_client.servers,
- i.id,
- 'SHUTOFF')
-
- def _detach_volumes(self, volumes):
- # NOTE(gfidente): two loops so we do not wait for the status twice
- for v in volumes:
- self.volume_client.volumes.detach(v)
- for v in volumes:
- self.status_timeout(self.volume_client.volumes,
- v.id,
- 'available')
-
- def test_volume_snapshot_pattern(self):
- volume_origin = self._create_volume_from_image()
- i_origin = self._boot_instance_from_volume(volume_origin.id)
- snapshot = self._create_snapshot_from_volume(volume_origin.id)
- volume = self._create_volume_from_snapshot(snapshot.id)
- i = self._boot_instance_from_volume(volume.id)
- # NOTE(gfidente): ensure resources are in clean state for
- # deletion operations to succeed
- self._stop_instances([i_origin, i])
- self._detach_volumes([volume_origin, volume])
diff --git a/tempest/services/compute/xml/servers_client.py b/tempest/services/compute/xml/servers_client.py
index ec9464a..6f17611 100644
--- a/tempest/services/compute/xml/servers_client.py
+++ b/tempest/services/compute/xml/servers_client.py
@@ -46,9 +46,14 @@
# expanded xml namespace.
type_ns_prefix = ('{http://docs.openstack.org/compute/ext/extended_ips/'
'api/v1.1}type')
+ mac_ns_prefix = ('{http://docs.openstack.org/compute/ext/extended_ips_mac'
+ '/api/v1.1}mac_addr')
+
if type_ns_prefix in ip:
- ip['OS-EXT-IPS:type'] = ip[type_ns_prefix]
- ip.pop(type_ns_prefix)
+ ip['OS-EXT-IPS:type'] = ip.pop(type_ns_prefix)
+
+ if mac_ns_prefix in ip:
+ ip['OS-EXT-IPS-MAC:mac_addr'] = ip.pop(mac_ns_prefix)
return ip
@@ -101,11 +106,35 @@
json['addresses'] = json_addresses
else:
json = xml_to_json(xml_dom)
- diskConfig = '{http://docs.openstack.org/compute/ext/disk_config/api/v1.1'\
- '}diskConfig'
+ diskConfig = ('{http://docs.openstack.org'
+ '/compute/ext/disk_config/api/v1.1}diskConfig')
+ terminated_at = ('{http://docs.openstack.org/'
+ 'compute/ext/server_usage/api/v1.1}terminated_at')
+ launched_at = ('{http://docs.openstack.org'
+ '/compute/ext/server_usage/api/v1.1}launched_at')
+ power_state = ('{http://docs.openstack.org'
+ '/compute/ext/extended_status/api/v1.1}power_state')
+ availability_zone = ('{http://docs.openstack.org'
+ '/compute/ext/extended_availability_zone/api/v2}'
+ 'availability_zone')
+ vm_state = ('{http://docs.openstack.org'
+ '/compute/ext/extended_status/api/v1.1}vm_state')
+ task_state = ('{http://docs.openstack.org'
+ '/compute/ext/extended_status/api/v1.1}task_state')
if diskConfig in json:
- json['OS-DCF:diskConfig'] = json[diskConfig]
- del json[diskConfig]
+ json['OS-DCF:diskConfig'] = json.pop(diskConfig)
+ if terminated_at in json:
+ json['OS-SRV-USG:terminated_at'] = json.pop(terminated_at)
+ if launched_at in json:
+ json['OS-SRV-USG:launched_at'] = json.pop(launched_at)
+ if power_state in json:
+ json['OS-EXT-STS:power_state'] = json.pop(power_state)
+ if availability_zone in json:
+ json['OS-EXT-AZ:availability_zone'] = json.pop(availability_zone)
+ if vm_state in json:
+ json['OS-EXT-STS:vm_state'] = json.pop(vm_state)
+ if task_state in json:
+ json['OS-EXT-STS:task_state'] = json.pop(task_state)
return json
diff --git a/tempest/test.py b/tempest/test.py
index decae94..24c4489 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -26,6 +26,7 @@
from tempest import clients
from tempest import config
+from tempest import exceptions
from tempest.openstack.common import log as logging
LOG = logging.getLogger(__name__)
@@ -57,6 +58,25 @@
return decorator
+def services(*args, **kwargs):
+ """A decorator used to set an attr for each service used in a test case
+
+ This decorator applies a testtools attr for each service that gets
+ exercised by a test case.
+ """
+ valid_service_list = ['compute', 'image', 'volume', 'orchestration',
+ 'network', 'identity', 'object', 'dashboard']
+
+ def decorator(f):
+ for service in args:
+ if service not in valid_service_list:
+ raise exceptions.InvalidServiceTag('%s is not a valid service'
+ % service)
+ attr(type=list(args))(f)
+ return f
+ return decorator
+
+
def stresstest(*args, **kwargs):
"""Add stress test decorator
diff --git a/tempest/whitebox/README.rst b/tempest/whitebox/README.rst
deleted file mode 100644
index 0e45421..0000000
--- a/tempest/whitebox/README.rst
+++ /dev/null
@@ -1,46 +0,0 @@
-Tempest Guide to Whitebox tests
-===============================
-
-
-What are these tests?
----------------------
-
-When you hit the OpenStack API, this causes internal state changes in
-the system. This might be database transitions, vm modifications,
-other deep state changes which aren't really accessible from the
-OpenStack API. These side effects are sometimes important to
-validate.
-
-White box testing is an approach there. In white box testing you are
-given database access to the environment, and can verify internal
-record changes after an API call.
-
-This is an optional part of testing, and requires extra setup, but can
-be useful for validating Tempest internals.
-
-
-Why are these tests in tempest?
--------------------------------
-
-Especially when it comes to something like VM state changing, which is
-a coordination of numerous running daemons, and a functioning VM, it's
-very difficult to get a realistic test like this in unit tests.
-
-
-Scope of these tests
---------------------
-
-White box tests should be limitted to tests where black box testing
-(using the OpenStack API to verify results) isn't sufficient.
-
-As these poke at internals of OpenStack, it should also be realized
-that these tests are very tightly coupled to current implementation of
-OpenStack. They will need to be maintained agressively to keep up with
-internals changes in OpenStack projects.
-
-
-Example of a good test
-----------------------
-
-Pushing VMs through a series of state transitions, and ensuring along
-the way the database state transitions match what's expected.
diff --git a/tempest/whitebox/__init__.py b/tempest/whitebox/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/tempest/whitebox/__init__.py
+++ /dev/null
diff --git a/tempest/whitebox/manager.py b/tempest/whitebox/manager.py
deleted file mode 100644
index 3b1b107..0000000
--- a/tempest/whitebox/manager.py
+++ /dev/null
@@ -1,146 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack, LLC
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import os
-import shlex
-import subprocess
-import sys
-
-from sqlalchemy import create_engine, MetaData
-
-from tempest.common.ssh import Client
-from tempest.common.utils.data_utils import rand_name
-from tempest import exceptions
-from tempest.openstack.common import log as logging
-from tempest.scenario import manager
-
-LOG = logging.getLogger(__name__)
-
-
-class WhiteboxTest(object):
-
- """
- Base test case class mixin for "whitebox tests"
-
- Whitebox tests are tests that have the following characteristics:
-
- * Test common and advanced operations against a set of servers
- * Use a client that it is possible to send random or bad data with
- * SSH into either a host or a guest in order to validate server state
- * May execute SQL queries directly against internal databases to verify
- the state of data records
- """
- pass
-
-
-class ComputeWhiteboxTest(manager.OfficialClientTest):
-
- """
- Base smoke test case class for OpenStack Compute API (Nova)
- """
-
- @classmethod
- def setUpClass(cls):
- super(ComputeWhiteboxTest, cls).setUpClass()
- if not cls.config.whitebox.whitebox_enabled:
- msg = "Whitebox testing disabled"
- raise cls.skipException(msg)
-
- # Add some convenience attributes that tests use...
- cls.nova_dir = cls.config.whitebox.source_dir
- cls.compute_bin_dir = cls.config.whitebox.bin_dir
- cls.compute_config_path = cls.config.whitebox.config_path
- cls.build_interval = cls.config.compute.build_interval
- cls.build_timeout = cls.config.compute.build_timeout
- cls.ssh_user = cls.config.compute.ssh_user
- cls.image_ref = cls.config.compute.image_ref
- cls.image_ref_alt = cls.config.compute.image_ref_alt
- cls.flavor_ref = cls.config.compute.flavor_ref
- cls.flavor_ref_alt = cls.config.compute.flavor_ref_alt
-
- # NOTE(afazekas): Mimics the helper method used in the api tests
- @classmethod
- def create_server(cls, **kwargs):
- flavor_ref = cls.config.compute.flavor_ref
- image_ref = cls.config.compute.image_ref
- name = rand_name(cls.__name__ + "-instance")
- if 'name' in kwargs:
- name = kwargs.pop('name')
- flavor = kwargs.get('flavor', flavor_ref)
- image_id = kwargs.get('image_id', image_ref)
-
- server = cls.compute_client.servers.create(
- name, image_id, flavor, **kwargs)
-
- if 'wait_until' in kwargs:
- cls.status_timeout(cls.compute_client.servers, server.id,
- server['id'], kwargs['wait_until'])
-
- server = cls.compute_client.servers.get(server.id)
- cls.set_resource(name, server)
- return server
-
- @classmethod
- def get_db_handle_and_meta(cls, database='nova'):
- """Return a connection handle and metadata of an OpenStack database."""
- engine_args = {"echo": False,
- "convert_unicode": True,
- "pool_recycle": 3600
- }
-
- try:
- engine = create_engine(cls.config.whitebox.db_uri, **engine_args)
- connection = engine.connect()
- meta = MetaData()
- meta.reflect(bind=engine)
-
- except Exception as e:
- raise exceptions.SQLException(message=e)
-
- return connection, meta
-
- def nova_manage(self, category, action, params):
- """Executes nova-manage command for the given action."""
-
- nova_manage_path = os.path.join(self.compute_bin_dir, 'nova-manage')
- cmd = ' '.join([nova_manage_path, category, action, params])
-
- if self.deploy_mode == 'devstack-local':
- if not os.path.isdir(self.nova_dir):
- sys.exit("Cannot find Nova source directory: %s" %
- self.nova_dir)
-
- cmd = shlex.split(cmd)
- result = subprocess.Popen(cmd, stdout=subprocess.PIPE)
-
- # TODO(rohitk): Need to define host connection parameters in config
- else:
- client = self.get_ssh_connection(self.config.whitebox.api_host,
- self.config.whitebox.api_user,
- self.config.whitebox.api_passwd)
- result = client.exec_command(cmd)
-
- return result
-
- def get_ssh_connection(self, host, username, password):
- """Create an SSH connection object to a host."""
- ssh_timeout = self.config.compute.ssh_timeout
- ssh_client = Client(host, username, password, ssh_timeout)
- if not ssh_client.test_connection_auth():
- raise exceptions.SSHTimeout()
- else:
- return ssh_client
diff --git a/tempest/whitebox/test_images_whitebox.py b/tempest/whitebox/test_images_whitebox.py
deleted file mode 100644
index 06dcd7f..0000000
--- a/tempest/whitebox/test_images_whitebox.py
+++ /dev/null
@@ -1,163 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack, LLC
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.common.utils.data_utils import rand_name
-from tempest.openstack.common import log as logging
-from tempest.whitebox import manager
-
-from novaclient import exceptions
-
-LOG = logging.getLogger(__name__)
-
-
-class ImagesWhiteboxTest(manager.ComputeWhiteboxTest):
- _interface = 'json'
-
- @classmethod
- def setUpClass(cls):
- super(ImagesWhiteboxTest, cls).setUpClass()
- cls.create_image = cls.compute_client.servers.create_image
- cls.connection, cls.meta = cls.get_db_handle_and_meta()
- cls.shared_server = cls.create_server()
- cls.image_ids = []
-
- @classmethod
- def tearDownClass(cls):
- """Delete images and server after a test is executed."""
- for image_id in cls.image_ids:
- cls.client.delete_image(image_id)
- cls.image_ids.remove(image_id)
- super(ImagesWhiteboxTest, cls).tearDownClass()
-
- @classmethod
- def update_state(self, server_id, vm_state, task_state, deleted=0):
- """Update states of an instance in database for validation."""
- if not task_state:
- task_state = "NULL"
-
- instances = self.meta.tables['instances']
- stmt = instances.update().where(instances.c.uuid == server_id).values(
- deleted=deleted,
- vm_state=vm_state,
- task_state=task_state)
-
- self.connection.execute(stmt, autocommit=True)
-
- def _test_create_image_409_base(self, vm_state, task_state, deleted=0):
- """Base method for create image tests based on vm and task states."""
- try:
- self.update_state(self.shared_server.id, vm_state,
- task_state, deleted)
-
- image_name = rand_name('snap-')
- self.assertRaises(exceptions.Conflict,
- self.create_image,
- self.shared_server.id, image_name)
- except Exception:
- LOG.error("Should not allow create image when vm_state=%s and "
- "task_state=%s" % (vm_state, task_state))
- raise
- finally:
- self.update_state(self.shared_server.id, 'active', None)
-
- def test_create_image_when_vm_eq_building_task_eq_scheduling(self):
- # 409 error when instance states are building,scheduling
- self._test_create_image_409_base("building", "scheduling")
-
- def test_create_image_when_vm_eq_building_task_eq_networking(self):
- # 409 error when instance states are building,networking
- self._test_create_image_409_base("building", "networking")
-
- def test_create_image_when_vm_eq_building_task_eq_bdm(self):
- # 409 error when instance states are building,block_device_mapping
- self._test_create_image_409_base("building", "block_device_mapping")
-
- def test_create_image_when_vm_eq_building_task_eq_spawning(self):
- # 409 error when instance states are building,spawning
- self._test_create_image_409_base("building", "spawning")
-
- def test_create_image_when_vm_eq_active_task_eq_image_backup(self):
- # 409 error when instance states are active,image_backup
- self._test_create_image_409_base("active", "image_backup")
-
- def test_create_image_when_vm_eq_resized_task_eq_resize_prep(self):
- # 409 error when instance states are resized,resize_prep
- self._test_create_image_409_base("resized", "resize_prep")
-
- def test_create_image_when_vm_eq_resized_task_eq_resize_migrating(self):
- # 409 error when instance states are resized,resize_migrating
- self._test_create_image_409_base("resized", "resize_migrating")
-
- def test_create_image_when_vm_eq_resized_task_eq_resize_migrated(self):
- # 409 error when instance states are resized,resize_migrated
- self._test_create_image_409_base("resized", "resize_migrated")
-
- def test_create_image_when_vm_eq_resized_task_eq_resize_finish(self):
- # 409 error when instance states are resized,resize_finish
- self._test_create_image_409_base("resized", "resize_finish")
-
- def test_create_image_when_vm_eq_resized_task_eq_resize_reverting(self):
- # 409 error when instance states are resized,resize_reverting
- self._test_create_image_409_base("resized", "resize_reverting")
-
- def test_create_image_when_vm_eq_resized_task_eq_resize_confirming(self):
- # 409 error when instance states are resized,resize_confirming
- self._test_create_image_409_base("resized", "resize_confirming")
-
- def test_create_image_when_vm_eq_active_task_eq_resize_verify(self):
- # 409 error when instance states are active,resize_verify
- self._test_create_image_409_base("active", "resize_verify")
-
- def test_create_image_when_vm_eq_active_task_eq_updating_password(self):
- # 409 error when instance states are active,updating_password
- self._test_create_image_409_base("active", "updating_password")
-
- def test_create_image_when_vm_eq_active_task_eq_rebuilding(self):
- # 409 error when instance states are active,rebuilding
- self._test_create_image_409_base("active", "rebuilding")
-
- def test_create_image_when_vm_eq_active_task_eq_rebooting(self):
- # 409 error when instance states are active,rebooting
- self._test_create_image_409_base("active", "rebooting")
-
- def test_create_image_when_vm_eq_building_task_eq_deleting(self):
- # 409 error when instance states are building,deleting
- self._test_create_image_409_base("building", "deleting")
-
- def test_create_image_when_vm_eq_active_task_eq_deleting(self):
- # 409 error when instance states are active,deleting
- self._test_create_image_409_base("active", "deleting")
-
- def test_create_image_when_vm_eq_error_task_eq_building(self):
- # 409 error when instance states are error,building
- self._test_create_image_409_base("error", "building")
-
- def test_create_image_when_vm_eq_error_task_eq_none(self):
- # 409 error when instance states are error,None
- self._test_create_image_409_base("error", None)
-
- def test_create_image_when_vm_eq_deleted_task_eq_none(self):
- # 409 error when instance states are deleted,None
- self._test_create_image_409_base("deleted", None)
-
- def test_create_image_when_vm_eq_resized_task_eq_none(self):
- # 409 error when instance states are resized,None
- self._test_create_image_409_base("resized", None)
-
- def test_create_image_when_vm_eq_error_task_eq_resize_prep(self):
- # 409 error when instance states are error,resize_prep
- self._test_create_image_409_base("error", "resize_prep")
diff --git a/tempest/whitebox/test_servers_whitebox.py b/tempest/whitebox/test_servers_whitebox.py
deleted file mode 100644
index b6c888c..0000000
--- a/tempest/whitebox/test_servers_whitebox.py
+++ /dev/null
@@ -1,184 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack, LLC
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.api.identity.base import BaseIdentityAdminTest
-from tempest import exceptions
-from tempest.openstack.common import log as logging
-from tempest.whitebox import manager
-
-LOG = logging.getLogger(__name__)
-
-
-class ServersWhiteboxTest(manager.ComputeWhiteboxTest):
- _interface = 'json'
-
- @classmethod
- def setUpClass(cls):
- super(ServersWhiteboxTest, cls).setUpClass()
- # NOTE(afazekas): Strange relationship
- BaseIdentityAdminTest.setUpClass()
- cls.client = cls.servers_client
- cls.img_client = cls.images_client
- cls.admin_client = BaseIdentityAdminTest.client
-
- cls.connection, cls.meta = cls.get_db_handle_and_meta()
-
- resp, tenants = cls.admin_client.list_tenants()
- cls.tenant_id = [
- tnt['id']
- for tnt in tenants if tnt['name'] == cls.config.compute.tenant_name
- ][0]
-
- cls.shared_server = cls.create_server()
-
- def tearDown(cls):
- for server in cls.servers:
- try:
- cls.client.delete_server(server['id'])
- except exceptions.NotFound:
- continue
-
- def update_state(self, server_id, vm_state, task_state, deleted=0):
- """Update states of an instance in database for validation."""
- if not task_state:
- task_state = 'NULL'
-
- instances = self.meta.tables['instances']
- stmt = instances.update().where(instances.c.uuid == server_id).values(
- deleted=deleted,
- vm_state=vm_state,
- task_state=task_state)
- self.connection.execute(stmt, autocommit=True)
-
- def _test_delete_server_base(self, vm_state, task_state):
- """
- Base method for delete server tests based on vm and task states.
- Validates for successful server termination.
- """
- server = self.create_server()
- self.update_state(server['id'], vm_state, task_state)
-
- resp, body = self.client.delete_server(server['id'])
- self.assertEqual('204', resp['status'])
- self.client.wait_for_server_termination(server['id'],
- ignore_error=True)
-
- instances = self.meta.tables['instances']
- stmt = instances.select().where(instances.c.uuid == server['id'])
- result = self.connection.execute(stmt).first()
-
- self.assertEqual(True, result.deleted > 0)
- self.assertEqual('deleted', result.vm_state)
- self.assertEqual(None, result.task_state)
-
- def _test_delete_server_403_base(self, vm_state, task_state):
- """
- Base method for delete server tests based on vm and task states.
- Validates for 403 error code.
- """
- try:
- self.update_state(self.shared_server['id'], vm_state, task_state)
-
- self.assertRaises(exceptions.Unauthorized,
- self.client.delete_server,
- self.shared_server['id'])
- except Exception:
- LOG.error("Should not allow delete server when vm_state=%s and "
- "task_state=%s" % (vm_state, task_state))
- raise
- finally:
- self.update_state(self.shared_server['id'], 'active', None)
-
- def test_delete_server_when_vm_eq_building_task_eq_networking(self):
- # Delete server when instance states are building,networking
- self._test_delete_server_base('building', 'networking')
-
- def test_delete_server_when_vm_eq_building_task_eq_bdm(self):
- # Delete server when instance states are building,block device mapping
- self._test_delete_server_base('building', 'block_device_mapping')
-
- def test_delete_server_when_vm_eq_building_task_eq_spawning(self):
- # Delete server when instance states are building,spawning
- self._test_delete_server_base('building', 'spawning')
-
- def test_delete_server_when_vm_eq_active_task_eq_image_backup(self):
- # Delete server when instance states are active,image_backup
- self._test_delete_server_base('active', 'image_backup')
-
- def test_delete_server_when_vm_eq_active_task_eq_rebuilding(self):
- # Delete server when instance states are active,rebuilding
- self._test_delete_server_base('active', 'rebuilding')
-
- def test_delete_server_when_vm_eq_error_task_eq_spawning(self):
- # Delete server when instance states are error,spawning
- self._test_delete_server_base('error', 'spawning')
-
- def test_delete_server_when_vm_eq_resized_task_eq_resize_prep(self):
- # Delete server when instance states are resized,resize_prep
- self._test_delete_server_403_base('resized', 'resize_prep')
-
- def test_delete_server_when_vm_eq_resized_task_eq_resize_migrating(self):
- # Delete server when instance states are resized,resize_migrating
- self._test_delete_server_403_base('resized', 'resize_migrating')
-
- def test_delete_server_when_vm_eq_resized_task_eq_resize_migrated(self):
- # Delete server when instance states are resized,resize_migrated
- self._test_delete_server_403_base('resized', 'resize_migrated')
-
- def test_delete_server_when_vm_eq_resized_task_eq_resize_finish(self):
- # Delete server when instance states are resized,resize_finish
- self._test_delete_server_403_base('resized', 'resize_finish')
-
- def test_delete_server_when_vm_eq_resized_task_eq_resize_reverting(self):
- # Delete server when instance states are resized,resize_reverting
- self._test_delete_server_403_base('resized', 'resize_reverting')
-
- def test_delete_server_when_vm_eq_resized_task_eq_resize_confirming(self):
- # Delete server when instance states are resized,resize_confirming
- self._test_delete_server_403_base('resized', 'resize_confirming')
-
- def test_delete_server_when_vm_eq_active_task_eq_resize_verify(self):
- # Delete server when instance states are active,resize_verify
- self._test_delete_server_base('active', 'resize_verify')
-
- def test_delete_server_when_vm_eq_active_task_eq_rebooting(self):
- # Delete server when instance states are active,rebooting
- self._test_delete_server_base('active', 'rebooting')
-
- def test_delete_server_when_vm_eq_building_task_eq_deleting(self):
- # Delete server when instance states are building,deleting
- self._test_delete_server_base('building', 'deleting')
-
- def test_delete_server_when_vm_eq_active_task_eq_deleting(self):
- # Delete server when instance states are active,deleting
- self._test_delete_server_base('active', 'deleting')
-
- def test_delete_server_when_vm_eq_error_task_eq_none(self):
- # Delete server when instance states are error,None
- self._test_delete_server_base('error', None)
-
- def test_delete_server_when_vm_eq_resized_task_eq_none(self):
- # Delete server when instance states are resized,None
- self._test_delete_server_403_base('resized', None)
-
- def test_delete_server_when_vm_eq_error_task_eq_resize_prep(self):
- # Delete server when instance states are error,resize_prep
- self._test_delete_server_base('error', 'resize_prep')
-
- def test_delete_server_when_vm_eq_error_task_eq_error(self):
- # Delete server when instance states are error,error
- self._test_delete_server_base('error', 'error')