Merge "Add logging to the python-clients"
diff --git a/.testr.conf b/.testr.conf
index 510f4c9..05b12c4 100644
--- a/.testr.conf
+++ b/.testr.conf
@@ -1,7 +1,7 @@
[DEFAULT]
test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
- OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-250} \
+ OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-500} \
${PYTHON:-python} -m subunit.run discover -t ./ ./tempest $LISTOPT $IDOPTION
test_id_option=--load-list $IDFILE
test_list_option=--list
diff --git a/HACKING.rst b/HACKING.rst
index 1eb2d4f..5153fe1 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -20,6 +20,7 @@
- [T102] Cannot import OpenStack python clients in tempest/api tests
- [T103] tempest/tests is deprecated
+- [T104] Scenario tests require a services decorator
Test Data/Configuration
-----------------------
@@ -68,16 +69,52 @@
the ``self.fail`` line as the origin of the error.
Avoid constructing complex boolean expressions for assertion.
-The ``self.assertTrue`` or ``self.assertFalse`` will just tell you the
-single boolean, and you will not know anything about the values used in
-the formula. Most other assert method can include more information.
+The ``self.assertTrue`` or ``self.assertFalse`` without a ``msg`` argument,
+will just tell you the single boolean value, and you will not know anything
+about the values used in the formula, the ``msg`` argument might be good enough
+for providing more information.
+
+Most other assert methods can include more information by default.
For example ``self.assertIn`` can include the whole set.
+It is recommended to use testtools matchers for more tricky assertions.
+`[doc] <http://testtools.readthedocs.org/en/latest/for-test-authors.html#matchers>`_
+
+You can implement your own specific matcher as well.
+`[doc] <http://testtools.readthedocs.org/en/latest/for-test-authors.html#writing-your-own-matchers>`_
+
If the test case fails you can see the related logs and the information
carried by the exception (exception class, backtrack and exception info).
This and the service logs are your only guide to find the root cause of flaky
issue.
+Test cases are independent
+--------------------------
+Every ``test_method`` must be callable individually and MUST NOT depend on
+any other ``test_method`` or on ``test_method`` ordering.
+
+Test cases MAY depend on commonly initialized resources/facilities, like
+credentials management, testresources and so on. These facilities MUST be able
+to work even if just one ``test_method`` is selected for execution.
+
+Service Tagging
+---------------
+Service tagging is used to specify which services are exercised by a particular
+test method. You specify the services with the tempest.test.services decorator.
+For example:
+
+@services('compute', 'image')
+
+Valid service tag names are the same as the list of directories in tempest.api
+that have tests.
+
+For scenario tests having a service tag is required. For the api tests service
+tags are only needed if the test method makes an api call (either directly or
+indirectly through another service) that differs from the parent directory
+name. For example, any test that makes an api call to a service other than nova
+in tempest.api.compute would require a service tag for those services, however
+they do not need to be tagged as compute.
+
Guidelines
----------
- Do not submit changesets with only testcases which are skipped as
diff --git a/bin/tempest b/bin/tempest
deleted file mode 100755
index 87ba6d5..0000000
--- a/bin/tempest
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/usr/bin/env bash
-
-function usage {
- echo "Usage: $0 [OPTION]..."
- echo "Run Tempest test suite"
- echo ""
- echo " -s, --smoke Only run smoke tests"
- echo " -w, --whitebox Only run whitebox tests"
- echo " -h, --help Print this usage message"
- echo " -d. --debug Debug this script -- set -o xtrace"
- exit
-}
-
-function process_option {
- case "$1" in
- -h|--help) usage;;
- -d|--debug) set -o xtrace;;
- -s|--smoke) noseargs="$noseargs --attr=type=smoke";;
- -w|--whitebox) noseargs="$noseargs --attr=type=whitebox";;
- *) noseargs="$noseargs $1"
- esac
-}
-
-noseargs=""
-
-export NOSE_WITH_OPENSTACK=1
-export NOSE_OPENSTACK_COLOR=1
-export NOSE_OPENSTACK_RED=15.00
-export NOSE_OPENSTACK_YELLOW=3.00
-export NOSE_OPENSTACK_SHOW_ELAPSED=1
-export NOSE_OPENSTACK_STDOUT=1
-
-for arg in "$@"; do
- process_option $arg
-done
-
-
-# only add tempest default if we don't specify a test
-if [[ "x$noseargs" =~ "tempest" ]]; then
- noseargs="$noseargs"
-else
- noseargs="$noseargs tempest"
-fi
-
-
-function run_tests {
- $NOSETESTS
-}
-
-NOSETESTS="nosetests $noseargs"
-
-run_tests || exit
diff --git a/doc/source/field_guide/unit_tests.rst b/doc/source/field_guide/unit_tests.rst
new file mode 120000
index 0000000..67a8b20
--- /dev/null
+++ b/doc/source/field_guide/unit_tests.rst
@@ -0,0 +1 @@
+../../../tempest/tests/README.rst
\ No newline at end of file
diff --git a/doc/source/field_guide/whitebox.rst b/doc/source/field_guide/whitebox.rst
deleted file mode 120000
index 47f6069..0000000
--- a/doc/source/field_guide/whitebox.rst
+++ /dev/null
@@ -1 +0,0 @@
-../../../tempest/whitebox/README.rst
\ No newline at end of file
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 00c4e9a..1c32b9c 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -31,7 +31,7 @@
field_guide/scenario
field_guide/stress
field_guide/thirdparty
- field_guide/whitebox
+ field_guide/unit_tests
------------------
API and test cases
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index 703d92a..2f07a19 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -76,10 +76,18 @@
flavor_ref = 1
flavor_ref_alt = 2
-# User names used to authenticate to an instance for a given image.
+# User name used to authenticate to an instance
image_ssh_user = root
+
+# Password used to authenticate to an instance
+image_ssh_password = password
+
+# User name used to authenticate to an instance using the alternate image
image_alt_ssh_user = root
+# Password used to authenticate to an instance using the alternate image
+image_alt_ssh_password = password
+
# Number of seconds to wait while looping to check the status of an
# instance that is building.
build_interval = 10
@@ -93,7 +101,7 @@
# executing the tests
run_ssh = false
-# Name of a user used to authenticated to an instance
+# Name of a user used to authenticate to an instance.
ssh_user = cirros
# Visible fixed network name
@@ -150,28 +158,8 @@
# When set to false, flavor extra data tests are forced to skip
flavor_extra_enabled = true
-[whitebox]
-# Whitebox options for compute. Whitebox options enable the
-# whitebox test cases, which look at internal Nova database state,
-# SSH into VMs to check instance state, etc.
-
-# Should we run whitebox tests for Compute?
-whitebox_enabled = true
-
-# Path of nova source directory
-source_dir = /opt/stack/nova
-
-# Path of nova configuration file
-config_path = /etc/nova/nova.conf
-
-# Directory containing nova binaries such as nova-manage
-bin_dir = /usr/local/bin
-
-# Connection string to the database of Compute service
-db_uri = mysql://nova:secret@localhost/nova
-
-# Path to a private key file for SSH access to remote hosts
-path_to_private_key = /home/user/.ssh/id_rsa
+# Expected first device name when a volume is attached to an instance
+volume_device_name = vdb
[compute-admin]
# This should be the username of a user WITH administrative privileges
@@ -390,3 +378,11 @@
heat = false
# Whether or not horizon is expected to be available
horizon = True
+
+[stress]
+# Maximum number of instances to create during test
+max_instances = 32
+# Time (in seconds) between log file error checks
+log_check_interval = 60
+# The default number of threads created while stress test
+default_thread_number_per_action=4
diff --git a/requirements.txt b/requirements.txt
index 877b23c..ab48ec5 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -18,6 +18,4 @@
keyring>=1.6.1
testrepository>=0.0.17
oslo.config>=1.1.0
-# Needed for whitebox testing
-SQLAlchemy>=0.7.8,<=0.7.99
eventlet>=0.13.0
diff --git a/run_tests.sh b/run_tests.sh
index f995cde..710fbaa 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -10,8 +10,7 @@
echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added."
echo " -u, --update Update the virtual environment with any newer package versions"
echo " -s, --smoke Only run smoke tests"
- echo " -w, --whitebox Only run whitebox tests"
- echo " -t, --parallel Run testr parallel"
+ echo " -t, --serial Run testr serially"
echo " -c, --nova-coverage Enable Nova coverage collection"
echo " -C, --config Config file location"
echo " -p, --pep8 Just run pep8"
@@ -26,7 +25,7 @@
just_pep8=0
venv=.venv
with_venv=tools/with_venv.sh
-parallel=0
+serial=0
always_venv=0
never_venv=0
no_site_packages=0
@@ -38,7 +37,7 @@
logging=0
logging_config=etc/logging.conf
-if ! options=$(getopt -o VNnfuswtcphdC:lL: -l virtual-env,no-virtual-env,no-site-packages,force,update,smoke,whitebox,parallel,nova-coverage,pep8,help,debug,config:,logging,logging-config: -- "$@")
+if ! options=$(getopt -o VNnfustcphdC:lL: -l virtual-env,no-virtual-env,no-site-packages,force,update,smoke,serial,nova-coverage,pep8,help,debug,config:,logging,logging-config: -- "$@")
then
# parse error
usage
@@ -60,8 +59,7 @@
-C|--config) config_file=$2; shift;;
-p|--pep8) let just_pep8=1;;
-s|--smoke) testrargs="$testrargs smoke";;
- -w|--whitebox) testrargs="$testrargs whitebox";;
- -t|--parallel) parallel=1;;
+ -t|--serial) serial=1;;
-l|--logging) logging=1;;
-L|--logging-config) logging_config=$2; shift;;
--) [ "yes" == "$first_uu" ] || testrargs="$testrargs $1"; first_uu=no ;;
@@ -101,10 +99,10 @@
function run_tests {
testr_init
${wrapper} find . -type f -name "*.pyc" -delete
- if [ $parallel -eq 1 ]; then
- ${wrapper} testr run --parallel --subunit $testrargs | ${wrapper} subunit-2to1 | ${wrapper} tools/colorizer.py
- else
+ if [ $serial -eq 1 ]; then
${wrapper} testr run --subunit $testrargs | ${wrapper} subunit-2to1 | ${wrapper} tools/colorizer.py
+ else
+ ${wrapper} testr run --parallel --subunit $testrargs | ${wrapper} subunit-2to1 | ${wrapper} tools/colorizer.py
fi
}
@@ -124,7 +122,11 @@
}
function run_pep8 {
- echo "Running pep8 ..."
+ echo "Running flake8 ..."
+ if [ $never_venv -eq 1 ]; then
+ echo "**WARNING**:" >&2
+ echo "Running flake8 without virtual env may miss OpenStack HACKING detection" >&2
+ fi
${wrapper} flake8
}
diff --git a/setup.cfg b/setup.cfg
index 7cfc4ce..a4cf118 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -21,10 +21,6 @@
setup-hooks =
pbr.hooks.setup_hook
-[files]
-scripts =
- bin/tempest
-
[build_sphinx]
all_files = 1
build-dir = doc/build
diff --git a/tempest/README.rst b/tempest/README.rst
index 33021c8..dbac809 100644
--- a/tempest/README.rst
+++ b/tempest/README.rst
@@ -18,7 +18,6 @@
| scenario/ - complex scenario tests
| stress/ - stress tests
| thirdparty/ - 3rd party api tests
-| whitebox/ - white box testing
Each of these directories contains different types of tests. What
belongs in each directory, the rules and examples for good tests, are
@@ -78,11 +77,3 @@
completely legitimate for Tempest to include tests of 3rdparty APIs,
but those should be kept separate from the normal OpenStack
validation.
-
-
-whitebox
---------
-
-Whitebox tests are tests which require access to the database of the
-target OpenStack machine to verify internal state after operations
-are made. White box tests are allowed to use the python clients.
diff --git a/tempest/api/compute/admin/test_aggregates.py b/tempest/api/compute/admin/test_aggregates.py
index 303bc0c..0bb0460 100644
--- a/tempest/api/compute/admin/test_aggregates.py
+++ b/tempest/api/compute/admin/test_aggregates.py
@@ -15,6 +15,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import fixtures
+
from tempest.api.compute import base
from tempest.common.utils.data_utils import rand_name
from tempest import exceptions
@@ -22,6 +24,16 @@
from tempest.test import attr
+class LockFixture(fixtures.Fixture):
+ def __init__(self, name):
+ self.mgr = lockutils.lock(name, 'tempest-', True)
+
+ def setUp(self):
+ super(LockFixture, self).setUp()
+ self.addCleanup(self.mgr.__exit__, None, None, None)
+ self.mgr.__enter__()
+
+
class AggregatesAdminTestJSON(base.BaseComputeAdminTest):
"""
@@ -146,9 +158,9 @@
self.client.get_aggregate, -1)
@attr(type='gate')
- @lockutils.synchronized('availability_zone', 'tempest-', True)
def test_aggregate_add_remove_host(self):
# Add an host to the given aggregate and remove.
+ self.useFixture(LockFixture('availability_zone'))
aggregate_name = rand_name(self.aggregate_name_prefix)
resp, aggregate = self.client.create_aggregate(aggregate_name)
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
@@ -168,9 +180,9 @@
self.assertNotIn(self.host, body['hosts'])
@attr(type='gate')
- @lockutils.synchronized('availability_zone', 'tempest-', True)
def test_aggregate_add_host_list(self):
# Add an host to the given aggregate and list.
+ self.useFixture(LockFixture('availability_zone'))
aggregate_name = rand_name(self.aggregate_name_prefix)
resp, aggregate = self.client.create_aggregate(aggregate_name)
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
@@ -186,9 +198,9 @@
self.assertIn(self.host, agg['hosts'])
@attr(type='gate')
- @lockutils.synchronized('availability_zone', 'tempest-', True)
def test_aggregate_add_host_get_details(self):
# Add an host to the given aggregate and get details.
+ self.useFixture(LockFixture('availability_zone'))
aggregate_name = rand_name(self.aggregate_name_prefix)
resp, aggregate = self.client.create_aggregate(aggregate_name)
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
@@ -201,9 +213,9 @@
self.assertIn(self.host, body['hosts'])
@attr(type='gate')
- @lockutils.synchronized('availability_zone', 'tempest-', True)
def test_aggregate_add_host_create_server_with_az(self):
# Add an host to the given aggregate and create a server.
+ self.useFixture(LockFixture('availability_zone'))
aggregate_name = rand_name(self.aggregate_name_prefix)
az_name = rand_name(self.az_name_prefix)
resp, aggregate = self.client.create_aggregate(aggregate_name, az_name)
@@ -248,9 +260,9 @@
aggregate['id'], self.host)
@attr(type=['negative', 'gate'])
- @lockutils.synchronized('availability_zone', 'tempest-', True)
def test_aggregate_remove_host_as_user(self):
# Regular user is not allowed to remove a host from an aggregate.
+ self.useFixture(LockFixture('availability_zone'))
aggregate_name = rand_name(self.aggregate_name_prefix)
resp, aggregate = self.client.create_aggregate(aggregate_name)
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
diff --git a/tempest/api/compute/admin/test_flavors.py b/tempest/api/compute/admin/test_flavors.py
index 5f31084..7efd3c1 100644
--- a/tempest/api/compute/admin/test_flavors.py
+++ b/tempest/api/compute/admin/test_flavors.py
@@ -296,6 +296,24 @@
_test_string_variations(['t', 'true', 'yes', '1'],
flavor_name_public)
+ @attr(type='gate')
+ def test_create_flavor_using_string_ram(self):
+ flavor_name = rand_name(self.flavor_name_prefix)
+ new_flavor_id = rand_int_id(start=1000)
+
+ ram = " 1024 "
+ resp, flavor = self.client.create_flavor(flavor_name,
+ ram, self.vcpus,
+ self.disk,
+ new_flavor_id)
+ self.addCleanup(self.flavor_clean_up, flavor['id'])
+ self.assertEqual(200, resp.status)
+ self.assertEqual(flavor['name'], flavor_name)
+ self.assertEqual(flavor['vcpus'], self.vcpus)
+ self.assertEqual(flavor['disk'], self.disk)
+ self.assertEqual(flavor['ram'], int(ram))
+ self.assertEqual(int(flavor['id']), new_flavor_id)
+
@attr(type=['negative', 'gate'])
def test_invalid_is_public_string(self):
self.assertRaises(exceptions.BadRequest,
@@ -319,6 +337,26 @@
self.user_client.delete_flavor,
self.flavor_ref_alt)
+ @attr(type=['negative', 'gate'])
+ def test_create_flavor_using_invalid_ram(self):
+ flavor_name = rand_name(self.flavor_name_prefix)
+ new_flavor_id = rand_int_id(start=1000)
+
+ self.assertRaises(exceptions.BadRequest,
+ self.client.create_flavor,
+ flavor_name, -1, self.vcpus,
+ self.disk, new_flavor_id)
+
+ @attr(type=['negative', 'gate'])
+ def test_create_flavor_using_invalid_vcpus(self):
+ flavor_name = rand_name(self.flavor_name_prefix)
+ new_flavor_id = rand_int_id(start=1000)
+
+ self.assertRaises(exceptions.BadRequest,
+ self.client.create_flavor,
+ flavor_name, self.ram, 0,
+ self.disk, new_flavor_id)
+
class FlavorsAdminTestXML(FlavorsAdminTestJSON):
_interface = 'xml'
diff --git a/tempest/api/compute/admin/test_flavors_extra_specs.py b/tempest/api/compute/admin/test_flavors_extra_specs.py
index 7b79a12..ace77a6 100644
--- a/tempest/api/compute/admin/test_flavors_extra_specs.py
+++ b/tempest/api/compute/admin/test_flavors_extra_specs.py
@@ -17,6 +17,8 @@
from tempest.api import compute
from tempest.api.compute import base
+from tempest.common.utils.data_utils import rand_int_id
+from tempest.common.utils.data_utils import rand_name
from tempest import exceptions
from tempest.test import attr
@@ -39,12 +41,12 @@
raise cls.skipException(msg)
cls.client = cls.os_adm.flavors_client
- flavor_name = 'test_flavor2'
+ flavor_name = rand_name('test_flavor')
ram = 512
vcpus = 1
disk = 10
ephemeral = 10
- cls.new_flavor_id = 12345
+ cls.new_flavor_id = rand_int_id(start=1000)
swap = 1024
rxtx = 1
# Create a flavor so as to set/get/unset extra specs
@@ -58,6 +60,7 @@
@classmethod
def tearDownClass(cls):
resp, body = cls.client.delete_flavor(cls.flavor['id'])
+ cls.client.wait_for_resource_deletion(cls.flavor['id'])
super(FlavorsExtraSpecsTestJSON, cls).tearDownClass()
@attr(type='gate')
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index acf0275..09d9bc0 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -73,6 +73,8 @@
cls.build_interval = cls.config.compute.build_interval
cls.build_timeout = cls.config.compute.build_timeout
cls.ssh_user = cls.config.compute.ssh_user
+ cls.image_ssh_user = cls.config.compute.image_ssh_user
+ cls.image_ssh_password = cls.config.compute.image_ssh_password
cls.image_ref = cls.config.compute.image_ref
cls.image_ref_alt = cls.config.compute.image_ref_alt
cls.flavor_ref = cls.config.compute.flavor_ref
diff --git a/tempest/api/compute/images/test_images_oneserver.py b/tempest/api/compute/images/test_images_oneserver.py
index 0052a30..06e9ab2 100644
--- a/tempest/api/compute/images/test_images_oneserver.py
+++ b/tempest/api/compute/images/test_images_oneserver.py
@@ -64,7 +64,7 @@
cls.alt_manager = clients.AltManager()
cls.alt_client = cls.alt_manager.images_client
- @testtools.skip("Until Bug #1006725 is fixed")
+ @testtools.skip("Skipped until the Bug #1006725 is resolved.")
@attr(type=['negative', 'gate'])
def test_create_image_specify_multibyte_character_image_name(self):
# Return an error if the image name has multi-byte characters
diff --git a/tempest/api/compute/keypairs/test_keypairs.py b/tempest/api/compute/keypairs/test_keypairs.py
index e4e87c0..083fbd7 100644
--- a/tempest/api/compute/keypairs/test_keypairs.py
+++ b/tempest/api/compute/keypairs/test_keypairs.py
@@ -84,23 +84,17 @@
# Keypair should be created, Got details by name and deleted
k_name = rand_name('keypair-')
resp, keypair = self.client.create_keypair(k_name)
- try:
- resp, keypair_detail = self.client.get_keypair(k_name)
- self.assertEqual(200, resp.status)
- self.assertIn('name', keypair_detail)
- self.assertIn('public_key', keypair_detail)
- self.assertEqual(keypair_detail['name'], k_name,
- "The created keypair name is not equal "
- "to requested name")
- public_key = keypair_detail['public_key']
- self.assertTrue(public_key is not None,
- "Field public_key is empty or not found.")
- except Exception:
- self.fail("GET keypair details requested by keypair name "
- "has failed")
- finally:
- resp, _ = self.client.delete_keypair(k_name)
- self.assertEqual(202, resp.status)
+ self.addCleanup(self.client.delete_keypair, k_name)
+ resp, keypair_detail = self.client.get_keypair(k_name)
+ self.assertEqual(200, resp.status)
+ self.assertIn('name', keypair_detail)
+ self.assertIn('public_key', keypair_detail)
+ self.assertEqual(keypair_detail['name'], k_name,
+ "The created keypair name is not equal "
+ "to requested name")
+ public_key = keypair_detail['public_key']
+ self.assertTrue(public_key is not None,
+ "Field public_key is empty or not found.")
@attr(type='gate')
def test_keypair_create_with_pub_key(self):
diff --git a/tempest/api/compute/servers/test_list_server_filters.py b/tempest/api/compute/servers/test_list_server_filters.py
index ade7604..8d31598 100644
--- a/tempest/api/compute/servers/test_list_server_filters.py
+++ b/tempest/api/compute/servers/test_list_server_filters.py
@@ -205,7 +205,7 @@
self.assertNotIn(self.s2_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s3_name, map(lambda x: x['name'], servers))
- @testtools.skip('Until Bug #1170718 is resolved.')
+ @testtools.skip('Skipped until the Bug #1170718 is resolved.')
@attr(type='gate')
def test_list_servers_filtered_by_ip(self):
# Filter servers by ip
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index 25df6e6..5ea771b 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -86,7 +86,7 @@
new_boot_time = linux_client.get_boot_time()
self.assertGreater(new_boot_time, boot_time)
- @testtools.skip('Until Bug #1014647 is dealt with.')
+ @testtools.skip('Skipped until the Bug #1014647 is resolved.')
@attr(type='smoke')
def test_reboot_server_soft(self):
# The server should be signaled to reboot gracefully
@@ -225,7 +225,7 @@
resp, output = self.servers_client.get_console_output(
self.server_id, 10)
self.assertEqual(200, resp.status)
- self.assertIsNotNone(output)
+ self.assertTrue(output, "Console output was empty.")
lines = len(output.split('\n'))
self.assertEqual(lines, 10)
self.wait_for(get_output)
@@ -238,7 +238,7 @@
self.servers_client.get_console_output,
'!@#$%^&*()', 10)
- @testtools.skip('Until tempest Bug #1014683 is fixed.')
+ @testtools.skip('Skipped until the Bug #1014683 is resolved.')
@attr(type='gate')
def test_get_console_output_server_id_in_reboot_status(self):
# Positive test:Should be able to GET the console output
diff --git a/tempest/api/compute/servers/test_virtual_interfaces.py b/tempest/api/compute/servers/test_virtual_interfaces.py
index e5ea30e..b743a85 100644
--- a/tempest/api/compute/servers/test_virtual_interfaces.py
+++ b/tempest/api/compute/servers/test_virtual_interfaces.py
@@ -37,8 +37,8 @@
resp, server = cls.create_server(wait_until='ACTIVE')
cls.server_id = server['id']
- @testtools.skipIf(CONF.service_available.neutron, "This feature is not " +
- "implemented by Neutron. See bug: #1183436")
+ @testtools.skipIf(CONF.service_available.neutron, "Not implemented by " +
+ "Neutron. Skipped until the Bug #1183436 is resolved.")
@attr(type='gate')
def test_list_virtual_interfaces(self):
# Positive test:Should be able to GET the virtual interfaces list
diff --git a/tempest/api/compute/test_authorization.py b/tempest/api/compute/test_authorization.py
index 60297a9..efdadb0 100644
--- a/tempest/api/compute/test_authorization.py
+++ b/tempest/api/compute/test_authorization.py
@@ -21,8 +21,11 @@
from tempest.common.utils.data_utils import parse_image_id
from tempest.common.utils.data_utils import rand_name
from tempest import exceptions
+from tempest.openstack.common import log as logging
from tempest.test import attr
+LOG = logging.getLogger(__name__)
+
class AuthorizationTestJSON(base.BaseComputeTest):
_interface = 'json'
@@ -204,7 +207,7 @@
self.alt_keypairs_client.base_url = self.saved_base_url
if (resp['status'] is not None):
resp, _ = self.alt_keypairs_client.delete_keypair(k_name)
- self.fail("Create keypair request should not happen "
+ LOG.error("Create keypair request should not happen "
"if the tenant id does not match the current user")
@attr(type='gate')
@@ -255,7 +258,7 @@
self.alt_security_client.base_url = self.saved_base_url
if resp['status'] is not None:
self.alt_security_client.delete_security_group(resp['id'])
- self.fail("Create Security Group request should not happen if"
+            LOG.error("Create Security Group request should not happen if "
"the tenant id does not match the current user")
@attr(type='gate')
@@ -297,7 +300,7 @@
self.alt_security_client.base_url = self.saved_base_url
if resp['status'] is not None:
self.alt_security_client.delete_security_group_rule(resp['id'])
- self.fail("Create security group rule request should not "
+ LOG.error("Create security group rule request should not "
"happen if the tenant id does not match the"
" current user")
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index b67a5e0..ee1ad9e 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -36,7 +36,7 @@
@classmethod
def setUpClass(cls):
super(AttachVolumeTestJSON, cls).setUpClass()
- cls.device = 'vdb'
+ cls.device = cls.config.compute.volume_device_name
if not cls.config.service_available.cinder:
skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
raise cls.skipException(skip_msg)
@@ -54,7 +54,7 @@
def _create_and_attach(self):
# Start a server and wait for it to become ready
resp, server = self.create_server(wait_until='ACTIVE',
- adminPass='password')
+ adminPass=self.image_ssh_password)
self.server = server
# Record addresses so that we can ssh later
@@ -92,7 +92,7 @@
self.servers_client.wait_for_server_status(server['id'], 'ACTIVE')
linux_client = RemoteClient(server,
- self.ssh_user, server['adminPass'])
+ self.image_ssh_user, server['adminPass'])
partitions = linux_client.get_partitions()
self.assertIn(self.device, partitions)
@@ -106,7 +106,7 @@
self.servers_client.wait_for_server_status(server['id'], 'ACTIVE')
linux_client = RemoteClient(server,
- self.ssh_user, server['adminPass'])
+ self.image_ssh_user, server['adminPass'])
partitions = linux_client.get_partitions()
self.assertNotIn(self.device, partitions)
diff --git a/tempest/api/identity/admin/v3/test_endpoints.py b/tempest/api/identity/admin/v3/test_endpoints.py
index d98fb71..9d143ed 100644
--- a/tempest/api/identity/admin/v3/test_endpoints.py
+++ b/tempest/api/identity/admin/v3/test_endpoints.py
@@ -68,44 +68,30 @@
', '.join(str(e) for e in missing_endpoints))
@attr(type='gate')
- def test_create_delete_endpoint(self):
+ def test_create_list_delete_endpoint(self):
region = rand_name('region')
url = rand_name('url')
interface = 'public'
- create_flag = False
- matched = False
- try:
- resp, endpoint =\
- self.client.create_endpoint(self.service_id, interface, url,
- region=region, enabled=True)
- create_flag = True
- # Asserting Create Endpoint response body
- self.assertEqual(resp['status'], '201')
- self.assertEqual(region, endpoint['region'])
- self.assertEqual(url, endpoint['url'])
- # Checking if created endpoint is present in the list of endpoints
- resp, fetched_endpoints = self.client.list_endpoints()
- for e in fetched_endpoints:
- if endpoint['id'] == e['id']:
- matched = True
- if not matched:
- self.fail("Created endpoint does not appear in the list"
- " of endpoints")
- finally:
- if create_flag:
- matched = False
- # Deleting the endpoint created in this method
- resp_header, resp_body =\
- self.client.delete_endpoint(endpoint['id'])
- self.assertEqual(resp_header['status'], '204')
- self.assertEqual(resp_body, '')
- # Checking whether endpoint is deleted successfully
- resp, fetched_endpoints = self.client.list_endpoints()
- for e in fetched_endpoints:
- if endpoint['id'] == e['id']:
- matched = True
- if matched:
- self.fail("Delete endpoint is not successful")
+ resp, endpoint =\
+ self.client.create_endpoint(self.service_id, interface, url,
+ region=region, enabled=True)
+ # Asserting Create Endpoint response body
+ self.assertEqual(resp['status'], '201')
+ self.assertIn('id', endpoint)
+ self.assertEqual(region, endpoint['region'])
+ self.assertEqual(url, endpoint['url'])
+ # Checking if created endpoint is present in the list of endpoints
+ resp, fetched_endpoints = self.client.list_endpoints()
+ fetched_endpoints_id = [e['id'] for e in fetched_endpoints]
+ self.assertIn(endpoint['id'], fetched_endpoints_id)
+ # Deleting the endpoint created in this method
+ resp, body = self.client.delete_endpoint(endpoint['id'])
+ self.assertEqual(resp['status'], '204')
+ self.assertEqual(body, '')
+ # Checking whether endpoint is deleted successfully
+ resp, fetched_endpoints = self.client.list_endpoints()
+ fetched_endpoints_id = [e['id'] for e in fetched_endpoints]
+ self.assertNotIn(endpoint['id'], fetched_endpoints_id)
@attr(type='smoke')
def test_update_endpoint(self):
diff --git a/tempest/api/network/test_floating_ips.py b/tempest/api/network/test_floating_ips.py
new file mode 100644
index 0000000..017864f
--- /dev/null
+++ b/tempest/api/network/test_floating_ips.py
@@ -0,0 +1,135 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.network import base
+from tempest.common.utils.data_utils import rand_name
+from tempest.test import attr
+
+
+class FloatingIPTest(base.BaseNetworkTest):
+ _interface = 'json'
+
+ """
+ Tests the following operations in the Quantum API using the REST client for
+ Quantum:
+
+ Create a Floating IP
+ Update a Floating IP
+ Delete a Floating IP
+ List all Floating IPs
+ Show Floating IP details
+
+ v2.0 of the Quantum API is assumed. It is also assumed that the following
+ options are defined in the [network] section of etc/tempest.conf:
+
+ public_network_id which is the id for the external network present
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ super(FloatingIPTest, cls).setUpClass()
+ cls.ext_net_id = cls.config.network.public_network_id
+
+ # Create network, subnet, router and add interface
+ cls.network = cls.create_network()
+ cls.subnet = cls.create_subnet(cls.network)
+ resp, router = cls.client.create_router(
+ rand_name('router-'),
+ external_gateway_info={"network_id":
+ cls.network_cfg.public_network_id})
+ cls.router = router['router']
+ resp, _ = cls.client.add_router_interface_with_subnet_id(
+ cls.router['id'], cls.subnet['id'])
+ cls.port = list()
+ # Create two ports one each for Creation and Updating of floatingIP
+ for i in range(2):
+ resp, port = cls.client.create_port(cls.network['id'])
+ cls.port.append(port['port'])
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.client.remove_router_interface_with_subnet_id(cls.router['id'],
+ cls.subnet['id'])
+ for i in range(2):
+ cls.client.delete_port(cls.port[i]['id'])
+ cls.client.delete_router(cls.router['id'])
+ super(FloatingIPTest, cls).tearDownClass()
+
+ def _delete_floating_ip(self, floating_ip_id):
+ # Deletes a floating IP and verifies if it is deleted or not
+ resp, _ = self.client.delete_floating_ip(floating_ip_id)
+ self.assertEqual(204, resp.status)
+ # Asserting that the floating_ip is not found in list after deletion
+ resp, floating_ips = self.client.list_floating_ips()
+ floatingip_id_list = list()
+ for f in floating_ips['floatingips']:
+ floatingip_id_list.append(f['id'])
+ self.assertNotIn(floating_ip_id, floatingip_id_list)
+
+ @attr(type='smoke')
+ def test_create_list_show_update_delete_floating_ip(self):
+ # Creates a floating IP
+ resp, floating_ip = self.client.create_floating_ip(
+ self.ext_net_id, port_id=self.port[0]['id'])
+ self.assertEqual('201', resp['status'])
+ create_floating_ip = floating_ip['floatingip']
+ self.assertIsNotNone(create_floating_ip['id'])
+ self.assertIsNotNone(create_floating_ip['tenant_id'])
+ self.assertIsNotNone(create_floating_ip['floating_ip_address'])
+ self.assertEqual(create_floating_ip['port_id'], self.port[0]['id'])
+ self.assertEqual(create_floating_ip['floating_network_id'],
+ self.ext_net_id)
+ self.addCleanup(self._delete_floating_ip, create_floating_ip['id'])
+ # Verifies the details of a floating_ip
+ resp, floating_ip = self.client.show_floating_ip(
+ create_floating_ip['id'])
+ self.assertEqual('200', resp['status'])
+ show_floating_ip = floating_ip['floatingip']
+ self.assertEqual(show_floating_ip['id'], create_floating_ip['id'])
+ self.assertEqual(show_floating_ip['floating_network_id'],
+ self.ext_net_id)
+ self.assertEqual(show_floating_ip['tenant_id'],
+ create_floating_ip['tenant_id'])
+ self.assertEqual(show_floating_ip['floating_ip_address'],
+ create_floating_ip['floating_ip_address'])
+ self.assertEqual(show_floating_ip['port_id'], self.port[0]['id'])
+
+ # Verify the floating ip exists in the list of all floating_ips
+ resp, floating_ips = self.client.list_floating_ips()
+ self.assertEqual('200', resp['status'])
+ floatingip_id_list = list()
+ for f in floating_ips['floatingips']:
+ floatingip_id_list.append(f['id'])
+ self.assertIn(create_floating_ip['id'], floatingip_id_list)
+
+ # Associate floating IP to the other port
+ resp, floating_ip = self.client.update_floating_ip(
+ create_floating_ip['id'], port_id=self.port[1]['id'])
+ self.assertEqual('200', resp['status'])
+ update_floating_ip = floating_ip['floatingip']
+ self.assertEqual(update_floating_ip['port_id'], self.port[1]['id'])
+ self.assertIsNotNone(update_floating_ip['fixed_ip_address'])
+ self.assertEqual(update_floating_ip['router_id'], self.router['id'])
+
+ # Disassociate floating IP from the port
+ resp, floating_ip = self.client.update_floating_ip(
+ create_floating_ip['id'], port_id=None)
+ self.assertEqual('200', resp['status'])
+ update_floating_ip = floating_ip['floatingip']
+ self.assertIsNone(update_floating_ip['port_id'])
+ self.assertIsNone(update_floating_ip['fixed_ip_address'])
+ self.assertIsNone(update_floating_ip['router_id'])
diff --git a/tempest/api/network/test_networks.py b/tempest/api/network/test_networks.py
index 7f49452..f3d1485 100644
--- a/tempest/api/network/test_networks.py
+++ b/tempest/api/network/test_networks.py
@@ -75,7 +75,7 @@
for n in created_networks:
self.assertNotIn(n['id'], networks_list)
- @attr(type='gate')
+ @attr(type='smoke')
def test_create_update_delete_network_subnet(self):
# Creates a network
name = rand_name('network-')
@@ -116,7 +116,7 @@
resp, body = self.client.delete_network(net_id)
self.assertEqual('204', resp['status'])
- @attr(type='gate')
+ @attr(type='smoke')
def test_show_network(self):
# Verifies the details of a network
resp, body = self.client.show_network(self.network['id'])
@@ -125,7 +125,7 @@
self.assertEqual(self.network['id'], network['id'])
self.assertEqual(self.name, network['name'])
- @attr(type='gate')
+ @attr(type='smoke')
def test_list_networks(self):
# Verify the network exists in the list of all networks
resp, body = self.client.list_networks()
@@ -138,7 +138,7 @@
msg = "Network list doesn't contain created network"
self.assertIsNotNone(found, msg)
- @attr(type='gate')
+ @attr(type='smoke')
def test_show_subnet(self):
# Verifies the details of a subnet
resp, body = self.client.show_subnet(self.subnet['id'])
@@ -147,7 +147,7 @@
self.assertEqual(self.subnet['id'], subnet['id'])
self.assertEqual(self.cidr, subnet['cidr'])
- @attr(type='gate')
+ @attr(type='smoke')
def test_list_subnets(self):
# Verify the subnet exists in the list of all subnets
resp, body = self.client.list_subnets()
@@ -160,7 +160,7 @@
msg = "Subnet list doesn't contain created subnet"
self.assertIsNotNone(found, msg)
- @attr(type='gate')
+ @attr(type='smoke')
def test_create_update_delete_port(self):
# Verify that successful port creation, update & deletion
resp, body = self.client.create_port(self.network['id'])
@@ -176,7 +176,7 @@
resp, body = self.client.delete_port(port['id'])
self.assertEqual('204', resp['status'])
- @attr(type='gate')
+ @attr(type='smoke')
def test_show_port(self):
# Verify the details of port
resp, body = self.client.show_port(self.port['id'])
@@ -184,7 +184,7 @@
port = body['port']
self.assertEqual(self.port['id'], port['id'])
- @attr(type='gate')
+ @attr(type='smoke')
def test_list_ports(self):
# Verify the port exists in the list of all ports
resp, body = self.client.list_ports()
@@ -196,19 +196,19 @@
found = n['id']
self.assertIsNotNone(found, "Port list doesn't contain created port")
- @attr(type=['negative', 'gate'])
+ @attr(type=['negative', 'smoke'])
def test_show_non_existent_network(self):
non_exist_id = rand_name('network')
self.assertRaises(exceptions.NotFound, self.client.show_network,
non_exist_id)
- @attr(type=['negative', 'gate'])
+ @attr(type=['negative', 'smoke'])
def test_show_non_existent_subnet(self):
non_exist_id = rand_name('subnet')
self.assertRaises(exceptions.NotFound, self.client.show_subnet,
non_exist_id)
- @attr(type='gate')
+ @attr(type='smoke')
def test_bulk_create_delete_network(self):
# Creates 2 networks in one request
network_names = [rand_name('network-'), rand_name('network-')]
diff --git a/tempest/api/network/test_routers.py b/tempest/api/network/test_routers.py
index 4f687b0..9f8c742 100644
--- a/tempest/api/network/test_routers.py
+++ b/tempest/api/network/test_routers.py
@@ -49,7 +49,7 @@
router_id, port_id)
self.assertEqual('200', resp['status'])
- @attr(type='gate')
+ @attr(type='smoke')
def test_create_show_list_update_delete_router(self):
# Create a router
name = rand_name('router-')
@@ -90,7 +90,7 @@
create_body['router']['id'])
self.assertEqual(show_body['router']['name'], updated_name)
- @attr(type='gate')
+ @attr(type='smoke')
def test_add_remove_router_interface_with_subnet_id(self):
network = self.create_network()
subnet = self.create_subnet(network)
@@ -111,7 +111,7 @@
self.assertEqual(show_port_body['port']['device_id'],
create_body['router']['id'])
- @attr(type='gate')
+ @attr(type='smoke')
def test_add_remove_router_interface_with_port_id(self):
network = self.create_network()
self.create_subnet(network)
diff --git a/tempest/api/network/test_security_groups.py b/tempest/api/network/test_security_groups.py
new file mode 100644
index 0000000..24f8286
--- /dev/null
+++ b/tempest/api/network/test_security_groups.py
@@ -0,0 +1,127 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.network import base
+from tempest.common.utils.data_utils import rand_name
+from tempest import exceptions
+from tempest.test import attr
+
+
+class SecGroupTest(base.BaseNetworkTest):
+ _interface = 'json'
+
+ @classmethod
+ def setUpClass(cls):
+ super(SecGroupTest, cls).setUpClass()
+
+ def _delete_security_group(self, secgroup_id):
+ resp, _ = self.client.delete_security_group(secgroup_id)
+ self.assertEqual(204, resp.status)
+ # Asserting that the secgroup is not found in the list
+ # after deletion
+ resp, list_body = self.client.list_security_groups()
+ self.assertEqual('200', resp['status'])
+ secgroup_list = list()
+ for secgroup in list_body['security_groups']:
+ secgroup_list.append(secgroup['id'])
+ self.assertNotIn(secgroup_id, secgroup_list)
+
+ def _delete_security_group_rule(self, rule_id):
+ resp, _ = self.client.delete_security_group_rule(rule_id)
+ self.assertEqual(204, resp.status)
+        # Asserting that the rule is not found in the list
+ # after deletion
+ resp, list_body = self.client.list_security_group_rules()
+ self.assertEqual('200', resp['status'])
+ rules_list = list()
+ for rule in list_body['security_group_rules']:
+ rules_list.append(rule['id'])
+ self.assertNotIn(rule_id, rules_list)
+
+ @attr(type='smoke')
+ def test_list_security_groups(self):
+        # Verify that the security group belonging to the tenant exists in the list
+ resp, body = self.client.list_security_groups()
+ self.assertEqual('200', resp['status'])
+ security_groups = body['security_groups']
+ found = None
+ for n in security_groups:
+ if (n['name'] == 'default'):
+ found = n['id']
+ msg = "Security-group list doesn't contain default security-group"
+ self.assertIsNotNone(found, msg)
+
+ @attr(type='smoke')
+ def test_create_show_delete_security_group_and_rule(self):
+ # Create a security group
+ name = rand_name('secgroup-')
+ resp, group_create_body = self.client.create_security_group(name)
+ self.assertEqual('201', resp['status'])
+ self.addCleanup(self._delete_security_group,
+ group_create_body['security_group']['id'])
+ self.assertEqual(group_create_body['security_group']['name'], name)
+
+ # Show details of the created security group
+ resp, show_body = self.client.show_security_group(
+ group_create_body['security_group']['id'])
+ self.assertEqual('200', resp['status'])
+ self.assertEqual(show_body['security_group']['name'], name)
+
+ # List security groups and verify if created group is there in response
+ resp, list_body = self.client.list_security_groups()
+ self.assertEqual('200', resp['status'])
+ secgroup_list = list()
+ for secgroup in list_body['security_groups']:
+ secgroup_list.append(secgroup['id'])
+ self.assertIn(group_create_body['security_group']['id'], secgroup_list)
+        # No update operation is available for security groups
+ # Create rule
+ resp, rule_create_body = self.client.create_security_group_rule(
+ group_create_body['security_group']['id']
+ )
+ self.assertEqual('201', resp['status'])
+ self.addCleanup(self._delete_security_group_rule,
+ rule_create_body['security_group_rule']['id'])
+ # Show details of the created security rule
+ resp, show_rule_body = self.client.show_security_group_rule(
+ rule_create_body['security_group_rule']['id']
+ )
+ self.assertEqual('200', resp['status'])
+
+ # List rules and verify created rule is in response
+ resp, rule_list_body = self.client.list_security_group_rules()
+ self.assertEqual('200', resp['status'])
+ rule_list = [rule['id']
+ for rule in rule_list_body['security_group_rules']]
+ self.assertIn(rule_create_body['security_group_rule']['id'], rule_list)
+
+ @attr(type=['negative', 'smoke'])
+ def test_show_non_existent_security_group(self):
+ non_exist_id = rand_name('secgroup-')
+ self.assertRaises(exceptions.NotFound, self.client.show_security_group,
+ non_exist_id)
+
+ @attr(type=['negative', 'smoke'])
+ def test_show_non_existent_security_group_rule(self):
+ non_exist_id = rand_name('rule-')
+ self.assertRaises(exceptions.NotFound,
+ self.client.show_security_group_rule,
+ non_exist_id)
+
+
+class SecGroupTestXML(SecGroupTest):
+ _interface = 'xml'
diff --git a/tempest/api/object_storage/test_container_staticweb.py b/tempest/api/object_storage/test_container_staticweb.py
new file mode 100644
index 0000000..d07697a
--- /dev/null
+++ b/tempest/api/object_storage/test_container_staticweb.py
@@ -0,0 +1,94 @@
+# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
+#
+# Author: Joe H. Rahme <joe.hakim.rahme@enovance.com>
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.object_storage import base
+from tempest.common.utils.data_utils import arbitrary_string
+from tempest.common.utils.data_utils import rand_name
+from tempest.test import attr
+
+
+class StaticWebTest(base.BaseObjectTest):
+
+ @classmethod
+ def setUpClass(cls):
+ super(StaticWebTest, cls).setUpClass()
+ cls.container_name = rand_name(name="TestContainer")
+
+ # This header should be posted on the container before every test
+ cls.headers_public_read_acl = {'Read': '.r:*'}
+
+ # Create test container and create one object in it
+ cls.container_client.create_container(cls.container_name)
+ cls.object_name = rand_name(name="TestObject")
+ cls.object_data = arbitrary_string()
+ cls.object_client.create_object(cls.container_name,
+ cls.object_name,
+ cls.object_data)
+
+ cls.container_client.update_container_metadata(
+ cls.container_name,
+ metadata=cls.headers_public_read_acl,
+ metadata_prefix="X-Container-")
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.delete_containers([cls.container_name])
+ cls.data.teardown_all()
+ super(StaticWebTest, cls).tearDownClass()
+
+ @attr('gate')
+ def test_web_index(self):
+ headers = {'web-index': self.object_name}
+
+ self.container_client.update_container_metadata(
+ self.container_name, metadata=headers)
+
+ # test GET on http://account_url/container_name
+ # we should retrieve the self.object_name file
+ resp, body = self.custom_account_client.request("GET",
+ self.container_name)
+ self.assertEqual(resp['status'], '200')
+ self.assertEqual(body, self.object_data)
+
+ # clean up before exiting
+ self.container_client.update_container_metadata(self.container_name,
+ {'web-index': ""})
+
+ _, body = self.container_client.list_container_metadata(
+ self.container_name)
+ self.assertNotIn('x-container-meta-web-index', body)
+
+ @attr('gate')
+ def test_web_listing(self):
+ headers = {'web-listings': 'true'}
+
+ self.container_client.update_container_metadata(
+ self.container_name, metadata=headers)
+
+ # test GET on http://account_url/container_name
+ # we should retrieve a listing of objects
+ resp, body = self.custom_account_client.request("GET",
+ self.container_name)
+ self.assertEqual(resp['status'], '200')
+ self.assertIn(self.object_name, body)
+
+ # clean up before exiting
+ self.container_client.update_container_metadata(self.container_name,
+ {'web-listings': ""})
+
+ _, body = self.container_client.list_container_metadata(
+ self.container_name)
+ self.assertNotIn('x-container-meta-web-listings', body)
diff --git a/tempest/api/object_storage/test_container_sync.py b/tempest/api/object_storage/test_container_sync.py
index d18c2ad..66a74e4 100644
--- a/tempest/api/object_storage/test_container_sync.py
+++ b/tempest/api/object_storage/test_container_sync.py
@@ -52,7 +52,7 @@
cls.delete_containers(cls.containers, client[0], client[1])
super(ContainerSyncTest, cls).tearDownClass()
- @testtools.skip('Until Bug #1093743 is resolved.')
+ @testtools.skip('Skipped until the Bug #1093743 is resolved.')
@attr(type='gate')
def test_container_synchronization(self):
# container to container synchronization
diff --git a/tempest/api/object_storage/test_object_expiry.py b/tempest/api/object_storage/test_object_expiry.py
index 8703480..889436d 100644
--- a/tempest/api/object_storage/test_object_expiry.py
+++ b/tempest/api/object_storage/test_object_expiry.py
@@ -43,7 +43,7 @@
cls.delete_containers([cls.container_name])
super(ObjectExpiryTest, cls).tearDownClass()
- @testtools.skip('Until Bug #1069849 is resolved.')
+ @testtools.skip('Skipped until the Bug #1069849 is resolved.')
@attr(type='gate')
def test_get_object_after_expiry_time(self):
# TODO(harika-vakadi): similar test case has to be created for
diff --git a/tempest/api/object_storage/test_object_services.py b/tempest/api/object_storage/test_object_services.py
index c599562..dd724c7 100644
--- a/tempest/api/object_storage/test_object_services.py
+++ b/tempest/api/object_storage/test_object_services.py
@@ -211,24 +211,18 @@
object_name,
orig_metadata)
self.assertIn(int(resp['status']), HTTP_SUCCESS)
- try:
- # copy object from source container to destination container
- resp, _ = self.object_client.copy_object_across_containers(
- src_container_name, object_name, dst_container_name,
- object_name)
- self.assertEqual(resp['status'], '201')
-
- # check if object is present in destination container
- resp, body = self.object_client.get_object(dst_container_name,
- object_name)
- self.assertEqual(body, data)
- actual_meta_key = 'x-object-meta-' + meta_key
- self.assertTrue(actual_meta_key in resp)
- self.assertEqual(resp[actual_meta_key], meta_value)
-
- except Exception as e:
- self.fail("Got exception :%s ; while copying"
- " object across containers" % e)
+ # copy object from source container to destination container
+ resp, _ = self.object_client.copy_object_across_containers(
+ src_container_name, object_name, dst_container_name,
+ object_name)
+ self.assertEqual(resp['status'], '201')
+ # check if object is present in destination container
+ resp, body = self.object_client.get_object(dst_container_name,
+ object_name)
+ self.assertEqual(body, data)
+ actual_meta_key = 'x-object-meta-' + meta_key
+ self.assertTrue(actual_meta_key in resp)
+ self.assertEqual(resp[actual_meta_key], meta_value)
@attr(type='gate')
def test_get_object_using_temp_url(self):
@@ -367,36 +361,32 @@
def test_access_public_object_with_another_user_creds(self):
# make container public-readable and access an object in it using
# another user's credentials
- try:
- cont_headers = {'X-Container-Read': '.r:*,.rlistings'}
- resp_meta, body = self.container_client.update_container_metadata(
- self.container_name, metadata=cont_headers,
- metadata_prefix='')
- self.assertIn(int(resp_meta['status']), HTTP_SUCCESS)
- # create object
- object_name = rand_name(name='Object')
- data = arbitrary_string(size=len(object_name) * 1,
- base_text=object_name)
- resp, _ = self.object_client.create_object(self.container_name,
- object_name, data)
- self.assertEqual(resp['status'], '201')
+ cont_headers = {'X-Container-Read': '.r:*,.rlistings'}
+ resp_meta, body = self.container_client.update_container_metadata(
+ self.container_name, metadata=cont_headers,
+ metadata_prefix='')
+ self.assertIn(int(resp_meta['status']), HTTP_SUCCESS)
- # list container metadata
- resp, _ = self.container_client.list_container_metadata(
- self.container_name)
- self.assertIn(int(resp['status']), HTTP_SUCCESS)
- self.assertIn('x-container-read', resp)
- self.assertEqual(resp['x-container-read'], '.r:*,.rlistings')
+ # create object
+ object_name = rand_name(name='Object')
+ data = arbitrary_string(size=len(object_name) * 1,
+ base_text=object_name)
+ resp, _ = self.object_client.create_object(self.container_name,
+ object_name, data)
+ self.assertEqual(resp['status'], '201')
- # get auth token of alternative user
- token = self.identity_client_alt.get_auth()
- headers = {'X-Auth-Token': token}
- # access object using alternate user creds
- resp, body = self.custom_object_client.get_object(
- self.container_name, object_name,
- metadata=headers)
- self.assertEqual(body, data)
+ # list container metadata
+ resp, _ = self.container_client.list_container_metadata(
+ self.container_name)
+ self.assertIn(int(resp['status']), HTTP_SUCCESS)
+ self.assertIn('x-container-read', resp)
+ self.assertEqual(resp['x-container-read'], '.r:*,.rlistings')
- except Exception as e:
- self.fail("Failed to get public readable object with another"
- " user creds raised exception is %s" % e)
+ # get auth token of alternative user
+ token = self.identity_client_alt.get_auth()
+ headers = {'X-Auth-Token': token}
+ # access object using alternate user creds
+ resp, body = self.custom_object_client.get_object(
+ self.container_name, object_name,
+ metadata=headers)
+ self.assertEqual(body, data)
diff --git a/tempest/api/orchestration/base.py b/tempest/api/orchestration/base.py
index 745dd87..2a72c95 100644
--- a/tempest/api/orchestration/base.py
+++ b/tempest/api/orchestration/base.py
@@ -89,8 +89,8 @@
pass
@classmethod
- def _create_keypair(cls, namestart='keypair-heat-'):
- kp_name = rand_name(namestart)
+ def _create_keypair(cls, name_start='keypair-heat-'):
+ kp_name = rand_name(name_start)
resp, body = cls.keypairs_client.create_keypair(kp_name)
cls.keypairs.append(kp_name)
return body
diff --git a/tempest/api/orchestration/stacks/test_neutron_resources.py b/tempest/api/orchestration/stacks/test_neutron_resources.py
new file mode 100644
index 0000000..c934020
--- /dev/null
+++ b/tempest/api/orchestration/stacks/test_neutron_resources.py
@@ -0,0 +1,211 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import logging
+
+from tempest.api.orchestration import base
+from tempest import clients
+from tempest.common.utils.data_utils import rand_name
+from tempest.test import attr
+
+
+LOG = logging.getLogger(__name__)
+
+
+class NeutronResourcesTestJSON(base.BaseOrchestrationTest):
+ _interface = 'json'
+
+ template = """
+HeatTemplateFormatVersion: '2012-12-12'
+Description: |
+ Template which creates single EC2 instance
+Parameters:
+ KeyName:
+ Type: String
+ InstanceType:
+ Type: String
+ ImageId:
+ Type: String
+ ExternalRouterId:
+ Type: String
+Resources:
+ Network:
+ Type: OS::Quantum::Net
+ Properties: {name: NewNetwork}
+ Subnet:
+ Type: OS::Quantum::Subnet
+ Properties:
+ network_id: {Ref: Network}
+ name: NewSubnet
+ ip_version: 4
+ cidr: 10.0.3.0/24
+ dns_nameservers: ["8.8.8.8"]
+ allocation_pools:
+ - {end: 10.0.3.150, start: 10.0.3.20}
+ RouterInterface:
+ Type: OS::Quantum::RouterInterface
+ Properties:
+ router_id: {Ref: ExternalRouterId}
+ subnet_id: {Ref: Subnet}
+ Server:
+ Type: AWS::EC2::Instance
+ Metadata:
+ Name: SmokeServer
+ Properties:
+ ImageId: {Ref: ImageId}
+ InstanceType: {Ref: InstanceType}
+ KeyName: {Ref: KeyName}
+ SubnetId: {Ref: Subnet}
+ UserData:
+ Fn::Base64:
+ Fn::Join:
+ - ''
+ - - '#!/bin/bash -v
+
+ '
+ - /opt/aws/bin/cfn-signal -e 0 -r "SmokeServer created" '
+ - {Ref: WaitHandle}
+ - '''
+
+ '
+ WaitHandle:
+ Type: AWS::CloudFormation::WaitConditionHandle
+ WaitCondition:
+ Type: AWS::CloudFormation::WaitCondition
+ DependsOn: Server
+ Properties:
+ Handle: {Ref: WaitHandle}
+ Timeout: '600'
+"""
+
+ @classmethod
+ def setUpClass(cls):
+ super(NeutronResourcesTestJSON, cls).setUpClass()
+ if not cls.orchestration_cfg.image_ref:
+ raise cls.skipException("No image available to test")
+ cls.client = cls.orchestration_client
+ os = clients.Manager()
+ cls.network_cfg = os.config.network
+ if not cls.config.service_available.neutron:
+ raise cls.skipException("Neutron support is required")
+ cls.network_client = os.network_client
+ cls.stack_name = rand_name('heat')
+ cls.keypair_name = (cls.orchestration_cfg.keypair_name or
+ cls._create_keypair()['name'])
+ cls.external_router_id = cls._get_external_router_id()
+
+ # create the stack
+ cls.stack_identifier = cls.create_stack(
+ cls.stack_name,
+ cls.template,
+ parameters={
+ 'KeyName': cls.keypair_name,
+ 'InstanceType': cls.orchestration_cfg.instance_type,
+ 'ImageId': cls.orchestration_cfg.image_ref,
+ 'ExternalRouterId': cls.external_router_id
+ })
+ cls.stack_id = cls.stack_identifier.split('/')[1]
+ cls.client.wait_for_stack_status(cls.stack_id, 'CREATE_COMPLETE')
+ _, resources = cls.client.list_resources(cls.stack_identifier)
+ cls.test_resources = {}
+ for resource in resources:
+ cls.test_resources[resource['logical_resource_id']] = resource
+
+ @classmethod
+ def _get_external_router_id(cls):
+ resp, body = cls.network_client.list_ports()
+ ports = body['ports']
+ router_ports = filter(lambda port: port['device_owner'] ==
+ 'network:router_interface', ports)
+ return router_ports[0]['device_id']
+
+ @attr(type='slow')
+ def test_created_resources(self):
+ """Verifies created neutron resources."""
+ resources = [('Network', 'OS::Quantum::Net'),
+ ('Subnet', 'OS::Quantum::Subnet'),
+ ('RouterInterface', 'OS::Quantum::RouterInterface'),
+ ('Server', 'AWS::EC2::Instance')]
+ for resource_name, resource_type in resources:
+ resource = self.test_resources.get(resource_name, None)
+ self.assertIsInstance(resource, dict)
+ self.assertEqual(resource_name, resource['logical_resource_id'])
+ self.assertEqual(resource_type, resource['resource_type'])
+ self.assertEqual('CREATE_COMPLETE', resource['resource_status'])
+
+ @attr(type='slow')
+ def test_created_network(self):
+ """Verifies created netowrk."""
+ network_id = self.test_resources.get('Network')['physical_resource_id']
+ resp, body = self.network_client.show_network(network_id)
+ self.assertEqual('200', resp['status'])
+ network = body['network']
+ self.assertIsInstance(network, dict)
+ self.assertEqual(network_id, network['id'])
+ self.assertEqual('NewNetwork', network['name'])
+
+ @attr(type='slow')
+ def test_created_subnet(self):
+ """Verifies created subnet."""
+ subnet_id = self.test_resources.get('Subnet')['physical_resource_id']
+ resp, body = self.network_client.show_subnet(subnet_id)
+ self.assertEqual('200', resp['status'])
+ subnet = body['subnet']
+ network_id = self.test_resources.get('Network')['physical_resource_id']
+ self.assertEqual(subnet_id, subnet['id'])
+ self.assertEqual(network_id, subnet['network_id'])
+ self.assertEqual('NewSubnet', subnet['name'])
+ self.assertEqual('8.8.8.8', subnet['dns_nameservers'][0])
+ self.assertEqual('10.0.3.20', subnet['allocation_pools'][0]['start'])
+ self.assertEqual('10.0.3.150', subnet['allocation_pools'][0]['end'])
+ self.assertEqual(4, subnet['ip_version'])
+ self.assertEqual('10.0.3.0/24', subnet['cidr'])
+
+ @attr(type='slow')
+ def test_created_router_interface(self):
+ """Verifies created router interface."""
+ network_id = self.test_resources.get('Network')['physical_resource_id']
+ subnet_id = self.test_resources.get('Subnet')['physical_resource_id']
+ resp, body = self.network_client.list_ports()
+ self.assertEqual('200', resp['status'])
+ ports = body['ports']
+ router_ports = filter(lambda port: port['device_id'] ==
+ self.external_router_id, ports)
+ created_network_ports = filter(lambda port: port['network_id'] ==
+ network_id, router_ports)
+ self.assertEqual(1, len(created_network_ports))
+ router_interface = created_network_ports[0]
+ fixed_ips = router_interface['fixed_ips']
+ subnet_fixed_ips = filter(lambda port: port['subnet_id'] ==
+ subnet_id, fixed_ips)
+ self.assertEqual(1, len(subnet_fixed_ips))
+ router_interface_ip = subnet_fixed_ips[0]['ip_address']
+ self.assertEqual('10.0.3.1', router_interface_ip)
+
+ @attr(type='slow')
+ def test_created_server(self):
+ """Verifies created sever."""
+ server_id = self.test_resources.get('Server')['physical_resource_id']
+ resp, server = self.servers_client.get_server(server_id)
+ self.assertEqual('200', resp['status'])
+ self.assertEqual(self.keypair_name, server['key_name'])
+ self.assertEqual('ACTIVE', server['status'])
+ network = server['addresses']['NewNetwork'][0]
+ self.assertEqual(4, network['version'])
+ ip_addr_prefix = network['addr'][:7]
+ ip_addr_suffix = int(network['addr'].split('.')[3])
+ self.assertEqual('10.0.3.', ip_addr_prefix)
+ self.assertTrue(ip_addr_suffix >= 20)
+ self.assertTrue(ip_addr_suffix <= 150)
diff --git a/tempest/api/orchestration/stacks/test_non_empty_stack.py b/tempest/api/orchestration/stacks/test_non_empty_stack.py
new file mode 100644
index 0000000..defb910
--- /dev/null
+++ b/tempest/api/orchestration/stacks/test_non_empty_stack.py
@@ -0,0 +1,169 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+
+from tempest.api.orchestration import base
+from tempest.common.utils.data_utils import rand_name
+from tempest.test import attr
+
+
+LOG = logging.getLogger(__name__)
+
+
+class StacksTestJSON(base.BaseOrchestrationTest):
+ _interface = 'json'
+
+ template = """
+HeatTemplateFormatVersion: '2012-12-12'
+Description: |
+ Template which creates single EC2 instance
+Parameters:
+ KeyName:
+ Type: String
+ InstanceType:
+ Type: String
+ ImageId:
+ Type: String
+Resources:
+ SmokeServer:
+ Type: AWS::EC2::Instance
+ Metadata:
+ Name: SmokeServer
+ Properties:
+ ImageId: {Ref: ImageId}
+ InstanceType: {Ref: InstanceType}
+ KeyName: {Ref: KeyName}
+ UserData:
+ Fn::Base64:
+ Fn::Join:
+ - ''
+ - - '#!/bin/bash -v
+
+ '
+ - /opt/aws/bin/cfn-signal -e 0 -r "SmokeServer created" '
+ - {Ref: WaitHandle}
+ - '''
+
+ '
+ WaitHandle:
+ Type: AWS::CloudFormation::WaitConditionHandle
+ WaitCondition:
+ Type: AWS::CloudFormation::WaitCondition
+ DependsOn: SmokeServer
+ Properties:
+ Handle: {Ref: WaitHandle}
+ Timeout: '600'
+"""
+
+ @classmethod
+ def setUpClass(cls):
+ super(StacksTestJSON, cls).setUpClass()
+ if not cls.orchestration_cfg.image_ref:
+ raise cls.skipException("No image available to test")
+ cls.client = cls.orchestration_client
+ cls.stack_name = rand_name('heat')
+ keypair_name = (cls.orchestration_cfg.keypair_name or
+ cls._create_keypair()['name'])
+
+ # create the stack
+ cls.stack_identifier = cls.create_stack(
+ cls.stack_name,
+ cls.template,
+ parameters={
+ 'KeyName': keypair_name,
+ 'InstanceType': cls.orchestration_cfg.instance_type,
+ 'ImageId': cls.orchestration_cfg.image_ref
+ })
+ cls.stack_id = cls.stack_identifier.split('/')[1]
+ cls.resource_name = 'SmokeServer'
+ cls.resource_type = 'AWS::EC2::Instance'
+ cls.client.wait_for_stack_status(cls.stack_id, 'CREATE_COMPLETE')
+
+ @attr(type='slow')
+ def test_stack_list(self):
+ """Created stack should be on the list of existing stacks."""
+ resp, stacks = self.client.list_stacks()
+ self.assertEqual('200', resp['status'])
+ self.assertIsInstance(stacks, list)
+ stacks_names = map(lambda stack: stack['stack_name'], stacks)
+ self.assertIn(self.stack_name, stacks_names)
+
+ @attr(type='slow')
+ def test_stack_show(self):
+ """Getting details about created stack should be possible."""
+ resp, stack = self.client.get_stack(self.stack_name)
+ self.assertEqual('200', resp['status'])
+ self.assertIsInstance(stack, dict)
+ self.assertEqual(self.stack_name, stack['stack_name'])
+ self.assertEqual(self.stack_id, stack['id'])
+
+ @attr(type='slow')
+ def test_list_resources(self):
+ """Getting list of created resources for the stack should be possible.
+ """
+ resp, resources = self.client.list_resources(self.stack_identifier)
+ self.assertEqual('200', resp['status'])
+ self.assertIsInstance(resources, list)
+ resources_names = map(lambda resource: resource['logical_resource_id'],
+ resources)
+ self.assertIn(self.resource_name, resources_names)
+ resources_types = map(lambda resource: resource['resource_type'],
+ resources)
+ self.assertIn(self.resource_type, resources_types)
+
+ @attr(type='slow')
+ def test_show_resource(self):
+ """Getting details about created resource should be possible."""
+ resp, resource = self.client.get_resource(self.stack_identifier,
+ self.resource_name)
+ self.assertIsInstance(resource, dict)
+ self.assertEqual(self.resource_name, resource['logical_resource_id'])
+ self.assertEqual(self.resource_type, resource['resource_type'])
+
+ @attr(type='slow')
+ def test_resource_metadata(self):
+ """Getting metadata for created resource should be possible."""
+ resp, metadata = self.client.show_resource_metadata(
+ self.stack_identifier,
+ self.resource_name)
+ self.assertEqual('200', resp['status'])
+ self.assertIsInstance(metadata, dict)
+ self.assertEqual(self.resource_name, metadata.get('Name', None))
+
+ @attr(type='slow')
+ def test_list_events(self):
+ """Getting list of created events for the stack should be possible."""
+ resp, events = self.client.list_events(self.stack_identifier)
+ self.assertEqual('200', resp['status'])
+ self.assertIsInstance(events, list)
+ resource_statuses = map(lambda event: event['resource_status'], events)
+ self.assertIn('CREATE_IN_PROGRESS', resource_statuses)
+ self.assertIn('CREATE_COMPLETE', resource_statuses)
+
+ @attr(type='slow')
+ def test_show_event(self):
+ """Getting details about existing event should be possible."""
+ resp, events = self.client.list_resource_events(self.stack_identifier,
+ self.resource_name)
+ self.assertNotEqual([], events)
+ events.sort(key=lambda event: event['event_time'])
+ event_id = events[0]['id']
+ resp, event = self.client.show_event(self.stack_identifier,
+ self.resource_name, event_id)
+ self.assertEqual('200', resp['status'])
+ self.assertEqual('CREATE_IN_PROGRESS', event['resource_status'])
+ self.assertEqual('state changed', event['resource_status_reason'])
+ self.assertEqual(self.resource_name, event['logical_resource_id'])
+ self.assertIsInstance(event, dict)
diff --git a/tempest/api/orchestration/stacks/test_instance_cfn_init.py b/tempest/api/orchestration/stacks/test_server_cfn_init.py
similarity index 90%
rename from tempest/api/orchestration/stacks/test_instance_cfn_init.py
rename to tempest/api/orchestration/stacks/test_server_cfn_init.py
index fe55ecf..ffe8def 100644
--- a/tempest/api/orchestration/stacks/test_instance_cfn_init.py
+++ b/tempest/api/orchestration/stacks/test_server_cfn_init.py
@@ -26,7 +26,7 @@
LOG = logging.getLogger(__name__)
-class InstanceCfnInitTestJSON(base.BaseOrchestrationTest):
+class ServerCfnInitTestJSON(base.BaseOrchestrationTest):
_interface = 'json'
existing_keypair = (tempest.config.TempestConfig().
orchestration.keypair_name is not None)
@@ -37,11 +37,11 @@
Template which uses a wait condition to confirm that a minimal
cfn-init and cfn-signal has worked
Parameters:
- KeyName:
+ key_name:
Type: String
- InstanceType:
+ flavor:
Type: String
- ImageId:
+ image:
Type: String
Resources:
CfnUser:
@@ -58,7 +58,7 @@
Properties:
UserName: {Ref: CfnUser}
SmokeServer:
- Type: AWS::EC2::Instance
+ Type: OS::Nova::Server
Metadata:
AWS::CloudFormation::Init:
config:
@@ -83,12 +83,12 @@
owner: root
group: root
Properties:
- ImageId: {Ref: ImageId}
- InstanceType: {Ref: InstanceType}
- KeyName: {Ref: KeyName}
- SecurityGroups:
+ image: {Ref: image}
+ flavor: {Ref: flavor}
+ key_name: {Ref: key_name}
+ security_groups:
- {Ref: SmokeSecurityGroup}
- UserData:
+ user_data:
Fn::Base64:
Fn::Join:
- ''
@@ -118,12 +118,12 @@
SmokeServerIp:
Description: IP address of server
Value:
- Fn::GetAtt: [SmokeServer, PublicIp]
+ Fn::GetAtt: [SmokeServer, first_private_address]
"""
@classmethod
def setUpClass(cls):
- super(InstanceCfnInitTestJSON, cls).setUpClass()
+ super(ServerCfnInitTestJSON, cls).setUpClass()
if not cls.orchestration_cfg.image_ref:
raise cls.skipException("No image available to test")
cls.client = cls.orchestration_client
@@ -140,9 +140,9 @@
stack_name,
cls.template,
parameters={
- 'KeyName': keypair_name,
- 'InstanceType': cls.orchestration_cfg.instance_type,
- 'ImageId': cls.orchestration_cfg.image_ref
+ 'key_name': keypair_name,
+ 'flavor': cls.orchestration_cfg.instance_type,
+ 'image': cls.orchestration_cfg.image_ref
})
@attr(type='slow')
@@ -187,7 +187,7 @@
# This is an assert of great significance, as it means the following
# has happened:
# - cfn-init read the provided metadata and wrote out a file
- # - a user was created and credentials written to the instance
+ # - a user was created and credentials written to the server
# - a cfn-signal was built which was signed with provided credentials
# - the wait condition was fulfilled and the stack has changed state
wait_status = json.loads(
diff --git a/tempest/api/orchestration/stacks/test_stacks.py b/tempest/api/orchestration/stacks/test_stacks.py
index f1f1f7e..4bda5ab 100644
--- a/tempest/api/orchestration/stacks/test_stacks.py
+++ b/tempest/api/orchestration/stacks/test_stacks.py
@@ -33,8 +33,7 @@
@attr(type='smoke')
def test_stack_list_responds(self):
- resp, body = self.client.list_stacks()
- stacks = body['stacks']
+ resp, stacks = self.client.list_stacks()
self.assertEqual('200', resp['status'])
self.assertIsInstance(stacks, list)
@@ -42,9 +41,6 @@
def test_stack_crud_no_resources(self):
stack_name = rand_name('heat')
- # count how many stacks to start with
- resp, body = self.client.list_stacks()
-
# create the stack
stack_identifier = self.create_stack(
stack_name, self.empty_template)
@@ -54,21 +50,21 @@
self.client.wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
# check for stack in list
- resp, body = self.client.list_stacks()
- list_ids = list([stack['id'] for stack in body['stacks']])
+ resp, stacks = self.client.list_stacks()
+ list_ids = list([stack['id'] for stack in stacks])
self.assertIn(stack_id, list_ids)
# fetch the stack
- resp, body = self.client.get_stack(stack_identifier)
- self.assertEqual('CREATE_COMPLETE', body['stack_status'])
+ resp, stack = self.client.get_stack(stack_identifier)
+ self.assertEqual('CREATE_COMPLETE', stack['stack_status'])
# fetch the stack by name
- resp, body = self.client.get_stack(stack_name)
- self.assertEqual('CREATE_COMPLETE', body['stack_status'])
+ resp, stack = self.client.get_stack(stack_name)
+ self.assertEqual('CREATE_COMPLETE', stack['stack_status'])
# fetch the stack by id
- resp, body = self.client.get_stack(stack_id)
- self.assertEqual('CREATE_COMPLETE', body['stack_status'])
+ resp, stack = self.client.get_stack(stack_id)
+ self.assertEqual('CREATE_COMPLETE', stack['stack_status'])
# delete the stack
resp = self.client.delete_stack(stack_identifier)
diff --git a/tempest/api/orchestration/stacks/test_templates.py b/tempest/api/orchestration/stacks/test_templates.py
new file mode 100644
index 0000000..6a7c541
--- /dev/null
+++ b/tempest/api/orchestration/stacks/test_templates.py
@@ -0,0 +1,86 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+
+from tempest.api.orchestration import base
+from tempest.common.utils.data_utils import rand_name
+from tempest import exceptions
+from tempest.test import attr
+
+
+LOG = logging.getLogger(__name__)
+
+
+class TemplateYAMLTestJSON(base.BaseOrchestrationTest):
+ _interface = 'json'
+
+ template = """
+HeatTemplateFormatVersion: '2012-12-12'
+Description: |
+ Template which creates only a new user
+Resources:
+ CfnUser:
+ Type: AWS::IAM::User
+"""
+
+ invalid_template_url = 'http://www.example.com/template.yaml'
+
+ @classmethod
+ def setUpClass(cls):
+ super(TemplateYAMLTestJSON, cls).setUpClass()
+ cls.client = cls.orchestration_client
+ cls.stack_name = rand_name('heat')
+ cls.stack_identifier = cls.create_stack(cls.stack_name, cls.template)
+ cls.client.wait_for_stack_status(cls.stack_identifier,
+ 'CREATE_COMPLETE')
+ cls.stack_id = cls.stack_identifier.split('/')[1]
+ cls.parameters = {}
+
+ @attr(type='gate')
+ def test_show_template(self):
+        """Get the template that was used to create the stack."""
+ resp, template = self.client.show_template(self.stack_identifier)
+ self.assertEqual('200', resp['status'])
+
+ @attr(type='gate')
+ def test_validate_template(self):
+        """Validate the template by passing its content."""
+ resp, parameters = self.client.validate_template(self.template,
+ self.parameters)
+ self.assertEqual('200', resp['status'])
+
+ @attr(type=['gate', 'negative'])
+ def test_validate_template_url(self):
+        """Validate the template by passing a URL to it."""
+ self.assertRaises(exceptions.BadRequest,
+ self.client.validate_template_url,
+ template_url=self.invalid_template_url,
+ parameters=self.parameters)
+
+
+class TemplateAWSTestJSON(TemplateYAMLTestJSON):
+ template = """
+{
+ "AWSTemplateFormatVersion" : "2010-09-09",
+ "Description" : "Template which creates only a new user",
+ "Resources" : {
+ "CfnUser" : {
+ "Type" : "AWS::IAM::User"
+ }
+ }
+}
+"""
+
+ invalid_template_url = 'http://www.example.com/template.template'
diff --git a/tempest/api/volume/admin/test_volume_types.py b/tempest/api/volume/admin/test_volume_types.py
index 822f691..b15f8dd 100644
--- a/tempest/api/volume/admin/test_volume_types.py
+++ b/tempest/api/volume/admin/test_volume_types.py
@@ -41,6 +41,7 @@
def _delete_volume(self, volume_id):
resp, _ = self.volumes_client.delete_volume(volume_id)
self.assertEqual(202, resp.status)
+ self.volumes_client.wait_for_resource_deletion(volume_id)
def _delete_volume_type(self, volume_type_id):
resp, _ = self.client.delete_volume_type(volume_type_id)
diff --git a/tempest/api/volume/test_volumes_actions.py b/tempest/api/volume/test_volumes_actions.py
index 5861497..766a2c7 100644
--- a/tempest/api/volume/test_volumes_actions.py
+++ b/tempest/api/volume/test_volumes_actions.py
@@ -18,6 +18,8 @@
from tempest.api.volume.base import BaseVolumeTest
from tempest.common.utils.data_utils import rand_name
from tempest.test import attr
+from tempest.test import services
+from tempest.test import stresstest
class VolumesActionsTest(BaseVolumeTest):
@@ -52,49 +54,49 @@
super(VolumesActionsTest, cls).tearDownClass()
+ @stresstest(class_setup_per='process')
@attr(type='smoke')
+ @services('compute')
def test_attach_detach_volume_to_instance(self):
# Volume is attached and detached successfully from an instance
- try:
- mountpoint = '/dev/vdc'
- resp, body = self.client.attach_volume(self.volume['id'],
- self.server['id'],
- mountpoint)
- self.assertEqual(202, resp.status)
- self.client.wait_for_volume_status(self.volume['id'], 'in-use')
- except Exception:
- self.fail("Could not attach volume to instance")
- finally:
- # Detach the volume from the instance
- resp, body = self.client.detach_volume(self.volume['id'])
- self.assertEqual(202, resp.status)
- self.client.wait_for_volume_status(self.volume['id'], 'available')
+ mountpoint = '/dev/vdc'
+ resp, body = self.client.attach_volume(self.volume['id'],
+ self.server['id'],
+ mountpoint)
+ self.assertEqual(202, resp.status)
+ self.client.wait_for_volume_status(self.volume['id'], 'in-use')
+ resp, body = self.client.detach_volume(self.volume['id'])
+ self.assertEqual(202, resp.status)
+ self.client.wait_for_volume_status(self.volume['id'], 'available')
+ @stresstest(class_setup_per='process')
@attr(type='gate')
+ @services('compute')
def test_get_volume_attachment(self):
# Verify that a volume's attachment information is retrieved
mountpoint = '/dev/vdc'
resp, body = self.client.attach_volume(self.volume['id'],
self.server['id'],
mountpoint)
- self.client.wait_for_volume_status(self.volume['id'], 'in-use')
self.assertEqual(202, resp.status)
- try:
- resp, volume = self.client.get_volume(self.volume['id'])
- self.assertEqual(200, resp.status)
- self.assertIn('attachments', volume)
- attachment = volume['attachments'][0]
- self.assertEqual(mountpoint, attachment['device'])
- self.assertEqual(self.server['id'], attachment['server_id'])
- self.assertEqual(self.volume['id'], attachment['id'])
- self.assertEqual(self.volume['id'], attachment['volume_id'])
- except Exception:
- self.fail("Could not get attachment details from volume")
- finally:
- self.client.detach_volume(self.volume['id'])
- self.client.wait_for_volume_status(self.volume['id'], 'available')
+ self.client.wait_for_volume_status(self.volume['id'], 'in-use')
+        # NOTE(gfidente): cleanups are appended in reverse order because
+        # they are executed in LIFO order (the last one added runs first)
+ self.addCleanup(self.client.wait_for_volume_status,
+ self.volume['id'],
+ 'available')
+ self.addCleanup(self.client.detach_volume, self.volume['id'])
+ resp, volume = self.client.get_volume(self.volume['id'])
+ self.assertEqual(200, resp.status)
+ self.assertIn('attachments', volume)
+ attachment = self.client.get_attachment_from_volume(volume)
+ self.assertEqual(mountpoint, attachment['device'])
+ self.assertEqual(self.server['id'], attachment['server_id'])
+ self.assertEqual(self.volume['id'], attachment['id'])
+ self.assertEqual(self.volume['id'], attachment['volume_id'])
@attr(type='gate')
+ @services('image')
def test_volume_upload(self):
# NOTE(gfidente): the volume uploaded in Glance comes from setUpClass,
# it is shared with the other tests. After it is uploaded in Glance,
@@ -107,3 +109,7 @@
self.assertEqual(202, resp.status)
self.image_client.wait_for_image_status(image_id, 'active')
self.client.wait_for_volume_status(self.volume['id'], 'available')
+
+
+class VolumesActionsTestXML(VolumesActionsTest):
+ _interface = "xml"
diff --git a/tempest/api/volume/test_volumes_get.py b/tempest/api/volume/test_volumes_get.py
index 2e90f16..f7f428c 100644
--- a/tempest/api/volume/test_volumes_get.py
+++ b/tempest/api/volume/test_volumes_get.py
@@ -18,6 +18,7 @@
from tempest.api.volume import base
from tempest.common.utils.data_utils import rand_name
from tempest.test import attr
+from tempest.test import services
class VolumesGetTest(base.BaseVolumeTest):
@@ -93,6 +94,7 @@
self._volume_create_get_delete()
@attr(type='smoke')
+ @services('image')
def test_volume_create_get_delete_from_image(self):
self._volume_create_get_delete(imageRef=self.config.compute.image_ref)
diff --git a/tempest/api/volume/test_volumes_list.py b/tempest/api/volume/test_volumes_list.py
index 5d5fd7e..8c39e08 100644
--- a/tempest/api/volume/test_volumes_list.py
+++ b/tempest/api/volume/test_volumes_list.py
@@ -32,6 +32,19 @@
_interface = 'json'
+ def assertVolumesIn(self, fetched_list, expected_list):
+ missing_vols = [v for v in expected_list if v not in fetched_list]
+ if len(missing_vols) == 0:
+ return
+
+ def str_vol(vol):
+ return "%s:%s" % (vol['id'], vol['display_name'])
+
+ raw_msg = "Could not find volumes %s in expected list %s; fetched %s"
+ self.fail(raw_msg % ([str_vol(v) for v in missing_vols],
+ [str_vol(v) for v in expected_list],
+ [str_vol(v) for v in fetched_list]))
+
@classmethod
def setUpClass(cls):
super(VolumesListTest, cls).setUpClass()
@@ -82,12 +95,7 @@
# Fetch all volumes
resp, fetched_list = self.client.list_volumes()
self.assertEqual(200, resp.status)
- # Now check if all the volumes created in setup are in fetched list
- missing_vols = [v for v in self.volume_list if v not in fetched_list]
- self.assertFalse(missing_vols,
- "Failed to find volume %s in fetched list" %
- ', '.join(m_vol['display_name']
- for m_vol in missing_vols))
+ self.assertVolumesIn(fetched_list, self.volume_list)
@attr(type='gate')
def test_volume_list_with_details(self):
@@ -95,12 +103,7 @@
# Fetch all Volumes
resp, fetched_list = self.client.list_volumes_with_detail()
self.assertEqual(200, resp.status)
- # Verify that all the volumes are returned
- missing_vols = [v for v in self.volume_list if v not in fetched_list]
- self.assertFalse(missing_vols,
- "Failed to find volume %s in fetched list" %
- ', '.join(m_vol['display_name']
- for m_vol in missing_vols))
+ self.assertVolumesIn(fetched_list, self.volume_list)
class VolumeListTestXML(VolumesListTest):
diff --git a/tempest/cli/__init__.py b/tempest/cli/__init__.py
index 08f585a..cbb8d08 100644
--- a/tempest/cli/__init__.py
+++ b/tempest/cli/__init__.py
@@ -101,31 +101,30 @@
flags = creds + ' ' + flags
return self.cmd(cmd, action, flags, params, fail_ok)
- def check_output(self, cmd, **kwargs):
- # substitutes subprocess.check_output which is not in python2.6
- kwargs['stdout'] = subprocess.PIPE
- proc = subprocess.Popen(cmd, **kwargs)
- output = proc.communicate()[0]
- if proc.returncode != 0:
- raise CommandFailed(proc.returncode, cmd, output)
- return output
-
def cmd(self, cmd, action, flags='', params='', fail_ok=False,
merge_stderr=False):
"""Executes specified command for the given action."""
cmd = ' '.join([os.path.join(CONF.cli.cli_dir, cmd),
flags, action, params])
LOG.info("running: '%s'" % cmd)
+ cmd_str = cmd
cmd = shlex.split(cmd)
+ result = ''
+ result_err = ''
try:
- if merge_stderr:
- result = self.check_output(cmd, stderr=subprocess.STDOUT)
- else:
- with open('/dev/null', 'w') as devnull:
- result = self.check_output(cmd, stderr=devnull)
- except subprocess.CalledProcessError as e:
- LOG.error("command output:\n%s" % e.output)
- raise
+ stdout = subprocess.PIPE
+ stderr = subprocess.STDOUT if merge_stderr else subprocess.PIPE
+ proc = subprocess.Popen(
+ cmd, stdout=stdout, stderr=stderr)
+ result, result_err = proc.communicate()
+ if not fail_ok and proc.returncode != 0:
+ raise CommandFailed(proc.returncode,
+ cmd,
+ result)
+ finally:
+ LOG.debug('output of %s:\n%s' % (cmd_str, result))
+ if not merge_stderr and result_err:
+ LOG.debug('error output of %s:\n%s' % (cmd_str, result_err))
return result
def assertTableStruct(self, items, field_names):
diff --git a/tempest/cli/output_parser.py b/tempest/cli/output_parser.py
index bfd7f9e..f22ec4e 100644
--- a/tempest/cli/output_parser.py
+++ b/tempest/cli/output_parser.py
@@ -133,6 +133,10 @@
if not isinstance(output_lines, list):
output_lines = output_lines.split('\n')
+ if not output_lines[-1]:
+ # skip last line if empty (just newline at the end)
+ output_lines = output_lines[:-1]
+
for line in output_lines:
if delimiter_line.match(line):
columns = _table_columns(line)
diff --git a/tempest/cli/simple_read_only/test_keystone.py b/tempest/cli/simple_read_only/test_keystone.py
index 1e8009f..4c1c27f 100644
--- a/tempest/cli/simple_read_only/test_keystone.py
+++ b/tempest/cli/simple_read_only/test_keystone.py
@@ -46,9 +46,12 @@
out = self.keystone('catalog')
catalog = self.parser.details_multiple(out, with_label=True)
for svc in catalog:
- self.assertTrue(svc['__label'].startswith('Service:'),
- msg=('Invalid beginning of service block: %s' %
- svc['__label']))
+ if svc.get('__label'):
+ self.assertTrue(svc['__label'].startswith('Service:'),
+ msg=('Invalid beginning of service block: '
+ '%s' % svc['__label']))
+ self.assertIn('id', svc.keys())
+ self.assertIn('region', svc.keys())
def test_admin_endpoint_list(self):
out = self.keystone('endpoint-list')
diff --git a/tempest/common/rest_client.py b/tempest/common/rest_client.py
index d744e3d..8dfff6e 100644
--- a/tempest/common/rest_client.py
+++ b/tempest/common/rest_client.py
@@ -307,6 +307,12 @@
self.LOG.info("Response Status: " + status)
headers = resp.copy()
del headers['status']
+ if headers.get('x-compute-request-id'):
+ self.LOG.info("Nova request id: %s" %
+ headers.pop('x-compute-request-id'))
+ elif headers.get('x-openstack-request-id'):
+ self.LOG.info("Glance request id %s" %
+ headers.pop('x-openstack-request-id'))
if len(headers):
self.LOG.debug('Response Headers: ' + str(headers))
if resp_body:
diff --git a/tempest/common/ssh.py b/tempest/common/ssh.py
index be350c8..2052705 100644
--- a/tempest/common/ssh.py
+++ b/tempest/common/ssh.py
@@ -56,7 +56,7 @@
paramiko.AutoAddPolicy())
_start_time = time.time()
- while not self._is_timed_out(self.timeout, _start_time):
+ while not self._is_timed_out(_start_time):
try:
ssh.connect(self.host, username=self.username,
password=self.password,
@@ -76,8 +76,8 @@
password=self.password)
return ssh
- def _is_timed_out(self, timeout, start_time):
- return (time.time() - timeout) > start_time
+ def _is_timed_out(self, start_time):
+ return (time.time() - self.timeout) > start_time
def connect_until_closed(self):
"""Connect to the server and wait until connection is lost."""
@@ -85,10 +85,10 @@
ssh = self._get_ssh_connection()
_transport = ssh.get_transport()
_start_time = time.time()
- _timed_out = self._is_timed_out(self.timeout, _start_time)
+ _timed_out = self._is_timed_out(_start_time)
while _transport.is_active() and not _timed_out:
time.sleep(5)
- _timed_out = self._is_timed_out(self.timeout, _start_time)
+ _timed_out = self._is_timed_out(_start_time)
ssh.close()
except (EOFError, paramiko.AuthenticationException, socket.error):
return
@@ -114,9 +114,13 @@
err_data = []
poll = select.poll()
poll.register(channel, select.POLLIN)
+ start_time = time.time()
+
while True:
ready = poll.poll(self.channel_timeout)
if not any(ready):
+ if not self._is_timed_out(start_time):
+ continue
raise exceptions.TimeoutException(
"Command: '{0}' executed on host '{1}'.".format(
cmd, self.host))
diff --git a/tempest/config.py b/tempest/config.py
index e0ac843..acb0e8d 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -121,10 +121,17 @@
cfg.StrOpt('image_ssh_user',
default="root",
help="User name used to authenticate to an instance."),
+ cfg.StrOpt('image_ssh_password',
+ default="password",
+ help="Password used to authenticate to an instance."),
cfg.StrOpt('image_alt_ssh_user',
default="root",
help="User name used to authenticate to an instance using "
"the alternate image."),
+ cfg.StrOpt('image_alt_ssh_password',
+ default="password",
+ help="Password used to authenticate to an instance using "
+ "the alternate image."),
cfg.BoolOpt('resize_available',
default=False,
help="Does the test environment support resizing?"),
@@ -196,6 +203,10 @@
cfg.BoolOpt('flavor_extra_enabled',
default=True,
help="If false, skip flavor extra data test"),
+ cfg.StrOpt('volume_device_name',
+ default='vdb',
+ help="Expected device name when a volume is attached to "
+ "an instance")
]
@@ -227,36 +238,6 @@
for opt in ComputeAdminGroup:
conf.register_opt(opt, group='compute-admin')
-
-whitebox_group = cfg.OptGroup(name='whitebox',
- title="Whitebox Options")
-
-WhiteboxGroup = [
- cfg.BoolOpt('whitebox_enabled',
- default=False,
- help="Does the test environment support whitebox tests for "
- "Compute?"),
- cfg.StrOpt('db_uri',
- default=None,
- help="Connection string to the database of Compute service"),
- cfg.StrOpt('source_dir',
- default="/opt/stack/nova",
- help="Path of nova source directory"),
- cfg.StrOpt('config_path',
- default='/etc/nova/nova.conf',
- help="Path of nova configuration file"),
- cfg.StrOpt('bin_dir',
- default="/usr/local/bin/",
- help="Directory containing nova binaries such as nova-manage"),
-]
-
-
-def register_whitebox_opts(conf):
- conf.register_group(whitebox_group)
- for opt in WhiteboxGroup:
- conf.register_opt(opt, group='whitebox')
-
-
image_group = cfg.OptGroup(name='image',
title="Image Service Options")
@@ -519,7 +500,10 @@
help='regexp for list of log files.'),
cfg.StrOpt('log_check_interval',
default=60,
- help='time between log file error checks.')
+ help='time (in seconds) between log file error checks.'),
+ cfg.StrOpt('default_thread_number_per_action',
+ default=4,
+ help='The number of threads created while stress test.')
]
@@ -636,7 +620,6 @@
register_compute_opts(cfg.CONF)
register_identity_opts(cfg.CONF)
- register_whitebox_opts(cfg.CONF)
register_image_opts(cfg.CONF)
register_network_opts(cfg.CONF)
register_volume_opts(cfg.CONF)
@@ -649,7 +632,6 @@
register_scenario_opts(cfg.CONF)
register_service_available_opts(cfg.CONF)
self.compute = cfg.CONF.compute
- self.whitebox = cfg.CONF.whitebox
self.identity = cfg.CONF.identity
self.images = cfg.CONF.image
self.network = cfg.CONF.network
diff --git a/tempest/exceptions.py b/tempest/exceptions.py
index 62bd8cf..924ebc9 100644
--- a/tempest/exceptions.py
+++ b/tempest/exceptions.py
@@ -65,6 +65,10 @@
message = 'Unauthorized'
+class InvalidServiceTag(RestClientException):
+ message = "Invalid service tag"
+
+
class TimeoutException(TempestException):
message = "Request timed out"
diff --git a/tempest/hacking/checks.py b/tempest/hacking/checks.py
index f9eb968..aa97211 100644
--- a/tempest/hacking/checks.py
+++ b/tempest/hacking/checks.py
@@ -22,6 +22,8 @@
SKIP_DECORATOR_RE = re.compile(r'\s*@testtools.skip\((.*)\)')
SKIP_STR_RE = re.compile(r'.*Bug #\d+.*')
PYTHON_CLIENT_RE = re.compile('import (%s)client' % '|'.join(PYTHON_CLIENTS))
+TEST_DEFINITION = re.compile(r'^\s*def test.*')
+SCENARIO_DECORATOR = re.compile(r'\s*@.*services\(')
def skip_bugs(physical_line):
@@ -53,17 +55,21 @@
" in tempest/api/* tests"))
-def import_no_files_in_tests(physical_line, filename):
- """Check for merges that try to land into tempest/tests
+def scenario_tests_need_service_tags(physical_line, filename,
+ previous_logical):
+ """Check that scenario tests have service tags
- T103: tempest/tests directory is deprecated
+ T104: Scenario tests require a services decorator
"""
- if "tempest/tests" in filename:
- return (0, ("T103: tempest/tests is deprecated"))
+ if 'tempest/scenario' in filename:
+ if TEST_DEFINITION.match(physical_line):
+ if not SCENARIO_DECORATOR.match(previous_logical):
+ return (physical_line.find('def'),
+ "T104: Scenario tests require a service decorator")
def factory(register):
register(skip_bugs)
register(import_no_clients_in_api)
- register(import_no_files_in_tests)
+ register(scenario_tests_need_service_tags)
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index e3e35a2..21c37b9 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -29,13 +29,14 @@
from neutronclient.common import exceptions as exc
import neutronclient.v2_0.client
import novaclient.client
-
+from novaclient import exceptions as nova_exceptions
from tempest.api.network import common as net_common
from tempest.common import isolated_creds
from tempest.common import ssh
from tempest.common.utils.data_utils import rand_name
from tempest.common.utils.linux.remote_client import RemoteClient
+from tempest import exceptions
import tempest.manager
from tempest.openstack.common import log
import tempest.test
@@ -286,27 +287,58 @@
expected status to show. At any time, if the returned
status of the thing is ERROR, fail out.
"""
+ self._status_timeout(things, thing_id, expected_status=expected_status)
+
+ def delete_timeout(self, things, thing_id):
+ """
+        Given a thing, poll in a loop, sleeping for a
+        configurable amount of time, waiting for the thing to be
+        deleted (NotFound). At any time, if the returned status
+        of the thing is ERROR, fail out.
+ """
+ self._status_timeout(things,
+ thing_id,
+ allow_notfound=True)
+
+ def _status_timeout(self,
+ things,
+ thing_id,
+ expected_status=None,
+ allow_notfound=False):
+
+ log_status = expected_status if expected_status else ''
+ if allow_notfound:
+ log_status += ' or NotFound' if log_status != '' else 'NotFound'
+
def check_status():
# python-novaclient has resources available to its client
# that all implement a get() method taking an identifier
# for the singular resource to retrieve.
- thing = things.get(thing_id)
+ try:
+ thing = things.get(thing_id)
+ except nova_exceptions.NotFound:
+ if allow_notfound:
+ return True
+ else:
+ raise
+
new_status = thing.status
if new_status == 'ERROR':
- self.fail("%s failed to get to expected status. "
- "In ERROR state."
- % thing)
- elif new_status == expected_status:
+ message = "%s failed to get to expected status. \
+ In ERROR state." % (thing)
+ raise exceptions.BuildErrorException(message)
+ elif new_status == expected_status and expected_status is not None:
return True # All good.
LOG.debug("Waiting for %s to get to %s status. "
"Currently in %s status",
- thing, expected_status, new_status)
+ thing, log_status, new_status)
if not tempest.test.call_until_true(
check_status,
self.config.compute.build_timeout,
self.config.compute.build_interval):
- self.fail("Timed out waiting for thing %s to become %s"
- % (thing_id, expected_status))
+ message = "Timed out waiting for thing %s \
+ to become %s" % (thing_id, log_status)
+ raise exceptions.TimeoutException(message)
def create_loginable_secgroup_rule(self, client=None, secgroup_id=None):
if client is None:
@@ -353,11 +385,8 @@
LOG.debug("Creating a server (name: %s, image: %s, flavor: %s)",
name, image, flavor)
server = client.servers.create(name, image, flavor, **create_kwargs)
- try:
- self.assertEqual(server.name, name)
- self.set_resource(name, server)
- except AttributeError:
- self.fail("Server not successfully created.")
+ self.assertEqual(server.name, name)
+ self.set_resource(name, server)
self.status_timeout(client.servers, server.id, 'ACTIVE')
# The instance retrieved on creation is missing network
# details, necessitating retrieval after it becomes active to
@@ -373,7 +402,7 @@
client = self.volume_client
if name is None:
name = rand_name('scenario-volume-')
- LOG.debug("Creating a volume (size :%s, name: %s)", size, name)
+ LOG.debug("Creating a volume (size: %s, name: %s)", size, name)
volume = client.volumes.create(size=size, display_name=name,
snapshot_id=snapshot_id,
imageRef=imageRef)
@@ -383,6 +412,24 @@
LOG.debug("Created volume: %s", volume)
return volume
+ def create_server_snapshot(self, server, compute_client=None,
+ image_client=None, name=None):
+ if compute_client is None:
+ compute_client = self.compute_client
+ if image_client is None:
+ image_client = self.image_client
+ if name is None:
+ name = rand_name('scenario-snapshot-')
+ LOG.debug("Creating a snapshot image for server: %s", server.name)
+ image_id = compute_client.servers.create_image(server, name)
+ self.addCleanup(image_client.images.delete, image_id)
+ self.status_timeout(image_client.images, image_id, 'active')
+ snapshot_image = image_client.images.get(image_id)
+ self.assertEquals(name, snapshot_image.name)
+ LOG.debug("Created snapshot image %s for server %s",
+ snapshot_image.name, server.name)
+ return snapshot_image
+
def create_keypair(self, client=None, name=None):
if client is None:
client = self.compute_client
@@ -439,12 +486,9 @@
sg_name = rand_name(namestart)
sg_desc = sg_name + " description"
secgroup = client.security_groups.create(sg_name, sg_desc)
- try:
- self.assertEqual(secgroup.name, sg_name)
- self.assertEqual(secgroup.description, sg_desc)
- self.set_resource(sg_name, secgroup)
- except AttributeError:
- self.fail("SecurityGroup object not successfully created.")
+ self.assertEqual(secgroup.name, sg_name)
+ self.assertEqual(secgroup.description, sg_desc)
+ self.set_resource(sg_name, secgroup)
# Add rules to the security group
self.create_loginable_secgroup_rule(client, secgroup.id)
@@ -605,7 +649,3 @@
@classmethod
def _stack_rand_name(cls):
return rand_name(cls.__name__ + '-')
-
- def _create_keypair(self):
- kp_name = rand_name('keypair-smoke')
- return self.compute_client.keypairs.create(kp_name)
diff --git a/tempest/scenario/orchestration/test_autoscaling.py b/tempest/scenario/orchestration/test_autoscaling.py
index cd959a8..88f2ebd 100644
--- a/tempest/scenario/orchestration/test_autoscaling.py
+++ b/tempest/scenario/orchestration/test_autoscaling.py
@@ -12,14 +12,12 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest.openstack.common import log as logging
+import time
+
from tempest.scenario import manager
from tempest.test import attr
from tempest.test import call_until_true
-import time
-
-
-LOG = logging.getLogger(__name__)
+from tempest.test import services
class AutoScalingTest(manager.OrchestrationScenarioTest):
@@ -35,9 +33,8 @@
if self.config.orchestration.keypair_name:
self.keypair_name = self.config.orchestration.keypair_name
else:
- self.keypair = self._create_keypair()
+ self.keypair = self.create_keypair()
self.keypair_name = self.keypair.id
- self.set_resource('keypair', self.keypair)
def launch_stack(self):
self.parameters = {
@@ -63,6 +60,7 @@
self.set_resource('stack', self.stack)
@attr(type='slow')
+ @services('orchestration', 'compute')
def test_scale_up_then_down(self):
self.assign_keypair()
diff --git a/tempest/scenario/test_dashboard_basic_ops.py b/tempest/scenario/test_dashboard_basic_ops.py
index 9a45572..1081a3e 100644
--- a/tempest/scenario/test_dashboard_basic_ops.py
+++ b/tempest/scenario/test_dashboard_basic_ops.py
@@ -20,6 +20,7 @@
from lxml import html
from tempest.scenario import manager
+from tempest.test import services
class TestDashboardBasicOps(manager.OfficialClientTest):
@@ -66,6 +67,7 @@
response = self.opener.open(self.config.dashboard.dashboard_url)
self.assertIn('Overview', response.read())
+ @services('dashboard')
def test_basic_scenario(self):
self.check_login_page()
self.user_login()
diff --git a/tempest/scenario/test_large_ops.py b/tempest/scenario/test_large_ops.py
index 39b1e10..33b7adc 100644
--- a/tempest/scenario/test_large_ops.py
+++ b/tempest/scenario/test_large_ops.py
@@ -18,6 +18,7 @@
from tempest.common.utils.data_utils import rand_name
from tempest.openstack.common import log as logging
from tempest.scenario import manager
+from tempest.test import services
LOG = logging.getLogger(__name__)
@@ -96,6 +97,7 @@
self.addCleanup(delete, self.servers)
self._wait_for_server_status('ACTIVE')
+ @services('compute', 'image')
def test_large_ops_scenario(self):
if self.config.scenario.large_ops_number < 1:
return
diff --git a/tempest/scenario/test_minimum_basic.py b/tempest/scenario/test_minimum_basic.py
index 5cddde2..ce4d1bd 100644
--- a/tempest/scenario/test_minimum_basic.py
+++ b/tempest/scenario/test_minimum_basic.py
@@ -18,6 +18,7 @@
from tempest.common.utils.data_utils import rand_name
from tempest.openstack.common import log as logging
from tempest.scenario import manager
+from tempest.test import services
LOG = logging.getLogger(__name__)
@@ -145,6 +146,7 @@
volume = self.volume_client.volumes.get(self.volume.id)
self.assertEqual('available', volume.status)
+ @services('compute', 'volume', 'image', 'network')
def test_minimum_basic_scenario(self):
self.glance_image_create()
self.nova_keypair_add()
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index 70939f6..662e919 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -19,8 +19,12 @@
from tempest.api.network import common as net_common
from tempest.common.utils.data_utils import rand_name
from tempest import config
+from tempest.openstack.common import log as logging
from tempest.scenario import manager
from tempest.test import attr
+from tempest.test import services
+
+LOG = logging.getLogger(__name__)
class TestNetworkBasicOps(manager.NetworkScenarioTest):
@@ -158,18 +162,15 @@
self.set_resource(name, router)
return router
- @attr(type='smoke')
- def test_001_create_keypairs(self):
+ def _create_keypairs(self):
self.keypairs[self.tenant_id] = self.create_keypair(
name=rand_name('keypair-smoke-'))
- @attr(type='smoke')
- def test_002_create_security_groups(self):
+ def _create_security_groups(self):
self.security_groups[self.tenant_id] = self._create_security_group(
self.compute_client)
- @attr(type='smoke')
- def test_003_create_networks(self):
+ def _create_networks(self):
network = self._create_network(self.tenant_id)
router = self._get_router(self.tenant_id)
subnet = self._create_subnet(network)
@@ -178,8 +179,7 @@
self.subnets.append(subnet)
self.routers.append(router)
- @attr(type='smoke')
- def test_004_check_networks(self):
+ def _check_networks(self):
# Checks that we see the newly created network/subnet/router via
# checking the result of list_[networks,routers,subnets]
seen_nets = self._list_networks()
@@ -202,10 +202,7 @@
self.assertIn(myrouter.name, seen_router_names)
self.assertIn(myrouter.id, seen_router_ids)
- @attr(type='smoke')
- def test_005_create_servers(self):
- if not (self.keypairs or self.security_groups or self.networks):
- raise self.skipTest('Necessary resources have not been defined')
+ def _create_servers(self):
for i, network in enumerate(self.networks):
tenant_id = network.tenant_id
name = rand_name('server-smoke-%d-' % i)
@@ -222,13 +219,11 @@
create_kwargs=create_kwargs)
self.servers.append(server)
- @attr(type='smoke')
- def test_006_check_tenant_network_connectivity(self):
+ def _check_tenant_network_connectivity(self):
if not self.config.network.tenant_networks_reachable:
msg = 'Tenant networks not configured to be reachable.'
- raise self.skipTest(msg)
- if not self.servers:
- raise self.skipTest("No VM's have been created")
+ LOG.info(msg)
+ return
# The target login is assumed to have been configured for
# key-based authentication by cloud-init.
ssh_login = self.config.compute.image_ssh_user
@@ -239,22 +234,14 @@
self._check_vm_connectivity(ip_address, ssh_login,
private_key)
- @attr(type='smoke')
- def test_007_assign_floating_ips(self):
+ def _assign_floating_ips(self):
public_network_id = self.config.network.public_network_id
- if not public_network_id:
- raise self.skipTest('Public network not configured')
- if not self.servers:
- raise self.skipTest("No VM's have been created")
for server in self.servers:
floating_ip = self._create_floating_ip(server, public_network_id)
self.floating_ips.setdefault(server, [])
self.floating_ips[server].append(floating_ip)
- @attr(type='smoke')
- def test_008_check_public_network_connectivity(self):
- if not self.floating_ips:
- raise self.skipTest('No floating ips have been allocated.')
+ def _check_public_network_connectivity(self):
# The target login is assumed to have been configured for
# key-based authentication by cloud-init.
ssh_login = self.config.compute.image_ssh_user
@@ -263,3 +250,15 @@
for floating_ip in floating_ips:
ip_address = floating_ip.floating_ip_address
self._check_vm_connectivity(ip_address, ssh_login, private_key)
+
+ @attr(type='smoke')
+ @services('compute', 'network')
+ def test_network_basic_ops(self):
+ self._create_keypairs()
+ self._create_security_groups()
+ self._create_networks()
+ self._check_networks()
+ self._create_servers()
+ self._check_tenant_network_connectivity()
+ self._assign_floating_ips()
+ self._check_public_network_connectivity()
diff --git a/tempest/scenario/test_network_quotas.py b/tempest/scenario/test_network_quotas.py
index 267aff6..3268066 100644
--- a/tempest/scenario/test_network_quotas.py
+++ b/tempest/scenario/test_network_quotas.py
@@ -16,7 +16,9 @@
# under the License.
from neutronclient.common import exceptions as exc
+
from tempest.scenario.manager import NetworkScenarioTest
+from tempest.test import services
MAX_REASONABLE_ITERATIONS = 51 # more than enough. Default for port is 50.
@@ -41,6 +43,7 @@
cls.subnets = []
cls.ports = []
+ @services('network')
def test_create_network_until_quota_hit(self):
hit_limit = False
for n in xrange(MAX_REASONABLE_ITERATIONS):
@@ -55,6 +58,7 @@
break
self.assertTrue(hit_limit, "Failed: Did not hit quota limit !")
+ @services('network')
def test_create_subnet_until_quota_hit(self):
if not self.networks:
self.networks.append(
@@ -73,6 +77,7 @@
break
self.assertTrue(hit_limit, "Failed: Did not hit quota limit !")
+ @services('network')
def test_create_ports_until_quota_hit(self):
if not self.networks:
self.networks.append(
diff --git a/tempest/scenario/test_server_advanced_ops.py b/tempest/scenario/test_server_advanced_ops.py
index 8ee740e..cf72cd4 100644
--- a/tempest/scenario/test_server_advanced_ops.py
+++ b/tempest/scenario/test_server_advanced_ops.py
@@ -18,6 +18,7 @@
from tempest.common.utils.data_utils import rand_name
from tempest.openstack.common import log as logging
from tempest.scenario import manager
+from tempest.test import services
LOG = logging.getLogger(__name__)
@@ -45,6 +46,7 @@
msg = "Skipping test - flavor_ref and flavor_ref_alt are identical"
raise cls.skipException(msg)
+ @services('compute')
def test_resize_server_confirm(self):
# We create an instance for use in this test
i_name = rand_name('instance')
@@ -73,6 +75,7 @@
self.status_timeout(
self.compute_client.servers, instance_id, 'ACTIVE')
+ @services('compute')
def test_server_sequence_suspend_resume(self):
# We create an instance for use in this test
i_name = rand_name('instance')
diff --git a/tempest/scenario/test_server_basic_ops.py b/tempest/scenario/test_server_basic_ops.py
index 2903687..04204eb 100644
--- a/tempest/scenario/test_server_basic_ops.py
+++ b/tempest/scenario/test_server_basic_ops.py
@@ -18,6 +18,7 @@
from tempest.common.utils.data_utils import rand_name
from tempest.openstack.common import log as logging
from tempest.scenario import manager
+from tempest.test import services
LOG = logging.getLogger(__name__)
@@ -44,12 +45,9 @@
sg_desc = sg_name + " description"
self.secgroup = self.compute_client.security_groups.create(sg_name,
sg_desc)
- try:
- self.assertEqual(self.secgroup.name, sg_name)
- self.assertEqual(self.secgroup.description, sg_desc)
- self.set_resource('secgroup', self.secgroup)
- except AttributeError:
- self.fail("SecurityGroup object not successfully created.")
+ self.assertEqual(self.secgroup.name, sg_name)
+ self.assertEqual(self.secgroup.description, sg_desc)
+ self.set_resource('secgroup', self.secgroup)
# Add rules to the security group
self.create_loginable_secgroup_rule(secgroup_id=self.secgroup.id)
@@ -103,6 +101,7 @@
instance.delete()
self.remove_resource('instance')
+ @services('compute', 'network')
def test_server_basicops(self):
self.add_keypair()
self.create_security_group()
diff --git a/tempest/scenario/test_snapshot_pattern.py b/tempest/scenario/test_snapshot_pattern.py
index c55e2a3..8c2cc76 100644
--- a/tempest/scenario/test_snapshot_pattern.py
+++ b/tempest/scenario/test_snapshot_pattern.py
@@ -15,12 +15,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest.common.utils.data_utils import rand_name
-from tempest.openstack.common import log as logging
from tempest.scenario import manager
-
-
-LOG = logging.getLogger(__name__)
+from tempest.test import services
class TestSnapshotPattern(manager.OfficialClientTest):
@@ -34,14 +30,6 @@
"""
- def _wait_for_server_status(self, server, status):
- self.status_timeout(self.compute_client.servers,
- server.id,
- status)
-
- def _wait_for_image_status(self, image_id, status):
- self.status_timeout(self.image_client.images, image_id, status)
-
def _boot_image(self, image_id):
create_kwargs = {
'key_name': self.keypair.name
@@ -61,17 +49,6 @@
ssh_client.exec_command('date > /tmp/timestamp; sync')
self.timestamp = ssh_client.exec_command('cat /tmp/timestamp')
- def _create_image(self, server):
- snapshot_name = rand_name('scenario-snapshot-')
- create_image_client = self.compute_client.servers.create_image
- image_id = create_image_client(server, snapshot_name)
- self.addCleanup(self.image_client.images.delete, image_id)
- self._wait_for_server_status(server, 'ACTIVE')
- self._wait_for_image_status(image_id, 'active')
- snapshot_image = self.image_client.images.get(image_id)
- self.assertEquals(snapshot_name, snapshot_image.name)
- return image_id
-
def _check_timestamp(self, server_or_ip):
ssh_client = self._ssh_to_server(server_or_ip)
got_timestamp = ssh_client.exec_command('cat /tmp/timestamp')
@@ -85,6 +62,7 @@
def _set_floating_ip_to_server(self, server, floating_ip):
server.add_floating_ip(floating_ip)
+ @services('compute', 'network', 'image')
def test_snapshot_pattern(self):
# prepare for booting a instance
self._add_keypair()
@@ -100,10 +78,10 @@
self._write_timestamp(server)
# snapshot the instance
- snapshot_image_id = self._create_image(server)
+ snapshot_image = self.create_server_snapshot(server=server)
# boot a second instance from the snapshot
- server_from_snapshot = self._boot_image(snapshot_image_id)
+ server_from_snapshot = self._boot_image(snapshot_image.id)
# check the existence of the timestamp file in the second instance
if self.config.compute.use_floatingip_for_ssh:
diff --git a/tempest/scenario/test_stamp_pattern.py b/tempest/scenario/test_stamp_pattern.py
index c74b88d..c5a4aaf 100644
--- a/tempest/scenario/test_stamp_pattern.py
+++ b/tempest/scenario/test_stamp_pattern.py
@@ -50,14 +50,6 @@
14. Check the existence of a file which created at 6. in volume2
"""
- def _wait_for_server_status(self, server, status):
- self.status_timeout(self.compute_client.servers,
- server.id,
- status)
-
- def _wait_for_image_status(self, image_id, status):
- self.status_timeout(self.image_client.images, image_id, status)
-
def _wait_for_volume_snapshot_status(self, volume_snapshot, status):
self.status_timeout(self.volume_client.volume_snapshots,
volume_snapshot.id, status)
@@ -84,17 +76,6 @@
linux_client = self.get_remote_client(server_or_ip)
return linux_client.ssh_client
- def _create_image(self, server):
- snapshot_name = rand_name('scenario-snapshot-')
- create_image_client = self.compute_client.servers.create_image
- image_id = create_image_client(server, snapshot_name)
- self.addCleanup(self.image_client.images.delete, image_id)
- self._wait_for_server_status(server, 'ACTIVE')
- self._wait_for_image_status(image_id, 'active')
- snapshot_image = self.image_client.images.get(image_id)
- self.assertEquals(snapshot_name, snapshot_image.name)
- return image_id
-
def _create_volume_snapshot(self, volume):
snapshot_name = rand_name('scenario-snapshot-')
volume_snapshots = self.volume_client.volume_snapshots
@@ -162,7 +143,8 @@
got_timestamp = ssh_client.exec_command('sudo cat /mnt/timestamp')
self.assertEqual(self.timestamp, got_timestamp)
- @testtools.skip("Until Bug #1205344 is fixed")
+ @testtools.skip("Skipped until the Bug #1205344 is resolved.")
+ @tempest.test.services('compute', 'network', 'volume', 'image')
def test_stamp_pattern(self):
# prepare for booting a instance
self._add_keypair()
@@ -189,14 +171,14 @@
volume_snapshot = self._create_volume_snapshot(volume)
# snapshot the instance
- snapshot_image_id = self._create_image(server)
+ snapshot_image = self.create_server_snapshot(server=server)
# create second volume from the snapshot(volume2)
volume_from_snapshot = self._create_volume(
snapshot_id=volume_snapshot.id)
# boot second instance from the snapshot(instance2)
- server_from_snapshot = self._boot_image(snapshot_image_id)
+ server_from_snapshot = self._boot_image(snapshot_image.id)
# create and add floating IP to server_from_snapshot
if self.config.compute.use_floatingip_for_ssh:
diff --git a/tempest/scenario/test_volume_boot_pattern.py b/tempest/scenario/test_volume_boot_pattern.py
new file mode 100644
index 0000000..3572166
--- /dev/null
+++ b/tempest/scenario/test_volume_boot_pattern.py
@@ -0,0 +1,163 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.common.utils.data_utils import rand_name
+from tempest.scenario import manager
+from tempest.test import services
+
+
+class TestVolumeBootPattern(manager.OfficialClientTest):
+
+ """
+ This test case attempts to reproduce the following steps:
+
+ * Create a bootable volume in Cinder by importing a Glance image
+ * Boot an instance from the bootable volume
+ * Write content to the volume
+ * Delete an instance and boot a new instance from the volume
+ * Check written content in the instance
+ * Create a volume snapshot while the instance is running
+ * Boot an additional instance from the new snapshot based volume
+ * Check written content in the instance booted from snapshot
+ """
+
+ def _create_volume_from_image(self):
+ img_uuid = self.config.compute.image_ref
+ vol_name = rand_name('volume-origin')
+ return self.create_volume(name=vol_name, imageRef=img_uuid)
+
+ def _boot_instance_from_volume(self, vol_id, keypair):
+ # NOTE(gfidente): the syntax for block_device_mapping is
+ # dev_name=id:type:size:delete_on_terminate
+ # where type needs to be "snap" if the server is booted
+ # from a snapshot, size instead can be safely left empty
+ bd_map = {
+ 'vda': vol_id + ':::0'
+ }
+ create_kwargs = {
+ 'block_device_mapping': bd_map,
+ 'key_name': keypair.name
+ }
+ return self.create_server(self.compute_client,
+ create_kwargs=create_kwargs)
+
+ def _create_snapshot_from_volume(self, vol_id):
+ volume_snapshots = self.volume_client.volume_snapshots
+ snap_name = rand_name('snapshot')
+ snap = volume_snapshots.create(volume_id=vol_id,
+ force=True,
+ display_name=snap_name)
+ self.set_resource(snap.id, snap)
+ self.status_timeout(volume_snapshots,
+ snap.id,
+ 'available')
+ return snap
+
+ def _create_volume_from_snapshot(self, snap_id):
+ vol_name = rand_name('volume')
+ return self.create_volume(name=vol_name, snapshot_id=snap_id)
+
+ def _stop_instances(self, instances):
+ # NOTE(gfidente): two loops so we do not wait for the status twice
+ for i in instances:
+ self.compute_client.servers.stop(i)
+ for i in instances:
+ self.status_timeout(self.compute_client.servers,
+ i.id,
+ 'SHUTOFF')
+
+ def _detach_volumes(self, volumes):
+ # NOTE(gfidente): two loops so we do not wait for the status twice
+ for v in volumes:
+ self.volume_client.volumes.detach(v)
+ for v in volumes:
+ self.status_timeout(self.volume_client.volumes,
+ v.id,
+ 'available')
+
+ def _ssh_to_server(self, server, keypair):
+ if self.config.compute.use_floatingip_for_ssh:
+ floating_ip = self.compute_client.floating_ips.create()
+ fip_name = rand_name('scenario-fip')
+ self.set_resource(fip_name, floating_ip)
+ server.add_floating_ip(floating_ip)
+ ip = floating_ip.ip
+ else:
+ network_name_for_ssh = self.config.compute.network_for_ssh
+ ip = server.networks[network_name_for_ssh][0]
+
+ client = self.get_remote_client(ip,
+ private_key=keypair.private_key)
+ return client.ssh_client
+
+ def _get_content(self, ssh_client):
+ return ssh_client.exec_command('cat /tmp/text')
+
+ def _write_text(self, ssh_client):
+ text = rand_name('text-')
+ ssh_client.exec_command('echo "%s" > /tmp/text; sync' % (text))
+
+ return self._get_content(ssh_client)
+
+ def _delete_server(self, server):
+ self.compute_client.servers.delete(server)
+ self.delete_timeout(self.compute_client.servers, server.id)
+
+ def _check_content_of_written_file(self, ssh_client, expected):
+ actual = self._get_content(ssh_client)
+ self.assertEqual(expected, actual)
+
+ @services('compute', 'volume', 'image')
+ def test_volume_boot_pattern(self):
+ keypair = self.create_keypair()
+ self.create_loginable_secgroup_rule()
+
+ # create an instance from volume
+ volume_origin = self._create_volume_from_image()
+ instance_1st = self._boot_instance_from_volume(volume_origin.id,
+ keypair)
+
+ # write content to volume on instance
+ ssh_client_for_instance_1st = self._ssh_to_server(instance_1st,
+ keypair)
+ text = self._write_text(ssh_client_for_instance_1st)
+
+ # delete instance
+ self._delete_server(instance_1st)
+
+ # create a 2nd instance from volume
+ instance_2nd = self._boot_instance_from_volume(volume_origin.id,
+ keypair)
+
+ # check the content of written file
+ ssh_client_for_instance_2nd = self._ssh_to_server(instance_2nd,
+ keypair)
+ self._check_content_of_written_file(ssh_client_for_instance_2nd, text)
+
+ # snapshot a volume
+ snapshot = self._create_snapshot_from_volume(volume_origin.id)
+
+ # create a 3rd instance from snapshot
+ volume = self._create_volume_from_snapshot(snapshot.id)
+ instance_from_snapshot = self._boot_instance_from_volume(volume.id,
+ keypair)
+
+ # check the content of written file
+ ssh_client = self._ssh_to_server(instance_from_snapshot, keypair)
+ self._check_content_of_written_file(ssh_client, text)
+
+ # NOTE(gfidente): ensure resources are in clean state for
+ # deletion operations to succeed
+ self._stop_instances([instance_2nd, instance_from_snapshot])
+ self._detach_volumes([volume_origin, volume])
diff --git a/tempest/scenario/test_volume_snapshot_pattern.py b/tempest/scenario/test_volume_snapshot_pattern.py
deleted file mode 100644
index 8fa177e..0000000
--- a/tempest/scenario/test_volume_snapshot_pattern.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.openstack.common import log as logging
-
-from tempest.common.utils.data_utils import rand_name
-from tempest.scenario import manager
-
-LOG = logging.getLogger(__name__)
-
-
-class TestVolumeSnapshotPattern(manager.OfficialClientTest):
-
- """
- This test case attempts to reproduce the following steps:
-
- * Create in Cinder some bootable volume importing a Glance image
- * Boot an instance from the bootable volume
- * Create a volume snapshot while the instance is running
- * Boot an additional instance from the new snapshot based volume
- """
-
- def _create_volume_from_image(self):
- img_uuid = self.config.compute.image_ref
- vol_name = rand_name('volume-origin')
- return self.create_volume(name=vol_name, imageRef=img_uuid)
-
- def _boot_instance_from_volume(self, vol_id):
- # NOTE(gfidente): the syntax for block_device_mapping is
- # dev_name=id:type:size:delete_on_terminate
- # where type needs to be "snap" if the server is booted
- # from a snapshot, size instead can be safely left empty
- bd_map = {
- 'vda': vol_id + ':::0'
- }
- create_kwargs = {
- 'block_device_mapping': bd_map
- }
- return self.create_server(self.compute_client,
- create_kwargs=create_kwargs)
-
- def _create_snapshot_from_volume(self, vol_id):
- volume_snapshots = self.volume_client.volume_snapshots
- snap_name = rand_name('snapshot')
- snap = volume_snapshots.create(volume_id=vol_id,
- force=True,
- display_name=snap_name)
- self.set_resource(snap.id, snap)
- self.status_timeout(volume_snapshots,
- snap.id,
- 'available')
- return snap
-
- def _create_volume_from_snapshot(self, snap_id):
- vol_name = rand_name('volume')
- return self.create_volume(name=vol_name, snapshot_id=snap_id)
-
- def _stop_instances(self, instances):
- # NOTE(gfidente): two loops so we do not wait for the status twice
- for i in instances:
- self.compute_client.servers.stop(i)
- for i in instances:
- self.status_timeout(self.compute_client.servers,
- i.id,
- 'SHUTOFF')
-
- def _detach_volumes(self, volumes):
- # NOTE(gfidente): two loops so we do not wait for the status twice
- for v in volumes:
- self.volume_client.volumes.detach(v)
- for v in volumes:
- self.status_timeout(self.volume_client.volumes,
- v.id,
- 'available')
-
- def test_volume_snapshot_pattern(self):
- volume_origin = self._create_volume_from_image()
- i_origin = self._boot_instance_from_volume(volume_origin.id)
- snapshot = self._create_snapshot_from_volume(volume_origin.id)
- volume = self._create_volume_from_snapshot(snapshot.id)
- i = self._boot_instance_from_volume(volume.id)
- # NOTE(gfidente): ensure resources are in clean state for
- # deletion operations to succeed
- self._stop_instances([i_origin, i])
- self._detach_volumes([volume_origin, volume])
diff --git a/tempest/services/compute/json/servers_client.py b/tempest/services/compute/json/servers_client.py
index 6906610..c5827f6 100644
--- a/tempest/services/compute/json/servers_client.py
+++ b/tempest/services/compute/json/servers_client.py
@@ -157,6 +157,8 @@
start = int(time.time())
while(server_status != status):
+ if status == 'BUILD' and server_status != 'UNKNOWN':
+ return
time.sleep(self.build_interval)
resp, body = self.get_server(server_id)
server_status = body['status']
diff --git a/tempest/services/compute/xml/servers_client.py b/tempest/services/compute/xml/servers_client.py
index 5c7a629..6f17611 100644
--- a/tempest/services/compute/xml/servers_client.py
+++ b/tempest/services/compute/xml/servers_client.py
@@ -46,9 +46,14 @@
# expanded xml namespace.
type_ns_prefix = ('{http://docs.openstack.org/compute/ext/extended_ips/'
'api/v1.1}type')
+ mac_ns_prefix = ('{http://docs.openstack.org/compute/ext/extended_ips_mac'
+ '/api/v1.1}mac_addr')
+
if type_ns_prefix in ip:
- ip['OS-EXT-IPS:type'] = ip[type_ns_prefix]
- ip.pop(type_ns_prefix)
+ ip['OS-EXT-IPS:type'] = ip.pop(type_ns_prefix)
+
+ if mac_ns_prefix in ip:
+ ip['OS-EXT-IPS-MAC:mac_addr'] = ip.pop(mac_ns_prefix)
return ip
@@ -101,11 +106,35 @@
json['addresses'] = json_addresses
else:
json = xml_to_json(xml_dom)
- diskConfig = '{http://docs.openstack.org/compute/ext/disk_config/api/v1.1'\
- '}diskConfig'
+ diskConfig = ('{http://docs.openstack.org'
+ '/compute/ext/disk_config/api/v1.1}diskConfig')
+ terminated_at = ('{http://docs.openstack.org/'
+ 'compute/ext/server_usage/api/v1.1}terminated_at')
+ launched_at = ('{http://docs.openstack.org'
+ '/compute/ext/server_usage/api/v1.1}launched_at')
+ power_state = ('{http://docs.openstack.org'
+ '/compute/ext/extended_status/api/v1.1}power_state')
+ availability_zone = ('{http://docs.openstack.org'
+ '/compute/ext/extended_availability_zone/api/v2}'
+ 'availability_zone')
+ vm_state = ('{http://docs.openstack.org'
+ '/compute/ext/extended_status/api/v1.1}vm_state')
+ task_state = ('{http://docs.openstack.org'
+ '/compute/ext/extended_status/api/v1.1}task_state')
if diskConfig in json:
- json['OS-DCF:diskConfig'] = json[diskConfig]
- del json[diskConfig]
+ json['OS-DCF:diskConfig'] = json.pop(diskConfig)
+ if terminated_at in json:
+ json['OS-SRV-USG:terminated_at'] = json.pop(terminated_at)
+ if launched_at in json:
+ json['OS-SRV-USG:launched_at'] = json.pop(launched_at)
+ if power_state in json:
+ json['OS-EXT-STS:power_state'] = json.pop(power_state)
+ if availability_zone in json:
+ json['OS-EXT-AZ:availability_zone'] = json.pop(availability_zone)
+ if vm_state in json:
+ json['OS-EXT-STS:vm_state'] = json.pop(vm_state)
+ if task_state in json:
+ json['OS-EXT-STS:task_state'] = json.pop(task_state)
return json
@@ -312,6 +341,8 @@
start = int(time.time())
while(server_status != status):
+ if status == 'BUILD' and server_status != 'UNKNOWN':
+ return
time.sleep(self.build_interval)
resp, body = self.get_server(server_id)
server_status = body['status']
diff --git a/tempest/services/network/json/network_client.py b/tempest/services/network/json/network_client.py
index 588dc8f..3550878 100644
--- a/tempest/services/network/json/network_client.py
+++ b/tempest/services/network/json/network_client.py
@@ -24,7 +24,7 @@
V1 API has been removed from the code base.
Implements create, delete, update, list and show for the basic Neutron
- abstractions (networks, sub-networks, routers and ports):
+ abstractions (networks, sub-networks, routers, ports and floating IP):
Implements add/remove interface to router using subnet ID / port ID
@@ -242,7 +242,7 @@
update_body['admin_state_up'] = kwargs.get(
'admin_state_up', body['router']['admin_state_up'])
# Must uncomment/modify these lines once LP question#233187 is solved
- #update_body['external_gateway_info'] = kwargs.get(
+ # update_body['external_gateway_info'] = kwargs.get(
# 'external_gateway_info', body['router']['external_gateway_info'])
update_body = dict(router=update_body)
update_body = json.dumps(update_body)
@@ -285,3 +285,103 @@
resp, body = self.put(uri, update_body, self.headers)
body = json.loads(body)
return resp, body
+
+ def create_floating_ip(self, ext_network_id, **kwargs):
+ post_body = {
+ 'floatingip': kwargs}
+ post_body['floatingip']['floating_network_id'] = ext_network_id
+ body = json.dumps(post_body)
+ uri = '%s/floatingips' % (self.uri_prefix)
+ resp, body = self.post(uri, headers=self.headers, body=body)
+ body = json.loads(body)
+ return resp, body
+
+ def list_security_groups(self):
+ uri = '%s/security-groups' % (self.uri_prefix)
+ resp, body = self.get(uri, self.headers)
+ body = json.loads(body)
+ return resp, body
+
+ def delete_security_group(self, secgroup_id):
+ uri = '%s/security-groups/%s' % (self.uri_prefix, secgroup_id)
+ resp, body = self.delete(uri, self.headers)
+ return resp, body
+
+ def create_security_group(self, name, **kwargs):
+ post_body = {
+ 'security_group': {
+ 'name': name,
+ }
+ }
+ for key, value in kwargs.iteritems():
+ post_body['security_group'][str(key)] = value
+ body = json.dumps(post_body)
+ uri = '%s/security-groups' % (self.uri_prefix)
+ resp, body = self.post(uri, headers=self.headers, body=body)
+ body = json.loads(body)
+ return resp, body
+
+ def show_floating_ip(self, floating_ip_id):
+ uri = '%s/floatingips/%s' % (self.uri_prefix, floating_ip_id)
+ resp, body = self.get(uri, self.headers)
+ body = json.loads(body)
+ return resp, body
+
+ def show_security_group(self, secgroup_id):
+ uri = '%s/security-groups/%s' % (self.uri_prefix, secgroup_id)
+ resp, body = self.get(uri, self.headers)
+ body = json.loads(body)
+ return resp, body
+
+ def list_floating_ips(self):
+ uri = '%s/floatingips' % (self.uri_prefix)
+ resp, body = self.get(uri, self.headers)
+ body = json.loads(body)
+ return resp, body
+
+ def list_security_group_rules(self):
+ uri = '%s/security-group-rules' % (self.uri_prefix)
+ resp, body = self.get(uri, self.headers)
+ body = json.loads(body)
+ return resp, body
+
+ def delete_floating_ip(self, floating_ip_id):
+ uri = '%s/floatingips/%s' % (self.uri_prefix, floating_ip_id)
+ resp, body = self.delete(uri, self.headers)
+ return resp, body
+
+ def update_floating_ip(self, floating_ip_id, **kwargs):
+ post_body = {
+ 'floatingip': kwargs}
+ body = json.dumps(post_body)
+ uri = '%s/floatingips/%s' % (self.uri_prefix, floating_ip_id)
+ resp, body = self.put(uri, headers=self.headers, body=body)
+ body = json.loads(body)
+ return resp, body
+
+ def create_security_group_rule(self, secgroup_id,
+ direction='ingress', **kwargs):
+ post_body = {
+ 'security_group_rule': {
+ 'direction': direction,
+ 'security_group_id': secgroup_id
+ }
+ }
+ for key, value in kwargs.iteritems():
+ post_body['security_group_rule'][str(key)] = value
+ body = json.dumps(post_body)
+ uri = '%s/security-group-rules' % (self.uri_prefix)
+ resp, body = self.post(uri, headers=self.headers, body=body)
+ body = json.loads(body)
+ return resp, body
+
+ def delete_security_group_rule(self, rule_id):
+ uri = '%s/security-group-rules/%s' % (self.uri_prefix, rule_id)
+ resp, body = self.delete(uri, self.headers)
+ return resp, body
+
+ def show_security_group_rule(self, rule_id):
+ uri = '%s/security-group-rules/%s' % (self.uri_prefix, rule_id)
+ resp, body = self.get(uri, self.headers)
+ body = json.loads(body)
+ return resp, body
diff --git a/tempest/services/network/xml/network_client.py b/tempest/services/network/xml/network_client.py
index d4fb656..0aef92f 100755
--- a/tempest/services/network/xml/network_client.py
+++ b/tempest/services/network/xml/network_client.py
@@ -161,6 +161,64 @@
body = _root_tag_fetcher_and_xml_to_json_parse(body)
return resp, body
+ def create_security_group(self, name):
+ uri = '%s/security-groups' % (self.uri_prefix)
+ post_body = Element("security_group")
+ p2 = Element("name", name)
+ post_body.append(p2)
+ resp, body = self.post(uri, str(Document(post_body)), self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def list_security_groups(self):
+ url = '%s/security-groups' % (self.uri_prefix)
+ resp, body = self.get(url, self.headers)
+ secgroups = self._parse_array(etree.fromstring(body))
+ secgroups = {"security_groups": secgroups}
+ return resp, secgroups
+
+ def delete_security_group(self, secgroup_id):
+ uri = '%s/security-groups/%s' % (self.uri_prefix, str(secgroup_id))
+ return self.delete(uri, self.headers)
+
+ def show_security_group(self, secgroup_id):
+ uri = '%s/security-groups/%s' % (self.uri_prefix, str(secgroup_id))
+ resp, body = self.get(uri, self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def list_security_group_rules(self):
+ url = '%s/security-group-rules' % (self.uri_prefix)
+ resp, body = self.get(url, self.headers)
+ rules = self._parse_array(etree.fromstring(body))
+ rules = {"security_group_rules": rules}
+ return resp, rules
+
+ def create_security_group_rule(self, secgroup_id,
+ direction='ingress', **kwargs):
+ uri = '%s/security-group-rules' % (self.uri_prefix)
+ rule = Element("security_group_rule")
+ p1 = Element('security_group_id', secgroup_id)
+ p2 = Element('direction', direction)
+ rule.append(p1)
+ rule.append(p2)
+ for key, val in kwargs.items():
+ key = Element(key, val)
+ rule.append(key)
+ resp, body = self.post(uri, str(Document(rule)), self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def delete_security_group_rule(self, rule_id):
+ uri = '%s/security-group-rules/%s' % (self.uri_prefix, str(rule_id))
+ return self.delete(uri, self.headers)
+
+ def show_security_group_rule(self, rule_id):
+ uri = '%s/security-group-rules/%s' % (self.uri_prefix, str(rule_id))
+ resp, body = self.get(uri, self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
def _root_tag_fetcher_and_xml_to_json_parse(xml_returned_body):
body = ET.fromstring(xml_returned_body)
diff --git a/tempest/services/orchestration/json/orchestration_client.py b/tempest/services/orchestration/json/orchestration_client.py
index 22f3f26..e896e0d 100644
--- a/tempest/services/orchestration/json/orchestration_client.py
+++ b/tempest/services/orchestration/json/orchestration_client.py
@@ -42,7 +42,7 @@
resp, body = self.get(uri)
body = json.loads(body)
- return resp, body
+ return resp, body['stacks']
def create_stack(self, name, disable_rollback=True, parameters={},
timeout_mins=60, template=None, template_url=None):
@@ -135,7 +135,7 @@
# been created yet
pass
else:
- resource_name = body['logical_resource_id']
+ resource_name = body['resource_name']
resource_status = body['resource_status']
if resource_status == status:
return
@@ -176,3 +176,64 @@
(stack_name, status, self.build_timeout))
raise exceptions.TimeoutException(message)
time.sleep(self.build_interval)
+
+ def show_resource_metadata(self, stack_identifier, resource_name):
+ """Returns the resource's metadata."""
+ url = ('stacks/{stack_identifier}/resources/{resource_name}'
+ '/metadata'.format(**locals()))
+ resp, body = self.get(url)
+ body = json.loads(body)
+ return resp, body['metadata']
+
+ def list_events(self, stack_identifier):
+ """Returns list of all events for a stack."""
+ url = 'stacks/{stack_identifier}/events'.format(**locals())
+ resp, body = self.get(url)
+ body = json.loads(body)
+ return resp, body['events']
+
+ def list_resource_events(self, stack_identifier, resource_name):
+ """Returns list of all events for a resource from stack."""
+ url = ('stacks/{stack_identifier}/resources/{resource_name}'
+ '/events'.format(**locals()))
+ resp, body = self.get(url)
+ body = json.loads(body)
+ return resp, body['events']
+
+ def show_event(self, stack_identifier, resource_name, event_id):
+ """Returns the details of a single stack's event."""
+ url = ('stacks/{stack_identifier}/resources/{resource_name}/events'
+ '/{event_id}'.format(**locals()))
+ resp, body = self.get(url)
+ body = json.loads(body)
+ return resp, body['event']
+
+ def show_template(self, stack_identifier):
+ """Returns the template for the stack."""
+ url = ('stacks/{stack_identifier}/template'.format(**locals()))
+ resp, body = self.get(url)
+ body = json.loads(body)
+ return resp, body
+
+ def _validate_template(self, post_body):
+ """Returns the validation request result."""
+ post_body = json.dumps(post_body)
+ resp, body = self.post('validate', post_body, self.headers)
+ body = json.loads(body)
+ return resp, body
+
+ def validate_template(self, template, parameters={}):
+ """Returns the validation result for a template with parameters."""
+ post_body = {
+ 'template': template,
+ 'parameters': parameters,
+ }
+ return self._validate_template(post_body)
+
+ def validate_template_url(self, template_url, parameters={}):
+ """Returns the validation result for a template with parameters."""
+ post_body = {
+ 'template_url': template_url,
+ 'parameters': parameters,
+ }
+ return self._validate_template(post_body)
diff --git a/tempest/services/volume/json/volumes_client.py b/tempest/services/volume/json/volumes_client.py
index c22b398..2ae73b1 100644
--- a/tempest/services/volume/json/volumes_client.py
+++ b/tempest/services/volume/json/volumes_client.py
@@ -36,6 +36,10 @@
self.build_interval = self.config.volume.build_interval
self.build_timeout = self.config.volume.build_timeout
+ def get_attachment_from_volume(self, volume):
+ """Return the element 'attachment' from input volumes."""
+ return volume['attachments'][0]
+
def list_volumes(self, params=None):
"""List all the volumes created."""
url = 'volumes'
diff --git a/tempest/services/volume/xml/volumes_client.py b/tempest/services/volume/xml/volumes_client.py
index eaa3ae0..936e036 100644
--- a/tempest/services/volume/xml/volumes_client.py
+++ b/tempest/services/volume/xml/volumes_client.py
@@ -56,6 +56,10 @@
vol[tag] = xml_to_json(child)
return vol
+ def get_attachment_from_volume(self, volume):
+ """Return the element 'attachment' from input volumes."""
+ return volume['attachments']['attachment']
+
def list_volumes(self, params=None):
"""List all the volumes created."""
url = 'volumes'
@@ -157,3 +161,33 @@
except exceptions.NotFound:
return True
return False
+
+ def attach_volume(self, volume_id, instance_uuid, mountpoint):
+ """Attaches a volume to a given instance on a given mountpoint."""
+ post_body = Element("os-attach",
+ instance_uuid=instance_uuid,
+ mountpoint=mountpoint
+ )
+ url = 'volumes/%s/action' % str(volume_id)
+ resp, body = self.post(url, str(Document(post_body)), self.headers)
+ if body:
+ body = xml_to_json(etree.fromstring(body))
+ return resp, body
+
+ def detach_volume(self, volume_id):
+ """Detaches a volume from an instance."""
+ post_body = Element("os-detach")
+ url = 'volumes/%s/action' % str(volume_id)
+ resp, body = self.post(url, str(Document(post_body)), self.headers)
+ if body:
+ body = xml_to_json(etree.fromstring(body))
+ return resp, body
+
+ def upload_volume(self, volume_id, image_name):
+ """Uploads a volume in Glance."""
+ post_body = Element("os-volume_upload_image",
+ image_name=image_name)
+ url = 'volumes/%s/action' % str(volume_id)
+ resp, body = self.post(url, str(Document(post_body)), self.headers)
+ volume = xml_to_json(etree.fromstring(body))
+ return resp, volume
diff --git a/tempest/stress/actions/ssh_floating.py b/tempest/stress/actions/ssh_floating.py
new file mode 100644
index 0000000..36ef023
--- /dev/null
+++ b/tempest/stress/actions/ssh_floating.py
@@ -0,0 +1,189 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import socket
+import subprocess
+
+from tempest.common.utils.data_utils import rand_name
+import tempest.stress.stressaction as stressaction
+import tempest.test
+
+
+class FloatingStress(stressaction.StressAction):
+
+ # from the scenario manager
+ def ping_ip_address(self, ip_address):
+ cmd = ['ping', '-c1', '-w1', ip_address]
+
+ proc = subprocess.Popen(cmd,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ proc.wait()
+ success = proc.returncode == 0
+ self.logger.info("%s(%s): %s", self.server_id, self.floating['ip'],
+ "pong!" if success else "no pong :(")
+ return success
+
+ def tcp_connect_scan(self, addr, port):
+ # like tcp
+ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ try:
+ s.connect((addr, port))
+ except socket.error as exc:
+ self.logger.info("%s(%s): %s", self.server_id, self.floating['ip'],
+ str(exc))
+ return False
+ self.logger.info("%s(%s): Connected :)", self.server_id,
+ self.floating['ip'])
+ s.close()
+ return True
+
+ def check_port_ssh(self):
+ def func():
+ return self.tcp_connect_scan(self.floating['ip'], 22)
+ if not tempest.test.call_until_true(func, self.check_timeout,
+ self.check_interval):
+ raise RuntimeError("Cannot connect to the ssh port.")
+
+ def check_icmp_echo(self):
+ def func():
+ return self.ping_ip_address(self.floating['ip'])
+ if not tempest.test.call_until_true(func, self.check_timeout,
+ self.check_interval):
+ raise RuntimeError("Cannot ping the machine.")
+
+ def _create_vm(self):
+ self.name = name = rand_name("instance")
+ servers_client = self.manager.servers_client
+ self.logger.info("creating %s" % name)
+ vm_args = self.vm_extra_args.copy()
+ vm_args['security_groups'] = [{'name': self.sec_grp}]
+ resp, server = servers_client.create_server(name, self.image,
+ self.flavor,
+ **vm_args)
+ self.server_id = server['id']
+ assert(resp.status == 202)
+ if self.wait_after_vm_create:
+ self.manager.servers_client.wait_for_server_status(self.server_id,
+ 'ACTIVE')
+
+ def _destroy_vm(self):
+ self.logger.info("deleting %s" % self.server_id)
+ resp, _ = self.manager.servers_client.delete_server(self.server_id)
+ assert(resp.status == 204) # It cannot be 204 if I had to wait..
+ self.manager.servers_client.wait_for_server_termination(self.server_id)
+ self.logger.info("deleted %s" % self.server_id)
+
+ def _create_sec_group(self):
+ sec_grp_cli = self.manager.security_groups_client
+ s_name = rand_name('sec_grp-')
+ s_description = rand_name('desc-')
+ _, _sec_grp = sec_grp_cli.create_security_group(s_name,
+ s_description)
+ self.sec_grp = _sec_grp['id']
+ create_rule = sec_grp_cli.create_security_group_rule
+ create_rule(self.sec_grp, 'tcp', 22, 22)
+ create_rule(self.sec_grp, 'icmp', -1, -1)
+
+ def _destroy_sec_grp(self):
+ sec_grp_cli = self.manager.security_groups_client
+ sec_grp_cli.delete_security_group(self.sec_grp)
+
+ def _create_floating_ip(self):
+ floating_cli = self.manager.floating_ips_client
+ _, self.floating = floating_cli.create_floating_ip(self.floating_pool)
+
+ def _destroy_floating_ip(self):
+ cli = self.manager.floating_ips_client
+ cli.delete_floating_ip(self.floating['id'])
+ cli.wait_for_resource_deletion(self.floating['id'])
+ self.logger.info("Deleted Floating IP %s", str(self.floating['ip']))
+
+ def setUp(self, **kwargs):
+ self.image = self.manager.config.compute.image_ref
+ self.flavor = self.manager.config.compute.flavor_ref
+ self.vm_extra_args = kwargs.get('vm_extra_args', {})
+ self.wait_after_vm_create = kwargs.get('wait_after_vm_create',
+ True)
+ self.new_vm = kwargs.get('new_vm', False)
+ self.new_sec_grp = kwargs.get('new_sec_group', False)
+ self.new_floating = kwargs.get('new_floating', False)
+ self.reboot = kwargs.get('reboot', False)
+ self.floating_pool = kwargs.get('floating_pool', None)
+ self.verify = kwargs.get('verify', ('check_port_ssh',
+ 'check_icmp_echo'))
+ self.check_timeout = kwargs.get('check_timeout', 120)
+ self.check_interval = kwargs.get('check_interval', 1)
+ self.wait_for_disassociate = kwargs.get('wait_for_disassociate',
+ True)
+
+ # allocate floating
+ if not self.new_floating:
+ self._create_floating_ip()
+ # add security group
+ if not self.new_sec_grp:
+ self._create_sec_group()
+ # create vm
+ if not self.new_vm:
+ self._create_vm()
+
+ def wait_disassociate(self):
+ cli = self.manager.floating_ips_client
+
+ def func():
+ _, floating = cli.get_floating_ip_details(self.floating['id'])
+ return floating['instance_id'] is None
+
+ if not tempest.test.call_until_true(func, self.check_timeout,
+ self.check_interval):
+ raise RuntimeError("IP disassociate timeout!")
+
+ def run_core(self):
+ cli = self.manager.floating_ips_client
+ cli.associate_floating_ip_to_server(self.floating['ip'],
+ self.server_id)
+ for method in self.verify:
+ m = getattr(self, method)
+ m()
+ cli.disassociate_floating_ip_from_server(self.floating['ip'],
+ self.server_id)
+ if self.wait_for_disassociate:
+ self.wait_disassociate()
+
+ def run(self):
+ if self.new_sec_grp:
+ self._create_sec_group()
+ if self.new_floating:
+ self._create_floating_ip()
+ if self.new_vm:
+ self._create_vm()
+ if self.reboot:
+ self.manager.servers_client.reboot(self.server_id, 'HARD')
+
+ self.run_core()
+
+ if self.new_vm:
+ self._destroy_vm()
+ if self.new_floating:
+ self._destroy_floating_ip()
+ if self.new_sec_grp:
+ self._destroy_sec_grp()
+
+ def tearDown(self):
+ if not self.new_vm:
+ self._destroy_vm()
+ if not self.new_floating:
+ self._destroy_floating_ip()
+ if not self.new_sec_grp:
+ self._destroy_sec_grp()
diff --git a/tempest/stress/actions/unit_test.py b/tempest/stress/actions/unit_test.py
index 95cc1bc..5ab5573 100644
--- a/tempest/stress/actions/unit_test.py
+++ b/tempest/stress/actions/unit_test.py
@@ -13,6 +13,7 @@
# under the License.
from tempest.openstack.common import importutils
+from tempest.openstack.common import log as logging
import tempest.stress.stressaction as stressaction
@@ -46,6 +47,7 @@
method = kwargs['test_method'].split('.')
self.test_method = method.pop()
self.klass = importutils.import_class('.'.join(method))
+ self.logger = logging.getLogger('.'.join(method))
# valid options are 'process', 'application' , 'action'
self.class_setup_per = kwargs.get('class_setup_per',
SetUpClassRunTime.process)
@@ -55,6 +57,12 @@
self.klass.setUpClass()
self.setupclass_called = False
+ @property
+ def action(self):
+ if self.test_method:
+ return self.test_method
+ return super(UnitTest, self).action
+
def run_core(self):
res = self.klass(self.test_method).run()
if res.errors:
diff --git a/tempest/stress/driver.py b/tempest/stress/driver.py
index efc57a9..e518d28 100644
--- a/tempest/stress/driver.py
+++ b/tempest/stress/driver.py
@@ -102,6 +102,8 @@
"""
logfiles = admin_manager.config.stress.target_logfiles
log_check_interval = int(admin_manager.config.stress.log_check_interval)
+ default_thread_num = int(admin_manager.config.stress.
+ default_thread_number_per_action)
if logfiles:
controller = admin_manager.config.stress.target_controller
computes = _get_compute_nodes(controller)
@@ -112,7 +114,7 @@
manager = admin_manager
else:
manager = clients.Manager()
- for p_number in xrange(test.get('threads', 1)):
+ for p_number in xrange(test.get('threads', default_thread_num)):
if test.get('use_isolated_tenants', False):
username = rand_name("stress_user")
tenant_name = rand_name("stress_tenant")
@@ -146,7 +148,7 @@
process = {'process': p,
'p_number': p_number,
- 'action': test['action'],
+ 'action': test_run.action,
'statistic': shared_statistic}
processes.append(process)
diff --git a/tempest/stress/etc/ssh_floating.json b/tempest/stress/etc/ssh_floating.json
new file mode 100644
index 0000000..0cb6776
--- /dev/null
+++ b/tempest/stress/etc/ssh_floating.json
@@ -0,0 +1,16 @@
+[{"action": "tempest.stress.actions.ssh_floating.FloatingStress",
+ "threads": 8,
+ "use_admin": false,
+ "use_isolated_tenants": false,
+ "kwargs": {"vm_extra_args": {},
+ "new_vm": true,
+ "new_sec_group": true,
+ "new_floating": true,
+ "verify": ["check_icmp_echo", "check_port_ssh"],
+ "check_timeout": 120,
+             "check_interval": 1,
+ "wait_after_vm_create": true,
+ "wait_for_disassociate": true,
+ "reboot": false}
+}
+]
diff --git a/tempest/stress/run_stress.py b/tempest/stress/run_stress.py
index 32e3ae0..886d94b 100755
--- a/tempest/stress/run_stress.py
+++ b/tempest/stress/run_stress.py
@@ -17,15 +17,60 @@
# limitations under the License.
import argparse
+import inspect
import json
import sys
+from testtools.testsuite import iterate_tests
+from unittest import loader
+
+
+def discover_stress_tests(path="./", filter_attr=None, call_inherited=False):
+    """Discovers all tempest tests and creates actions out of them
+ """
+ tests = []
+ testloader = loader.TestLoader()
+ list = testloader.discover(path)
+ for func in (iterate_tests(list)):
+ try:
+ method_name = getattr(func, '_testMethodName')
+ full_name = "%s.%s.%s" % (func.__module__,
+ func.__class__.__name__,
+ method_name)
+ test_func = getattr(func, method_name)
+ # NOTE(mkoderer): this contains a list of all type attributes
+ attrs = getattr(test_func, "__testtools_attrs")
+ except Exception:
+            continue
+ if 'stress' in attrs:
+            if filter_attr is not None and filter_attr not in attrs:
+ continue
+ class_setup_per = getattr(test_func, "st_class_setup_per")
+
+ action = {'action':
+ "tempest.stress.actions.unit_test.UnitTest",
+ 'kwargs': {"test_method": full_name,
+ "class_setup_per": class_setup_per
+ }
+ }
+ if (not call_inherited and
+ getattr(test_func, "st_allow_inheritance") is not True):
+ class_structure = inspect.getmro(test_func.im_class)
+ if test_func.__name__ not in class_structure[0].__dict__:
+ continue
+ tests.append(action)
+ return tests
def main(ns):
# NOTE(mkoderer): moved import to make "-h" possible without OpenStack
from tempest.stress import driver
result = 0
- tests = json.load(open(ns.tests, 'r'))
+ if not ns.all:
+ tests = json.load(open(ns.tests, 'r'))
+ else:
+ tests = discover_stress_tests(filter_attr=ns.type,
+ call_inherited=ns.call_inherited)
+
if ns.serial:
for test in tests:
step_result = driver.stress_openstack([test],
@@ -40,16 +85,25 @@
return result
-parser = argparse.ArgumentParser(description='Run stress tests. ')
+parser = argparse.ArgumentParser(description='Run stress tests')
parser.add_argument('-d', '--duration', default=300, type=int,
- help="Duration of test in secs.")
+ help="Duration of test in secs")
parser.add_argument('-s', '--serial', action='store_true',
- help="Trigger running tests serially.")
+ help="Trigger running tests serially")
parser.add_argument('-S', '--stop', action='store_true',
- default=False, help="Stop on first error.")
+ default=False, help="Stop on first error")
parser.add_argument('-n', '--number', type=int,
- help="How often an action is executed for each process.")
-parser.add_argument('tests', help="Name of the file with test description.")
+ help="How often an action is executed for each process")
+group = parser.add_mutually_exclusive_group(required=True)
+group.add_argument('-a', '--all', action='store_true',
+ help="Execute all stress tests")
+parser.add_argument('-T', '--type',
+ help="Filters tests of a certain type (e.g. gate)")
+parser.add_argument('-i', '--call-inherited', action='store_true',
+ default=False,
+ help="Call also inherited function with stress attribute")
+group.add_argument('-t', "--tests", nargs='?',
+ help="Name of the file with test description")
if __name__ == "__main__":
sys.exit(main(parser.parse_args()))
diff --git a/tempest/stress/stressaction.py b/tempest/stress/stressaction.py
index 3719841..28251af 100644
--- a/tempest/stress/stressaction.py
+++ b/tempest/stress/stressaction.py
@@ -33,6 +33,13 @@
self.tearDown()
sys.exit(0)
+ @property
+ def action(self):
+        """This method returns the action. Overload this if you
+ create a stress test wrapper.
+ """
+ return self.__class__.__name__
+
def setUp(self, **kwargs):
"""This method is called before the run method
to help the test initiatlize any structures.
@@ -60,6 +67,8 @@
while self.max_runs is None or (shared_statistic['runs'] <
self.max_runs):
+ self.logger.debug("Trigger new run (run %d)" %
+ shared_statistic['runs'])
try:
self.run()
except Exception:
diff --git a/tempest/test.py b/tempest/test.py
index 68cedf0..24c4489 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -26,6 +26,7 @@
from tempest import clients
from tempest import config
+from tempest import exceptions
from tempest.openstack.common import log as logging
LOG = logging.getLogger(__name__)
@@ -57,6 +58,51 @@
return decorator
+def services(*args, **kwargs):
+ """A decorator used to set an attr for each service used in a test case
+
+ This decorator applies a testtools attr for each service that gets
+ exercised by a test case.
+ """
+ valid_service_list = ['compute', 'image', 'volume', 'orchestration',
+ 'network', 'identity', 'object', 'dashboard']
+
+ def decorator(f):
+ for service in args:
+ if service not in valid_service_list:
+ raise exceptions.InvalidServiceTag('%s is not a valid service'
+ % service)
+ attr(type=list(args))(f)
+ return f
+ return decorator
+
+
+def stresstest(*args, **kwargs):
+ """Add stress test decorator
+
+    For all functions with this decorator an attr stress will be
+ set automatically.
+
+ @param class_setup_per: allowed values are application, process, action
+ ``application``: once in the stress job lifetime
+ ``process``: once in the worker process lifetime
+ ``action``: on each action
+ @param allow_inheritance: allows inheritance of this attribute
+ """
+ def decorator(f):
+ if 'class_setup_per' in kwargs:
+ setattr(f, "st_class_setup_per", kwargs['class_setup_per'])
+ else:
+ setattr(f, "st_class_setup_per", 'process')
+ if 'allow_inheritance' in kwargs:
+ setattr(f, "st_allow_inheritance", kwargs['allow_inheritance'])
+ else:
+ setattr(f, "st_allow_inheritance", False)
+ attr(type='stress')(f)
+ return f
+ return decorator
+
+
# there is a mis-match between nose and testtools for older pythons.
# testtools will set skipException to be either
# unittest.case.SkipTest, unittest2.case.SkipTest or an internal skip
@@ -120,7 +166,7 @@
@classmethod
def tearDownClass(cls):
- at_exit_set.remove(cls)
+ at_exit_set.discard(cls)
if hasattr(super(BaseTestCase, cls), 'tearDownClass'):
super(BaseTestCase, cls).tearDownClass()
@@ -147,6 +193,11 @@
os.environ.get('OS_STDERR_CAPTURE') == '1'):
stderr = self.useFixture(fixtures.StringStream('stderr')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
+ if (os.environ.get('OS_LOG_CAPTURE') != 'False' and
+ os.environ.get('OS_LOG_CAPTURE') != '0'):
+ log_format = '%(asctime)-15s %(message)s'
+ self.useFixture(fixtures.LoggerFixture(nuke_handlers=False,
+ format=log_format))
@classmethod
def _get_identity_admin_client(cls):
diff --git a/tempest/tests/README.rst b/tempest/tests/README.rst
new file mode 100644
index 0000000..4098686
--- /dev/null
+++ b/tempest/tests/README.rst
@@ -0,0 +1,25 @@
+Tempest Guide to Unit tests
+===========================
+
+What are these tests?
+---------------------
+
+Unit tests are the self checks for Tempest. They provide functional
+verification and regression checking for the internal components of tempest.
+They should be used to just verify that the individual pieces of tempest are
+working as expected. They should not require an external service to be running
+and should be able to run solely from the tempest tree.
+
+Why are these tests in tempest?
+-------------------------------
+These tests exist to make sure that the mechanisms that we use inside of
+tempest are valid and remain functional. They are only here for self
+validation of tempest.
+
+
+Scope of these tests
+--------------------
+Unit tests should not require an external service to be running or any extra
+configuration to run. Any state that is required for a test should either be
+mocked out or created in a temporary test directory. (see test_wrappers.py for
+an example of using a temporary test directory)
diff --git a/tempest/whitebox/__init__.py b/tempest/tests/__init__.py
similarity index 100%
rename from tempest/whitebox/__init__.py
rename to tempest/tests/__init__.py
diff --git a/tempest/whitebox/__init__.py b/tempest/tests/files/__init__.py
similarity index 100%
copy from tempest/whitebox/__init__.py
copy to tempest/tests/files/__init__.py
diff --git a/tempest/tests/files/failing-tests b/tempest/tests/files/failing-tests
new file mode 100644
index 0000000..0ec5421
--- /dev/null
+++ b/tempest/tests/files/failing-tests
@@ -0,0 +1,25 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import testtools
+
+class FakeTestClass(testtools.TestCase):
+ def test_pass(self):
+ self.assertTrue(False)
+
+ def test_pass_list(self):
+ test_list = ['test', 'a', 'b']
+ self.assertIn('fail', test_list)
diff --git a/tempest/tests/files/passing-tests b/tempest/tests/files/passing-tests
new file mode 100644
index 0000000..2f5b7c9
--- /dev/null
+++ b/tempest/tests/files/passing-tests
@@ -0,0 +1,25 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import testtools
+
+class FakeTestClass(testtools.TestCase):
+ def test_pass(self):
+ self.assertTrue(True)
+
+ def test_pass_list(self):
+ test_list = ['test', 'a', 'b']
+ self.assertIn('test', test_list)
diff --git a/tempest/tests/files/setup.cfg b/tempest/tests/files/setup.cfg
new file mode 100644
index 0000000..8639baa
--- /dev/null
+++ b/tempest/tests/files/setup.cfg
@@ -0,0 +1,20 @@
+[metadata]
+name = tempest_unit_tests
+version = 1
+summary = Fake Project for testing wrapper scripts
+author = OpenStack QA
+author-email = openstack-qa@lists.openstack.org
+home-page = http://www.openstack.org/
+classifier =
+ Intended Audience :: Information Technology
+ Intended Audience :: System Administrators
+ Intended Audience :: Developers
+ License :: OSI Approved :: Apache Software License
+ Operating System :: POSIX :: Linux
+ Programming Language :: Python
+ Programming Language :: Python :: 2
+ Programming Language :: Python :: 2.7
+
+[global]
+setup-hooks =
+ pbr.hooks.setup_hook
diff --git a/tempest/tests/files/testr-conf b/tempest/tests/files/testr-conf
new file mode 100644
index 0000000..d5ad083
--- /dev/null
+++ b/tempest/tests/files/testr-conf
@@ -0,0 +1,5 @@
+[DEFAULT]
+test_command=${PYTHON:-python} -m subunit.run discover -t ./ ./tests $LISTOPT $IDOPTION
+test_id_option=--load-list $IDFILE
+test_list_option=--list
+group_regex=([^\.]*\.)*
diff --git a/tempest/tests/test_wrappers.py b/tempest/tests/test_wrappers.py
new file mode 100644
index 0000000..aeea98d
--- /dev/null
+++ b/tempest/tests/test_wrappers.py
@@ -0,0 +1,103 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import shutil
+import subprocess
+import tempfile
+import testtools
+
+from tempest.test import attr
+
+DEVNULL = open(os.devnull, 'wb')
+
+
+class TestWrappers(testtools.TestCase):
+ def setUp(self):
+ super(TestWrappers, self).setUp()
+ # Setup test dirs
+ self.directory = tempfile.mkdtemp(prefix='tempest-unit')
+ self.test_dir = os.path.join(self.directory, 'tests')
+ os.mkdir(self.test_dir)
+ # Setup Test files
+ self.testr_conf_file = os.path.join(self.directory, '.testr.conf')
+ self.setup_cfg_file = os.path.join(self.directory, 'setup.cfg')
+ self.passing_file = os.path.join(self.test_dir, 'test_passing.py')
+ self.failing_file = os.path.join(self.test_dir, 'test_failing.py')
+ self.init_file = os.path.join(self.test_dir, '__init__.py')
+ self.setup_py = os.path.join(self.directory, 'setup.py')
+ shutil.copy('tempest/tests/files/testr-conf', self.testr_conf_file)
+ shutil.copy('tempest/tests/files/passing-tests', self.passing_file)
+ shutil.copy('tempest/tests/files/failing-tests', self.failing_file)
+ shutil.copy('setup.py', self.setup_py)
+ shutil.copy('tempest/tests/files/setup.cfg', self.setup_cfg_file)
+ shutil.copy('tempest/tests/files/__init__.py', self.init_file)
+
+ @attr(type='smoke')
+ def test_pretty_tox(self):
+ # Copy wrapper script and requirements:
+ pretty_tox = os.path.join(self.directory, 'pretty_tox.sh')
+ shutil.copy('tools/pretty_tox.sh', pretty_tox)
+ # Change directory, run wrapper and check result
+ self.addCleanup(os.chdir, os.path.abspath(os.curdir))
+ os.chdir(self.directory)
+ # Git init is required for the pbr testr command. pbr requires a git
+ # version or an sdist to work. so make the test directory a git repo
+ # too.
+ subprocess.call(['git', 'init'])
+ exit_code = subprocess.call('sh pretty_tox.sh tests.passing',
+ shell=True, stdout=DEVNULL, stderr=DEVNULL)
+ self.assertEquals(exit_code, 0)
+
+ @attr(type='smoke')
+ def test_pretty_tox_fails(self):
+ # Copy wrapper script and requirements:
+ pretty_tox = os.path.join(self.directory, 'pretty_tox.sh')
+ shutil.copy('tools/pretty_tox.sh', pretty_tox)
+ # Change directory, run wrapper and check result
+ self.addCleanup(os.chdir, os.path.abspath(os.curdir))
+ os.chdir(self.directory)
+ # Git init is required for the pbr testr command. pbr requires a git
+ # version or an sdist to work. so make the test directory a git repo
+ # too.
+ subprocess.call(['git', 'init'])
+ exit_code = subprocess.call('sh pretty_tox.sh', shell=True,
+ stdout=DEVNULL, stderr=DEVNULL)
+ self.assertEquals(exit_code, 1)
+
+ @attr(type='smoke')
+ def test_pretty_tox_serial(self):
+ # Copy wrapper script and requirements:
+ pretty_tox = os.path.join(self.directory, 'pretty_tox_serial.sh')
+ shutil.copy('tools/pretty_tox_serial.sh', pretty_tox)
+ # Change directory, run wrapper and check result
+ self.addCleanup(os.chdir, os.path.abspath(os.curdir))
+ os.chdir(self.directory)
+ exit_code = subprocess.call('sh pretty_tox_serial.sh tests.passing',
+ shell=True, stdout=DEVNULL, stderr=DEVNULL)
+ self.assertEquals(exit_code, 0)
+
+ @attr(type='smoke')
+ def test_pretty_tox_serial_fails(self):
+ # Copy wrapper script and requirements:
+ pretty_tox = os.path.join(self.directory, 'pretty_tox_serial.sh')
+ shutil.copy('tools/pretty_tox_serial.sh', pretty_tox)
+ # Change directory, run wrapper and check result
+ self.addCleanup(os.chdir, os.path.abspath(os.curdir))
+ os.chdir(self.directory)
+ exit_code = subprocess.call('sh pretty_tox_serial.sh', shell=True,
+ stdout=DEVNULL, stderr=DEVNULL)
+ self.assertEquals(exit_code, 1)
diff --git a/tempest/thirdparty/boto/test_ec2_instance_run.py b/tempest/thirdparty/boto/test_ec2_instance_run.py
index 5007503..a848fc9 100644
--- a/tempest/thirdparty/boto/test_ec2_instance_run.py
+++ b/tempest/thirdparty/boto/test_ec2_instance_run.py
@@ -233,7 +233,7 @@
# NOTE(afazekas): doctored test case,
# with normal validation it would fail
- @testtools.skip("Until Bug #1182679 is fixed")
+ @testtools.skip("Skipped until the Bug #1182679 is resolved.")
@attr(type='smoke')
def test_integration_1(self):
# EC2 1. integration test (not strict)
diff --git a/tempest/whitebox/README.rst b/tempest/whitebox/README.rst
deleted file mode 100644
index 0e45421..0000000
--- a/tempest/whitebox/README.rst
+++ /dev/null
@@ -1,46 +0,0 @@
-Tempest Guide to Whitebox tests
-===============================
-
-
-What are these tests?
----------------------
-
-When you hit the OpenStack API, this causes internal state changes in
-the system. This might be database transitions, vm modifications,
-other deep state changes which aren't really accessible from the
-OpenStack API. These side effects are sometimes important to
-validate.
-
-White box testing is an approach there. In white box testing you are
-given database access to the environment, and can verify internal
-record changes after an API call.
-
-This is an optional part of testing, and requires extra setup, but can
-be useful for validating Tempest internals.
-
-
-Why are these tests in tempest?
--------------------------------
-
-Especially when it comes to something like VM state changing, which is
-a coordination of numerous running daemons, and a functioning VM, it's
-very difficult to get a realistic test like this in unit tests.
-
-
-Scope of these tests
---------------------
-
-White box tests should be limitted to tests where black box testing
-(using the OpenStack API to verify results) isn't sufficient.
-
-As these poke at internals of OpenStack, it should also be realized
-that these tests are very tightly coupled to current implementation of
-OpenStack. They will need to be maintained agressively to keep up with
-internals changes in OpenStack projects.
-
-
-Example of a good test
-----------------------
-
-Pushing VMs through a series of state transitions, and ensuring along
-the way the database state transitions match what's expected.
diff --git a/tempest/whitebox/manager.py b/tempest/whitebox/manager.py
deleted file mode 100644
index 3b1b107..0000000
--- a/tempest/whitebox/manager.py
+++ /dev/null
@@ -1,146 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack, LLC
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import os
-import shlex
-import subprocess
-import sys
-
-from sqlalchemy import create_engine, MetaData
-
-from tempest.common.ssh import Client
-from tempest.common.utils.data_utils import rand_name
-from tempest import exceptions
-from tempest.openstack.common import log as logging
-from tempest.scenario import manager
-
-LOG = logging.getLogger(__name__)
-
-
-class WhiteboxTest(object):
-
- """
- Base test case class mixin for "whitebox tests"
-
- Whitebox tests are tests that have the following characteristics:
-
- * Test common and advanced operations against a set of servers
- * Use a client that it is possible to send random or bad data with
- * SSH into either a host or a guest in order to validate server state
- * May execute SQL queries directly against internal databases to verify
- the state of data records
- """
- pass
-
-
-class ComputeWhiteboxTest(manager.OfficialClientTest):
-
- """
- Base smoke test case class for OpenStack Compute API (Nova)
- """
-
- @classmethod
- def setUpClass(cls):
- super(ComputeWhiteboxTest, cls).setUpClass()
- if not cls.config.whitebox.whitebox_enabled:
- msg = "Whitebox testing disabled"
- raise cls.skipException(msg)
-
- # Add some convenience attributes that tests use...
- cls.nova_dir = cls.config.whitebox.source_dir
- cls.compute_bin_dir = cls.config.whitebox.bin_dir
- cls.compute_config_path = cls.config.whitebox.config_path
- cls.build_interval = cls.config.compute.build_interval
- cls.build_timeout = cls.config.compute.build_timeout
- cls.ssh_user = cls.config.compute.ssh_user
- cls.image_ref = cls.config.compute.image_ref
- cls.image_ref_alt = cls.config.compute.image_ref_alt
- cls.flavor_ref = cls.config.compute.flavor_ref
- cls.flavor_ref_alt = cls.config.compute.flavor_ref_alt
-
- # NOTE(afazekas): Mimics the helper method used in the api tests
- @classmethod
- def create_server(cls, **kwargs):
- flavor_ref = cls.config.compute.flavor_ref
- image_ref = cls.config.compute.image_ref
- name = rand_name(cls.__name__ + "-instance")
- if 'name' in kwargs:
- name = kwargs.pop('name')
- flavor = kwargs.get('flavor', flavor_ref)
- image_id = kwargs.get('image_id', image_ref)
-
- server = cls.compute_client.servers.create(
- name, image_id, flavor, **kwargs)
-
- if 'wait_until' in kwargs:
- cls.status_timeout(cls.compute_client.servers, server.id,
- server['id'], kwargs['wait_until'])
-
- server = cls.compute_client.servers.get(server.id)
- cls.set_resource(name, server)
- return server
-
- @classmethod
- def get_db_handle_and_meta(cls, database='nova'):
- """Return a connection handle and metadata of an OpenStack database."""
- engine_args = {"echo": False,
- "convert_unicode": True,
- "pool_recycle": 3600
- }
-
- try:
- engine = create_engine(cls.config.whitebox.db_uri, **engine_args)
- connection = engine.connect()
- meta = MetaData()
- meta.reflect(bind=engine)
-
- except Exception as e:
- raise exceptions.SQLException(message=e)
-
- return connection, meta
-
- def nova_manage(self, category, action, params):
- """Executes nova-manage command for the given action."""
-
- nova_manage_path = os.path.join(self.compute_bin_dir, 'nova-manage')
- cmd = ' '.join([nova_manage_path, category, action, params])
-
- if self.deploy_mode == 'devstack-local':
- if not os.path.isdir(self.nova_dir):
- sys.exit("Cannot find Nova source directory: %s" %
- self.nova_dir)
-
- cmd = shlex.split(cmd)
- result = subprocess.Popen(cmd, stdout=subprocess.PIPE)
-
- # TODO(rohitk): Need to define host connection parameters in config
- else:
- client = self.get_ssh_connection(self.config.whitebox.api_host,
- self.config.whitebox.api_user,
- self.config.whitebox.api_passwd)
- result = client.exec_command(cmd)
-
- return result
-
- def get_ssh_connection(self, host, username, password):
- """Create an SSH connection object to a host."""
- ssh_timeout = self.config.compute.ssh_timeout
- ssh_client = Client(host, username, password, ssh_timeout)
- if not ssh_client.test_connection_auth():
- raise exceptions.SSHTimeout()
- else:
- return ssh_client
diff --git a/tempest/whitebox/test_images_whitebox.py b/tempest/whitebox/test_images_whitebox.py
deleted file mode 100644
index 0afb17e..0000000
--- a/tempest/whitebox/test_images_whitebox.py
+++ /dev/null
@@ -1,159 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack, LLC
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.common.utils.data_utils import rand_name
-from tempest.whitebox import manager
-
-from novaclient import exceptions
-
-
-class ImagesWhiteboxTest(manager.ComputeWhiteboxTest):
- _interface = 'json'
-
- @classmethod
- def setUpClass(cls):
- super(ImagesWhiteboxTest, cls).setUpClass()
- cls.create_image = cls.compute_client.servers.create_image
- cls.connection, cls.meta = cls.get_db_handle_and_meta()
- cls.shared_server = cls.create_server()
- cls.image_ids = []
-
- @classmethod
- def tearDownClass(cls):
- """Delete images and server after a test is executed."""
- for image_id in cls.image_ids:
- cls.client.delete_image(image_id)
- cls.image_ids.remove(image_id)
- super(ImagesWhiteboxTest, cls).tearDownClass()
-
- @classmethod
- def update_state(self, server_id, vm_state, task_state, deleted=0):
- """Update states of an instance in database for validation."""
- if not task_state:
- task_state = "NULL"
-
- instances = self.meta.tables['instances']
- stmt = instances.update().where(instances.c.uuid == server_id).values(
- deleted=deleted,
- vm_state=vm_state,
- task_state=task_state)
-
- self.connection.execute(stmt, autocommit=True)
-
- def _test_create_image_409_base(self, vm_state, task_state, deleted=0):
- """Base method for create image tests based on vm and task states."""
- try:
- self.update_state(self.shared_server.id, vm_state,
- task_state, deleted)
-
- image_name = rand_name('snap-')
- self.assertRaises(exceptions.Conflict,
- self.create_image,
- self.shared_server.id, image_name)
- except Exception:
- self.fail("Should not allow create image when vm_state=%s and "
- "task_state=%s" % (vm_state, task_state))
- finally:
- self.update_state(self.shared_server.id, 'active', None)
-
- def test_create_image_when_vm_eq_building_task_eq_scheduling(self):
- # 409 error when instance states are building,scheduling
- self._test_create_image_409_base("building", "scheduling")
-
- def test_create_image_when_vm_eq_building_task_eq_networking(self):
- # 409 error when instance states are building,networking
- self._test_create_image_409_base("building", "networking")
-
- def test_create_image_when_vm_eq_building_task_eq_bdm(self):
- # 409 error when instance states are building,block_device_mapping
- self._test_create_image_409_base("building", "block_device_mapping")
-
- def test_create_image_when_vm_eq_building_task_eq_spawning(self):
- # 409 error when instance states are building,spawning
- self._test_create_image_409_base("building", "spawning")
-
- def test_create_image_when_vm_eq_active_task_eq_image_backup(self):
- # 409 error when instance states are active,image_backup
- self._test_create_image_409_base("active", "image_backup")
-
- def test_create_image_when_vm_eq_resized_task_eq_resize_prep(self):
- # 409 error when instance states are resized,resize_prep
- self._test_create_image_409_base("resized", "resize_prep")
-
- def test_create_image_when_vm_eq_resized_task_eq_resize_migrating(self):
- # 409 error when instance states are resized,resize_migrating
- self._test_create_image_409_base("resized", "resize_migrating")
-
- def test_create_image_when_vm_eq_resized_task_eq_resize_migrated(self):
- # 409 error when instance states are resized,resize_migrated
- self._test_create_image_409_base("resized", "resize_migrated")
-
- def test_create_image_when_vm_eq_resized_task_eq_resize_finish(self):
- # 409 error when instance states are resized,resize_finish
- self._test_create_image_409_base("resized", "resize_finish")
-
- def test_create_image_when_vm_eq_resized_task_eq_resize_reverting(self):
- # 409 error when instance states are resized,resize_reverting
- self._test_create_image_409_base("resized", "resize_reverting")
-
- def test_create_image_when_vm_eq_resized_task_eq_resize_confirming(self):
- # 409 error when instance states are resized,resize_confirming
- self._test_create_image_409_base("resized", "resize_confirming")
-
- def test_create_image_when_vm_eq_active_task_eq_resize_verify(self):
- # 409 error when instance states are active,resize_verify
- self._test_create_image_409_base("active", "resize_verify")
-
- def test_create_image_when_vm_eq_active_task_eq_updating_password(self):
- # 409 error when instance states are active,updating_password
- self._test_create_image_409_base("active", "updating_password")
-
- def test_create_image_when_vm_eq_active_task_eq_rebuilding(self):
- # 409 error when instance states are active,rebuilding
- self._test_create_image_409_base("active", "rebuilding")
-
- def test_create_image_when_vm_eq_active_task_eq_rebooting(self):
- # 409 error when instance states are active,rebooting
- self._test_create_image_409_base("active", "rebooting")
-
- def test_create_image_when_vm_eq_building_task_eq_deleting(self):
- # 409 error when instance states are building,deleting
- self._test_create_image_409_base("building", "deleting")
-
- def test_create_image_when_vm_eq_active_task_eq_deleting(self):
- # 409 error when instance states are active,deleting
- self._test_create_image_409_base("active", "deleting")
-
- def test_create_image_when_vm_eq_error_task_eq_building(self):
- # 409 error when instance states are error,building
- self._test_create_image_409_base("error", "building")
-
- def test_create_image_when_vm_eq_error_task_eq_none(self):
- # 409 error when instance states are error,None
- self._test_create_image_409_base("error", None)
-
- def test_create_image_when_vm_eq_deleted_task_eq_none(self):
- # 409 error when instance states are deleted,None
- self._test_create_image_409_base("deleted", None)
-
- def test_create_image_when_vm_eq_resized_task_eq_none(self):
- # 409 error when instance states are resized,None
- self._test_create_image_409_base("resized", None)
-
- def test_create_image_when_vm_eq_error_task_eq_resize_prep(self):
- # 409 error when instance states are error,resize_prep
- self._test_create_image_409_base("error", "resize_prep")
diff --git a/tempest/whitebox/test_servers_whitebox.py b/tempest/whitebox/test_servers_whitebox.py
deleted file mode 100644
index abe903c..0000000
--- a/tempest/whitebox/test_servers_whitebox.py
+++ /dev/null
@@ -1,184 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack, LLC
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.api.identity.base import BaseIdentityAdminTest
-from tempest import exceptions
-from tempest.whitebox import manager
-
-
-class ServersWhiteboxTest(manager.ComputeWhiteboxTest):
- _interface = 'json'
-
- @classmethod
- def setUpClass(cls):
- super(ServersWhiteboxTest, cls).setUpClass()
- # NOTE(afazekas): Strange relationship
- BaseIdentityAdminTest.setUpClass()
- cls.client = cls.servers_client
- cls.img_client = cls.images_client
- cls.admin_client = BaseIdentityAdminTest.client
-
- cls.connection, cls.meta = cls.get_db_handle_and_meta()
-
- resp, tenants = cls.admin_client.list_tenants()
- cls.tenant_id = [
- tnt['id']
- for tnt in tenants if tnt['name'] == cls.config.compute.tenant_name
- ][0]
-
- cls.shared_server = cls.create_server()
-
- def tearDown(cls):
- for server in cls.servers:
- try:
- cls.client.delete_server(server['id'])
- except exceptions.NotFound:
- continue
-
- def update_state(self, server_id, vm_state, task_state, deleted=0):
- """Update states of an instance in database for validation."""
- if not task_state:
- task_state = 'NULL'
-
- instances = self.meta.tables['instances']
- stmt = instances.update().where(instances.c.uuid == server_id).values(
- deleted=deleted,
- vm_state=vm_state,
- task_state=task_state)
- self.connection.execute(stmt, autocommit=True)
-
- def _test_delete_server_base(self, vm_state, task_state):
- """
- Base method for delete server tests based on vm and task states.
- Validates for successful server termination.
- """
- try:
- server = self.create_server()
- self.update_state(server['id'], vm_state, task_state)
-
- resp, body = self.client.delete_server(server['id'])
- self.assertEqual('204', resp['status'])
- self.client.wait_for_server_termination(server['id'],
- ignore_error=True)
-
- instances = self.meta.tables['instances']
- stmt = instances.select().where(instances.c.uuid == server['id'])
- result = self.connection.execute(stmt).first()
-
- self.assertEqual(True, result.deleted > 0)
- self.assertEqual('deleted', result.vm_state)
- self.assertEqual(None, result.task_state)
- except Exception:
- self.fail("Should be able to delete a server when vm_state=%s and "
- "task_state=%s" % (vm_state, task_state))
-
- def _test_delete_server_403_base(self, vm_state, task_state):
- """
- Base method for delete server tests based on vm and task states.
- Validates for 403 error code.
- """
- try:
- self.update_state(self.shared_server['id'], vm_state, task_state)
-
- self.assertRaises(exceptions.Unauthorized,
- self.client.delete_server,
- self.shared_server['id'])
- except Exception:
- self.fail("Should not allow delete server when vm_state=%s and "
- "task_state=%s" % (vm_state, task_state))
- finally:
- self.update_state(self.shared_server['id'], 'active', None)
-
- def test_delete_server_when_vm_eq_building_task_eq_networking(self):
- # Delete server when instance states are building,networking
- self._test_delete_server_base('building', 'networking')
-
- def test_delete_server_when_vm_eq_building_task_eq_bdm(self):
- # Delete server when instance states are building,block device mapping
- self._test_delete_server_base('building', 'block_device_mapping')
-
- def test_delete_server_when_vm_eq_building_task_eq_spawning(self):
- # Delete server when instance states are building,spawning
- self._test_delete_server_base('building', 'spawning')
-
- def test_delete_server_when_vm_eq_active_task_eq_image_backup(self):
- # Delete server when instance states are active,image_backup
- self._test_delete_server_base('active', 'image_backup')
-
- def test_delete_server_when_vm_eq_active_task_eq_rebuilding(self):
- # Delete server when instance states are active,rebuilding
- self._test_delete_server_base('active', 'rebuilding')
-
- def test_delete_server_when_vm_eq_error_task_eq_spawning(self):
- # Delete server when instance states are error,spawning
- self._test_delete_server_base('error', 'spawning')
-
- def test_delete_server_when_vm_eq_resized_task_eq_resize_prep(self):
- # Delete server when instance states are resized,resize_prep
- self._test_delete_server_403_base('resized', 'resize_prep')
-
- def test_delete_server_when_vm_eq_resized_task_eq_resize_migrating(self):
- # Delete server when instance states are resized,resize_migrating
- self._test_delete_server_403_base('resized', 'resize_migrating')
-
- def test_delete_server_when_vm_eq_resized_task_eq_resize_migrated(self):
- # Delete server when instance states are resized,resize_migrated
- self._test_delete_server_403_base('resized', 'resize_migrated')
-
- def test_delete_server_when_vm_eq_resized_task_eq_resize_finish(self):
- # Delete server when instance states are resized,resize_finish
- self._test_delete_server_403_base('resized', 'resize_finish')
-
- def test_delete_server_when_vm_eq_resized_task_eq_resize_reverting(self):
- # Delete server when instance states are resized,resize_reverting
- self._test_delete_server_403_base('resized', 'resize_reverting')
-
- def test_delete_server_when_vm_eq_resized_task_eq_resize_confirming(self):
- # Delete server when instance states are resized,resize_confirming
- self._test_delete_server_403_base('resized', 'resize_confirming')
-
- def test_delete_server_when_vm_eq_active_task_eq_resize_verify(self):
- # Delete server when instance states are active,resize_verify
- self._test_delete_server_base('active', 'resize_verify')
-
- def test_delete_server_when_vm_eq_active_task_eq_rebooting(self):
- # Delete server when instance states are active,rebooting
- self._test_delete_server_base('active', 'rebooting')
-
- def test_delete_server_when_vm_eq_building_task_eq_deleting(self):
- # Delete server when instance states are building,deleting
- self._test_delete_server_base('building', 'deleting')
-
- def test_delete_server_when_vm_eq_active_task_eq_deleting(self):
- # Delete server when instance states are active,deleting
- self._test_delete_server_base('active', 'deleting')
-
- def test_delete_server_when_vm_eq_error_task_eq_none(self):
- # Delete server when instance states are error,None
- self._test_delete_server_base('error', None)
-
- def test_delete_server_when_vm_eq_resized_task_eq_none(self):
- # Delete server when instance states are resized,None
- self._test_delete_server_403_base('resized', None)
-
- def test_delete_server_when_vm_eq_error_task_eq_resize_prep(self):
- # Delete server when instance states are error,resize_prep
- self._test_delete_server_base('error', 'resize_prep')
-
- def test_delete_server_when_vm_eq_error_task_eq_error(self):
- # Delete server when instance states are error,error
- self._test_delete_server_base('error', 'error')
diff --git a/tox.ini b/tox.ini
index dc48735..0b57eb2 100644
--- a/tox.ini
+++ b/tox.ini
@@ -19,13 +19,13 @@
# The regex below is used to select which tests to run and exclude the slow tag:
# See the testrepostiory bug: https://bugs.launchpad.net/testrepository/+bug/1208610
commands =
- sh tools/pretty_tox.sh '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario|thirdparty|cli)) {posargs}'
+ sh tools/pretty_tox.sh '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario|thirdparty|cli|tests)) {posargs}'
[testenv:testr-full]
sitepackages = True
setenv = VIRTUAL_ENV={envdir}
commands =
- sh tools/pretty_tox.sh '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario|thirdparty|cli)) {posargs}'
+ sh tools/pretty_tox.sh '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario|thirdparty|cli|tests)) {posargs}'
[testenv:heat-slow]
sitepackages = True
@@ -34,6 +34,13 @@
commands =
sh tools/pretty_tox_serial.sh '(?=.*\[.*\bslow\b.*\])(^tempest\.(api|scenario)\.orchestration) {posargs}'
+[testenv:large-ops]
+sitepackages = True
+setenv = VIRTUAL_ENV={envdir}
+commands =
+ python setup.py testr --slowest --testr-args='tempest.scenario.test_large_ops {posargs}'
+
+
[testenv:py26-full]
sitepackages = True
setenv = VIRTUAL_ENV={envdir}
@@ -44,7 +51,7 @@
NOSE_OPENSTACK_SHOW_ELAPSED=1
NOSE_OPENSTACK_STDOUT=1
commands =
- nosetests --logging-format '%(asctime)-15s %(message)s' --with-xunit -sv --xunit-file=nosetests-full.xml tempest/api tempest/scenario tempest/thirdparty tempest/cli {posargs}
+ nosetests --logging-format '%(asctime)-15s %(message)s' --with-xunit -sv --xunit-file=nosetests-full.xml tempest/api tempest/scenario tempest/thirdparty tempest/cli tempest/tests {posargs}
[testenv:py26-smoke]
setenv = VIRTUAL_ENV={envdir}
@@ -71,14 +78,14 @@
setenv = VIRTUAL_ENV={envdir}
commands =
python -m tools/tempest_coverage -c start --combine
- sh tools/pretty_tox.sh '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario|thirdparty|cli))'
+ sh tools/pretty_tox.sh '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario|thirdparty|cli|tests))'
python -m tools/tempest_coverage -c report --html {posargs}
[testenv:stress]
sitepackages = True
setenv = VIRTUAL_ENV={envdir}
commands =
- python -m tempest/stress/run_stress tempest/stress/etc/stress-tox-job.json -d 3600
+ python -m tempest/stress/run_stress -a -d 3600
[testenv:venv]
commands = {posargs}