Merge "Log request ids from glance and nova"
diff --git a/doc/source/field_guide/unit_tests.rst b/doc/source/field_guide/unit_tests.rst
new file mode 120000
index 0000000..67a8b20
--- /dev/null
+++ b/doc/source/field_guide/unit_tests.rst
@@ -0,0 +1 @@
+../../../tempest/tests/README.rst
\ No newline at end of file
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 00c4e9a..f70cdd1 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -32,6 +32,7 @@
field_guide/stress
field_guide/thirdparty
field_guide/whitebox
+ field_guide/unit_tests
------------------
API and test cases
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index f1aaa07..d39ef70 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -1,5 +1,5 @@
[DEFAULT]
-# log_config = /opt/stack/tempest/etc/logging.conf.sample
+#log_config = /opt/stack/tempest/etc/logging.conf.sample
# disable logging to the stderr
use_stderr = False
@@ -272,6 +272,9 @@
# Set to True if the Account Quota middleware is enabled
accounts_quotas_available = True
+# Set operator role for tests that require creating a container
+operator_role = Member
+
[boto]
# This section contains configuration options used when executing tests
# with boto.
@@ -285,7 +288,7 @@
aws_access =
aws_secret =
-#Image materials for S3 upload
+# Image materials for S3 upload
# ALL content of the specified directory will be uploaded to S3
s3_materials_path = /opt/stack/devstack/files/images/s3-materials/cirros-0.3.1
@@ -293,22 +296,22 @@
# Subdirectories not allowed!
# The filenames will be used as a Keys in the S3 Buckets
-#ARI Ramdisk manifest. Must be in the above s3_materials_path
+# ARI Ramdisk manifest. Must be in the above s3_materials_path
ari_manifest = cirros-0.3.1-x86_64-initrd.manifest.xml
-#AMI Machine Image manifest. Must be in the above s3_materials_path
+# AMI Machine Image manifest. Must be in the above s3_materials_path
ami_manifest = cirros-0.3.1-x86_64-blank.img.manifest.xml
-#AKI Kernel Image manifest, Must be in the above s3_materials_path
+# AKI Kernel Image manifest, Must be in the above s3_materials_path
aki_manifest = cirros-0.3.1-x86_64-vmlinuz.manifest.xml
-#Instance type
+# Instance type
instance_type = m1.tiny
-#TCP/IP connection timeout
+# TCP/IP connection timeout
http_socket_timeout = 5
-#Number of retries actions on connection or 5xx error
+# Number of times to retry actions on connection or 5xx errors
num_retries = 1
# Status change wait timout
@@ -387,3 +390,11 @@
heat = false
# Whether or not horizon is expected to be available
horizon = True
+
+[stress]
+# Maximum number of instances to create during test
+max_instances = 32
+# Time (in seconds) between log file error checks
+log_check_interval = 60
+# Default number of threads created per action during a stress test
+default_thread_number_per_action = 4
diff --git a/requirements.txt b/requirements.txt
index cc61b01..877b23c 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,22 +1,23 @@
-d2to1>=0.2.10,<0.3
-pbr>=0.5,<0.6
-anyjson
+pbr>=0.5.21,<1.0
+anyjson>=0.3.3
nose
-httplib2>=0.7.0
+httplib2
+jsonschema>=1.3.0,!=1.4.0
testtools>=0.9.32
-lxml
-boto>=2.2.1
-paramiko
+lxml>=2.3
+boto>=2.4.0
+paramiko>=1.8.0
netaddr
-python-glanceclient>=0.5.0
-python-keystoneclient>=0.2.0
-python-novaclient>=2.10.0
-python-neutronclient>=2.2.3,<3.0.0
+python-glanceclient>=0.9.0
+python-keystoneclient>=0.3.0
+python-novaclient>=2.12.0
+python-neutronclient>=2.2.3,<3
python-cinderclient>=1.0.4
-testresources
-keyring
-testrepository
+python-heatclient>=0.2.3
+testresources>=0.2.4
+keyring>=1.6.1
+testrepository>=0.0.17
oslo.config>=1.1.0
# Needed for whitebox testing
-sqlalchemy
-eventlet>=0.12.0
+SQLAlchemy>=0.7.8,<=0.7.99
+eventlet>=0.13.0
diff --git a/run_tests.sh b/run_tests.sh
index f8636c1..856ce54 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -11,7 +11,7 @@
echo " -u, --update Update the virtual environment with any newer package versions"
echo " -s, --smoke Only run smoke tests"
echo " -w, --whitebox Only run whitebox tests"
- echo " -t, --parallel Run testr parallel"
+ echo " -t, --serial Run testr serially"
echo " -c, --nova-coverage Enable Nova coverage collection"
echo " -C, --config Config file location"
echo " -p, --pep8 Just run pep8"
@@ -26,7 +26,7 @@
just_pep8=0
venv=.venv
with_venv=tools/with_venv.sh
-parallel=0
+serial=0
always_venv=0
never_venv=0
no_site_packages=0
@@ -38,7 +38,7 @@
logging=0
logging_config=etc/logging.conf
-if ! options=$(getopt -o VNnfuswtcphdC:lL: -l virtual-env,no-virtual-env,no-site-packages,force,update,smoke,whitebox,parallel,nova-coverage,pep8,help,debug,config:,logging,logging-config: -- "$@")
+if ! options=$(getopt -o VNnfuswtcphdC:lL: -l virtual-env,no-virtual-env,no-site-packages,force,update,smoke,whitebox,serial,nova-coverage,pep8,help,debug,config:,logging,logging-config: -- "$@")
then
# parse error
usage
@@ -61,7 +61,7 @@
-p|--pep8) let just_pep8=1;;
-s|--smoke) testrargs="$testrargs smoke";;
-w|--whitebox) testrargs="$testrargs whitebox";;
- -t|--parallel) parallel=1;;
+ -t|--serial) serial=1;;
-l|--logging) logging=1;;
-L|--logging-config) logging_config=$2; shift;;
--) [ "yes" == "$first_uu" ] || testrargs="$testrargs $1"; first_uu=no ;;
@@ -101,13 +101,28 @@
function run_tests {
testr_init
${wrapper} find . -type f -name "*.pyc" -delete
- if [ $parallel -eq 1 ]; then
- ${wrapper} testr run --parallel --subunit $testrargs | ${wrapper} subunit-2to1 | ${wrapper} tools/colorizer.py
- else
+ if [ $serial -eq 1 ]; then
${wrapper} testr run --subunit $testrargs | ${wrapper} subunit-2to1 | ${wrapper} tools/colorizer.py
+ else
+ ${wrapper} testr run --parallel --subunit $testrargs | ${wrapper} subunit-2to1 | ${wrapper} tools/colorizer.py
fi
}
+function run_tests_nose {
+ NOSE_WITH_OPENSTACK=1
+ NOSE_OPENSTACK_COLOR=1
+ NOSE_OPENSTACK_RED=15.00
+ NOSE_OPENSTACK_YELLOW=3.00
+ NOSE_OPENSTACK_SHOW_ELAPSED=1
+ NOSE_OPENSTACK_STDOUT=1
+ if [[ "x$noseargs" =~ "tempest" ]]; then
+ noseargs="$testrargs"
+ else
+ noseargs="$noseargs tempest"
+ fi
+ ${wrapper} nosetests $noseargs
+}
+
function run_pep8 {
echo "Running pep8 ..."
${wrapper} flake8
@@ -162,7 +177,13 @@
run_coverage_start
fi
-run_tests
+
+py_version=`${wrapper} python --version 2>&1`
+if [[ $py_version =~ "2.6" ]] ; then
+ run_tests_nose
+else
+ run_tests
+fi
retval=$?
if [ $nova_coverage -eq 1 ]; then
diff --git a/tempest/api/README.rst b/tempest/api/README.rst
index 617fda4..9d8dc10 100644
--- a/tempest/api/README.rst
+++ b/tempest/api/README.rst
@@ -9,15 +9,15 @@
works with the OpenStack API as documented. The current largest
portion of Tempest code is devoted to test cases that do exactly this.
-It's also important to test not only the expected possitive path on
+It's also important to test not only the expected positive path on
APIs, but also to provide them with invalid data to ensure they fail
in expected and documented ways. Over the course of the OpenStack
project Tempest has discovered many fundamental bugs by doing just
this.
-In order for some APIs to return meaniful results, there must be
+In order for some APIs to return meaningful results, there must be
enough data in the system. This means these tests might start by
-spinning up a server, image, etc, then opperating on it.
+spinning up a server, image, etc., then operating on it.
Why are these tests in tempest?
diff --git a/tempest/api/compute/admin/test_fixed_ips.py b/tempest/api/compute/admin/test_fixed_ips.py
index 8b96370..895f773 100644
--- a/tempest/api/compute/admin/test_fixed_ips.py
+++ b/tempest/api/compute/admin/test_fixed_ips.py
@@ -15,8 +15,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import testtools
-
from tempest.api.compute import base
from tempest import config
from tempest import exceptions
@@ -30,6 +28,9 @@
@classmethod
def setUpClass(cls):
super(FixedIPsBase, cls).setUpClass()
+ if cls.config.service_available.neutron:
+ msg = ("%s skipped as neutron is available" % cls.__name__)
+ raise cls.skipException(msg)
# NOTE(maurosr): The idea here is: the server creation is just an
# auxiliary element to the ip details or reservation, there was no way
# (at least none in my mind) to get an valid and existing ip except
@@ -56,8 +57,6 @@
CONF = config.TempestConfig()
- @testtools.skipIf(CONF.service_available.neutron, "This feature is not" +
- "implemented by Neutron. See bug: #1194569")
@attr(type='gate')
def test_list_fixed_ip_details(self):
resp, fixed_ip = self.client.get_fixed_ip_details(self.ip)
diff --git a/tempest/api/compute/admin/test_flavors.py b/tempest/api/compute/admin/test_flavors.py
index 6d0a5b5..5f31084 100644
--- a/tempest/api/compute/admin/test_flavors.py
+++ b/tempest/api/compute/admin/test_flavors.py
@@ -15,6 +15,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import testtools
+
from tempest.api import compute
from tempest.api.compute import base
from tempest.common.utils.data_utils import rand_int_id
@@ -193,9 +195,10 @@
flag = True
self.assertTrue(flag)
+ @testtools.skip("Skipped until the Bug #1209101 is resolved")
@attr(type='gate')
- def test_flavor_not_public_verify_entry_not_in_list_details(self):
- # Create a flavor with os-flavor-access:is_public false should not
+ def test_list_non_public_flavor(self):
+        # A flavor created with os-flavor-access:is_public=False should still
# be present in list_details.
# This operation requires the user to have 'admin' role
flavor_name = rand_name(self.flavor_name_prefix)
@@ -208,13 +211,22 @@
new_flavor_id,
is_public="False")
self.addCleanup(self.flavor_clean_up, flavor['id'])
- flag = False
# Verify flavor is retrieved
+ flag = False
resp, flavors = self.client.list_flavors_with_detail()
self.assertEqual(resp.status, 200)
for flavor in flavors:
if flavor['name'] == flavor_name:
flag = True
+ self.assertTrue(flag)
+
+ # Verify flavor is not retrieved with other user
+ flag = False
+ resp, flavors = self.user_client.list_flavors_with_detail()
+ self.assertEqual(resp.status, 200)
+ for flavor in flavors:
+ if flavor['name'] == flavor_name:
+ flag = True
self.assertFalse(flag)
@attr(type='gate')
@@ -276,7 +288,7 @@
r, flavors = self.client.list_flavors_with_detail(params)
self.assertEqual(r.status, 200)
flavor = _flavor_lookup(flavors, flavor_name)
- self.assertNotEqual(flavor, None)
+ self.assertIsNotNone(flavor)
_test_string_variations(['f', 'false', 'no', '0'],
flavor_name_not_public)
diff --git a/tempest/api/compute/admin/test_hosts.py b/tempest/api/compute/admin/test_hosts.py
index 849cebb..af76ad0 100644
--- a/tempest/api/compute/admin/test_hosts.py
+++ b/tempest/api/compute/admin/test_hosts.py
@@ -33,13 +33,13 @@
cls.client = cls.os_adm.hosts_client
cls.non_admin_client = cls.os.hosts_client
- @attr(type=['positive', 'gate'])
+ @attr(type='gate')
def test_list_hosts(self):
resp, hosts = self.client.list_hosts()
self.assertEqual(200, resp.status)
self.assertTrue(len(hosts) >= 2)
- @attr(type='positive')
+ @attr(type='gate')
def test_list_hosts_with_zone(self):
resp, hosts = self.client.list_hosts()
host = hosts[0]
diff --git a/tempest/api/compute/admin/test_hypervisor.py b/tempest/api/compute/admin/test_hypervisor.py
index 00a5955..5ca16f4 100644
--- a/tempest/api/compute/admin/test_hypervisor.py
+++ b/tempest/api/compute/admin/test_hypervisor.py
@@ -40,20 +40,20 @@
self.assertEqual(200, resp.status)
return hypers
- @attr(type=['positive', 'gate'])
+ @attr(type='gate')
def test_get_hypervisor_list(self):
# List of hypervisor and available hypervisors hostname
hypers = self._list_hypervisors()
self.assertTrue(len(hypers) > 0)
- @attr(type=['positive', 'gate'])
+ @attr(type='gate')
def test_get_hypervisor_list_details(self):
# Display the details of the all hypervisor
resp, hypers = self.client.get_hypervisor_list_details()
self.assertEqual(200, resp.status)
self.assertTrue(len(hypers) > 0)
- @attr(type=['positive', 'gate'])
+ @attr(type='gate')
def test_get_hypervisor_show_details(self):
# Display the details of the specified hypervisor
hypers = self._list_hypervisors()
@@ -66,7 +66,7 @@
self.assertEqual(details['hypervisor_hostname'],
hypers[0]['hypervisor_hostname'])
- @attr(type=['positive', 'gate'])
+ @attr(type='gate')
def test_get_hypervisor_show_servers(self):
# Show instances about the specific hypervisors
hypers = self._list_hypervisors()
@@ -77,14 +77,14 @@
self.assertEqual(200, resp.status)
self.assertTrue(len(hypervisors) > 0)
- @attr(type=['positive', 'gate'])
+ @attr(type='gate')
def test_get_hypervisor_stats(self):
# Verify the stats of the all hypervisor
resp, stats = self.client.get_hypervisor_stats()
self.assertEqual(200, resp.status)
self.assertTrue(len(stats) > 0)
- @attr(type=['positive', 'gate'])
+ @attr(type='gate')
def test_get_hypervisor_uptime(self):
# Verify that GET shows the specified hypervisor uptime
hypers = self._list_hypervisors()
diff --git a/tempest/api/compute/keypairs/test_keypairs.py b/tempest/api/compute/keypairs/test_keypairs.py
index e4e87c0..083fbd7 100644
--- a/tempest/api/compute/keypairs/test_keypairs.py
+++ b/tempest/api/compute/keypairs/test_keypairs.py
@@ -84,23 +84,17 @@
# Keypair should be created, Got details by name and deleted
k_name = rand_name('keypair-')
resp, keypair = self.client.create_keypair(k_name)
- try:
- resp, keypair_detail = self.client.get_keypair(k_name)
- self.assertEqual(200, resp.status)
- self.assertIn('name', keypair_detail)
- self.assertIn('public_key', keypair_detail)
- self.assertEqual(keypair_detail['name'], k_name,
- "The created keypair name is not equal "
- "to requested name")
- public_key = keypair_detail['public_key']
- self.assertTrue(public_key is not None,
- "Field public_key is empty or not found.")
- except Exception:
- self.fail("GET keypair details requested by keypair name "
- "has failed")
- finally:
- resp, _ = self.client.delete_keypair(k_name)
- self.assertEqual(202, resp.status)
+ self.addCleanup(self.client.delete_keypair, k_name)
+ resp, keypair_detail = self.client.get_keypair(k_name)
+ self.assertEqual(200, resp.status)
+ self.assertIn('name', keypair_detail)
+ self.assertIn('public_key', keypair_detail)
+ self.assertEqual(keypair_detail['name'], k_name,
+ "The created keypair name is not equal "
+ "to requested name")
+ public_key = keypair_detail['public_key']
+ self.assertTrue(public_key is not None,
+ "Field public_key is empty or not found.")
@attr(type='gate')
def test_keypair_create_with_pub_key(self):
diff --git a/tempest/api/compute/servers/test_list_server_filters.py b/tempest/api/compute/servers/test_list_server_filters.py
index b8f965c..ade7604 100644
--- a/tempest/api/compute/servers/test_list_server_filters.py
+++ b/tempest/api/compute/servers/test_list_server_filters.py
@@ -20,6 +20,7 @@
from tempest.api.compute import base
from tempest.api import utils
from tempest.common.utils.data_utils import rand_name
+from tempest import config
from tempest import exceptions
from tempest.test import attr
@@ -218,6 +219,8 @@
self.assertNotIn(self.s2_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s3_name, map(lambda x: x['name'], servers))
+ @testtools.skipIf(config.TempestConfig().service_available.neutron,
+ "Skipped until the Bug #1182883 is resolved")
@attr(type='gate')
def test_list_servers_filtered_by_ip_regex(self):
# Filter servers by regex ip
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index 893d9e0..25df6e6 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -225,7 +225,7 @@
resp, output = self.servers_client.get_console_output(
self.server_id, 10)
self.assertEqual(200, resp.status)
- self.assertNotEqual(output, None)
+ self.assertIsNotNone(output)
lines = len(output.split('\n'))
self.assertEqual(lines, 10)
self.wait_for(get_output)
@@ -249,7 +249,7 @@
resp, output = self.servers_client.get_console_output(self.server_id,
10)
self.assertEqual(200, resp.status)
- self.assertNotEqual(output, None)
+ self.assertIsNotNone(output)
lines = len(output.split('\n'))
self.assertEqual(lines, 10)
diff --git a/tempest/api/compute/servers/test_virtual_interfaces.py b/tempest/api/compute/servers/test_virtual_interfaces.py
index 2a5be8c..e5ea30e 100644
--- a/tempest/api/compute/servers/test_virtual_interfaces.py
+++ b/tempest/api/compute/servers/test_virtual_interfaces.py
@@ -45,7 +45,7 @@
# for a given server_id
resp, output = self.client.list_virtual_interfaces(self.server_id)
self.assertEqual(200, resp.status)
- self.assertNotEqual(output, None)
+ self.assertIsNotNone(output)
virt_ifaces = output
self.assertNotEqual(0, len(virt_ifaces['virtual_interfaces']),
'Expected virtual interfaces, got 0 interfaces.')
diff --git a/tempest/api/compute/test_authorization.py b/tempest/api/compute/test_authorization.py
index 60297a9..efdadb0 100644
--- a/tempest/api/compute/test_authorization.py
+++ b/tempest/api/compute/test_authorization.py
@@ -21,8 +21,11 @@
from tempest.common.utils.data_utils import parse_image_id
from tempest.common.utils.data_utils import rand_name
from tempest import exceptions
+from tempest.openstack.common import log as logging
from tempest.test import attr
+LOG = logging.getLogger(__name__)
+
class AuthorizationTestJSON(base.BaseComputeTest):
_interface = 'json'
@@ -204,7 +207,7 @@
self.alt_keypairs_client.base_url = self.saved_base_url
if (resp['status'] is not None):
resp, _ = self.alt_keypairs_client.delete_keypair(k_name)
- self.fail("Create keypair request should not happen "
+ LOG.error("Create keypair request should not happen "
"if the tenant id does not match the current user")
@attr(type='gate')
@@ -255,7 +258,7 @@
self.alt_security_client.base_url = self.saved_base_url
if resp['status'] is not None:
self.alt_security_client.delete_security_group(resp['id'])
- self.fail("Create Security Group request should not happen if"
+            LOG.error("Create Security Group request should not happen if "
"the tenant id does not match the current user")
@attr(type='gate')
@@ -297,7 +300,7 @@
self.alt_security_client.base_url = self.saved_base_url
if resp['status'] is not None:
self.alt_security_client.delete_security_group_rule(resp['id'])
- self.fail("Create security group rule request should not "
+ LOG.error("Create security group rule request should not "
"happen if the tenant id does not match the"
" current user")
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index a3b051e..b67a5e0 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -55,6 +55,7 @@
# Start a server and wait for it to become ready
resp, server = self.create_server(wait_until='ACTIVE',
adminPass='password')
+ self.server = server
# Record addresses so that we can ssh later
resp, server['addresses'] = \
diff --git a/tempest/api/identity/admin/test_users.py b/tempest/api/identity/admin/test_users.py
index 6f90b04..4cfeb45 100644
--- a/tempest/api/identity/admin/test_users.py
+++ b/tempest/api/identity/admin/test_users.py
@@ -15,7 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-from testtools.matchers._basic import Contains
+from testtools.matchers import Contains
from tempest.api.identity import base
from tempest.common.utils.data_utils import rand_name
@@ -26,11 +26,14 @@
class UsersTestJSON(base.BaseIdentityAdminTest):
_interface = 'json'
- alt_user = rand_name('test_user_')
- alt_password = rand_name('pass_')
- alt_email = alt_user + '@testmail.tm'
- alt_tenant = rand_name('test_tenant_')
- alt_description = rand_name('desc_')
+ @classmethod
+ def setUpClass(cls):
+ super(UsersTestJSON, cls).setUpClass()
+ cls.alt_user = rand_name('test_user_')
+ cls.alt_password = rand_name('pass_')
+ cls.alt_email = cls.alt_user + '@testmail.tm'
+ cls.alt_tenant = rand_name('test_tenant_')
+ cls.alt_description = rand_name('desc_')
@attr(type='smoke')
def test_create_user(self):
@@ -101,8 +104,9 @@
@attr(type='smoke')
def test_delete_user(self):
# Delete a user
+ alt_user2 = rand_name('alt_user_')
self.data.setup_test_tenant()
- resp, user = self.client.create_user('user_1234', self.alt_password,
+ resp, user = self.client.create_user(alt_user2, self.alt_password,
self.data.tenant['id'],
self.alt_email)
self.assertEquals('200', resp['status'])
@@ -228,13 +232,16 @@
self.data.setup_test_tenant()
user_ids = list()
fetched_user_ids = list()
- resp, user1 = self.client.create_user('tenant_user1', 'password1',
+ alt_tenant_user1 = rand_name('tenant_user1_')
+ resp, user1 = self.client.create_user(alt_tenant_user1, 'password1',
self.data.tenant['id'],
'user1@123')
self.assertEquals('200', resp['status'])
user_ids.append(user1['id'])
self.data.users.append(user1)
- resp, user2 = self.client.create_user('tenant_user2', 'password2',
+
+ alt_tenant_user2 = rand_name('tenant_user2_')
+ resp, user2 = self.client.create_user(alt_tenant_user2, 'password2',
self.data.tenant['id'],
'user2@123')
self.assertEquals('200', resp['status'])
@@ -267,9 +274,11 @@
resp, role = self.client.assign_user_role(tenant['id'], user['id'],
role['id'])
self.assertEquals('200', resp['status'])
- resp, second_user = self.client.create_user('second_user', 'password1',
+
+ alt_user2 = rand_name('second_user_')
+ resp, second_user = self.client.create_user(alt_user2, 'password1',
self.data.tenant['id'],
- 'user1@123')
+ 'user2@123')
self.assertEquals('200', resp['status'])
user_ids.append(second_user['id'])
self.data.users.append(second_user)
diff --git a/tempest/api/identity/admin/v3/test_endpoints.py b/tempest/api/identity/admin/v3/test_endpoints.py
index d98fb71..9d143ed 100644
--- a/tempest/api/identity/admin/v3/test_endpoints.py
+++ b/tempest/api/identity/admin/v3/test_endpoints.py
@@ -68,44 +68,30 @@
', '.join(str(e) for e in missing_endpoints))
@attr(type='gate')
- def test_create_delete_endpoint(self):
+ def test_create_list_delete_endpoint(self):
region = rand_name('region')
url = rand_name('url')
interface = 'public'
- create_flag = False
- matched = False
- try:
- resp, endpoint =\
- self.client.create_endpoint(self.service_id, interface, url,
- region=region, enabled=True)
- create_flag = True
- # Asserting Create Endpoint response body
- self.assertEqual(resp['status'], '201')
- self.assertEqual(region, endpoint['region'])
- self.assertEqual(url, endpoint['url'])
- # Checking if created endpoint is present in the list of endpoints
- resp, fetched_endpoints = self.client.list_endpoints()
- for e in fetched_endpoints:
- if endpoint['id'] == e['id']:
- matched = True
- if not matched:
- self.fail("Created endpoint does not appear in the list"
- " of endpoints")
- finally:
- if create_flag:
- matched = False
- # Deleting the endpoint created in this method
- resp_header, resp_body =\
- self.client.delete_endpoint(endpoint['id'])
- self.assertEqual(resp_header['status'], '204')
- self.assertEqual(resp_body, '')
- # Checking whether endpoint is deleted successfully
- resp, fetched_endpoints = self.client.list_endpoints()
- for e in fetched_endpoints:
- if endpoint['id'] == e['id']:
- matched = True
- if matched:
- self.fail("Delete endpoint is not successful")
+ resp, endpoint =\
+ self.client.create_endpoint(self.service_id, interface, url,
+ region=region, enabled=True)
+ # Asserting Create Endpoint response body
+ self.assertEqual(resp['status'], '201')
+ self.assertIn('id', endpoint)
+ self.assertEqual(region, endpoint['region'])
+ self.assertEqual(url, endpoint['url'])
+ # Checking if created endpoint is present in the list of endpoints
+ resp, fetched_endpoints = self.client.list_endpoints()
+ fetched_endpoints_id = [e['id'] for e in fetched_endpoints]
+ self.assertIn(endpoint['id'], fetched_endpoints_id)
+ # Deleting the endpoint created in this method
+ resp, body = self.client.delete_endpoint(endpoint['id'])
+ self.assertEqual(resp['status'], '204')
+ self.assertEqual(body, '')
+ # Checking whether endpoint is deleted successfully
+ resp, fetched_endpoints = self.client.list_endpoints()
+ fetched_endpoints_id = [e['id'] for e in fetched_endpoints]
+ self.assertNotIn(endpoint['id'], fetched_endpoints_id)
@attr(type='smoke')
def test_update_endpoint(self):
diff --git a/tempest/api/identity/admin/v3/test_roles.py b/tempest/api/identity/admin/v3/test_roles.py
index cab84c0..980323a 100644
--- a/tempest/api/identity/admin/v3/test_roles.py
+++ b/tempest/api/identity/admin/v3/test_roles.py
@@ -54,7 +54,7 @@
resp[1], _ = cls.v3_client.delete_group(cls.group_body['id'])
resp[2], _ = cls.v3_client.delete_user(cls.user_body['id'])
resp[3], _ = cls.v3_client.delete_project(cls.project['id'])
- #NOTE(harika-vakadi): It is necessary to disable the domian
+        # NOTE(harika-vakadi): It is necessary to disable the domain
# before deleting,or else it would result in unauthorized error
cls.v3_client.update_domain(cls.domain['id'], enabled=False)
resp[4], _ = cls.v3_client.delete_domain(cls.domain['id'])
diff --git a/tempest/api/network/base.py b/tempest/api/network/base.py
index 2a3b3f7..19c5f84 100644
--- a/tempest/api/network/base.py
+++ b/tempest/api/network/base.py
@@ -48,7 +48,7 @@
@classmethod
def setUpClass(cls):
super(BaseNetworkTest, cls).setUpClass()
- os = clients.Manager()
+ os = clients.Manager(interface=cls._interface)
cls.network_cfg = os.config.network
if not cls.config.service_available.neutron:
raise cls.skipException("Neutron support is required")
diff --git a/tempest/api/network/test_networks.py b/tempest/api/network/test_networks.py
index 00a8ef7..7f49452 100644
--- a/tempest/api/network/test_networks.py
+++ b/tempest/api/network/test_networks.py
@@ -23,7 +23,8 @@
from tempest.test import attr
-class NetworksTest(base.BaseNetworkTest):
+class NetworksTestJSON(base.BaseNetworkTest):
+ _interface = 'json'
"""
Tests the following operations in the Neutron API using the REST client for
@@ -55,13 +56,25 @@
@classmethod
def setUpClass(cls):
- super(NetworksTest, cls).setUpClass()
+ super(NetworksTestJSON, cls).setUpClass()
cls.network = cls.create_network()
cls.name = cls.network['name']
cls.subnet = cls.create_subnet(cls.network)
cls.cidr = cls.subnet['cidr']
cls.port = cls.create_port(cls.network)
+ def _delete_networks(self, created_networks):
+ for n in created_networks:
+ resp, body = self.client.delete_network(n['id'])
+ self.assertEqual(204, resp.status)
+ # Asserting that the networks are not found in the list after deletion
+ resp, body = self.client.list_networks()
+ networks_list = list()
+ for network in body['networks']:
+ networks_list.append(network['id'])
+ for n in created_networks:
+ self.assertNotIn(n['id'], networks_list)
+
@attr(type='gate')
def test_create_update_delete_network_subnet(self):
# Creates a network
@@ -97,7 +110,7 @@
self.assertEqual('200', resp['status'])
updated_subnet = body['subnet']
self.assertEqual(updated_subnet['name'], new_subnet)
- # Deletes subnet and network
+ # Delete subnet and network
resp, body = self.client.delete_subnet(subnet_id)
self.assertEqual('204', resp['status'])
resp, body = self.client.delete_network(net_id)
@@ -116,6 +129,7 @@
def test_list_networks(self):
# Verify the network exists in the list of all networks
resp, body = self.client.list_networks()
+ self.assertEqual('200', resp['status'])
networks = body['networks']
found = None
for n in networks:
@@ -137,6 +151,7 @@
def test_list_subnets(self):
# Verify the subnet exists in the list of all subnets
resp, body = self.client.list_subnets()
+ self.assertEqual('200', resp['status'])
subnets = body['subnets']
found = None
for n in subnets:
@@ -147,7 +162,7 @@
@attr(type='gate')
def test_create_update_delete_port(self):
- # Verify that successful port creation & deletion
+        # Verify successful port creation, update and deletion
resp, body = self.client.create_port(self.network['id'])
self.assertEqual('201', resp['status'])
port = body['port']
@@ -162,7 +177,7 @@
self.assertEqual('204', resp['status'])
@attr(type='gate')
- def test_show_ports(self):
+ def test_show_port(self):
# Verify the details of port
resp, body = self.client.show_port(self.port['id'])
self.assertEqual('200', resp['status'])
@@ -192,3 +207,24 @@
non_exist_id = rand_name('subnet')
self.assertRaises(exceptions.NotFound, self.client.show_subnet,
non_exist_id)
+
+ @attr(type='gate')
+ def test_bulk_create_delete_network(self):
+ # Creates 2 networks in one request
+ network_names = [rand_name('network-'), rand_name('network-')]
+ resp, body = self.client.create_bulk_network(2, network_names)
+ created_networks = body['networks']
+ self.assertEqual('201', resp['status'])
+ self.addCleanup(self._delete_networks, created_networks)
+ # Asserting that the networks are found in the list after creation
+ resp, body = self.client.list_networks()
+ networks_list = list()
+ for network in body['networks']:
+ networks_list.append(network['id'])
+ for n in created_networks:
+ self.assertIsNotNone(n['id'])
+ self.assertIn(n['id'], networks_list)
+
+
+class NetworksTestXML(NetworksTestJSON):
+ _interface = 'xml'
diff --git a/tempest/api/network/test_quotas.py b/tempest/api/network/test_quotas.py
index ba70f34..b49cbe8 100644
--- a/tempest/api/network/test_quotas.py
+++ b/tempest/api/network/test_quotas.py
@@ -23,6 +23,7 @@
class QuotasTest(base.BaseNetworkTest):
+ _interface = 'json'
"""
Tests the following operations in the Neutron API using the REST client for
diff --git a/tempest/api/network/test_routers.py b/tempest/api/network/test_routers.py
new file mode 100644
index 0000000..4f687b0
--- /dev/null
+++ b/tempest/api/network/test_routers.py
@@ -0,0 +1,134 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.network import base
+from tempest.common.utils.data_utils import rand_name
+from tempest.test import attr
+
+
+class RoutersTest(base.BaseNetworkTest):
+ _interface = 'json'
+
+ @classmethod
+ def setUpClass(cls):
+ super(RoutersTest, cls).setUpClass()
+
+ def _delete_router(self, router_id):
+ resp, _ = self.client.delete_router(router_id)
+ self.assertEqual(204, resp.status)
+ # Asserting that the router is not found in the list
+ # after deletion
+ resp, list_body = self.client.list_routers()
+ self.assertEqual('200', resp['status'])
+ routers_list = list()
+ for router in list_body['routers']:
+ routers_list.append(router['id'])
+ self.assertNotIn(router_id, routers_list)
+
+ def _remove_router_interface_with_subnet_id(self, router_id, subnet_id):
+ resp, _ = self.client.remove_router_interface_with_subnet_id(
+ router_id, subnet_id)
+ self.assertEqual('200', resp['status'])
+
+ def _remove_router_interface_with_port_id(self, router_id, port_id):
+ resp, _ = self.client.remove_router_interface_with_port_id(
+ router_id, port_id)
+ self.assertEqual('200', resp['status'])
+
+ @attr(type='gate')
+ def test_create_show_list_update_delete_router(self):
+ # Create a router
+ name = rand_name('router-')
+ resp, create_body = self.client.create_router(
+ name, external_gateway_info={
+ "network_id": self.network_cfg.public_network_id},
+ admin_state_up=False)
+ self.assertEqual('201', resp['status'])
+ self.addCleanup(self._delete_router, create_body['router']['id'])
+ self.assertEqual(create_body['router']['name'], name)
+ self.assertEqual(
+ create_body['router']['external_gateway_info']['network_id'],
+ self.network_cfg.public_network_id)
+ self.assertEqual(create_body['router']['admin_state_up'], False)
+ # Show details of the created router
+ resp, show_body = self.client.show_router(
+ create_body['router']['id'])
+ self.assertEqual('200', resp['status'])
+ self.assertEqual(show_body['router']['name'], name)
+ self.assertEqual(
+ show_body['router']['external_gateway_info']['network_id'],
+ self.network_cfg.public_network_id)
+ self.assertEqual(show_body['router']['admin_state_up'], False)
+ # List routers and verify if created router is there in response
+ resp, list_body = self.client.list_routers()
+ self.assertEqual('200', resp['status'])
+ routers_list = list()
+ for router in list_body['routers']:
+ routers_list.append(router['id'])
+ self.assertIn(create_body['router']['id'], routers_list)
+ # Update the name of router and verify if it is updated
+ updated_name = 'updated ' + name
+ resp, update_body = self.client.update_router(
+ create_body['router']['id'], name=updated_name)
+ self.assertEqual('200', resp['status'])
+ self.assertEqual(update_body['router']['name'], updated_name)
+ resp, show_body = self.client.show_router(
+ create_body['router']['id'])
+ self.assertEqual(show_body['router']['name'], updated_name)
+
+ @attr(type='gate')
+ def test_add_remove_router_interface_with_subnet_id(self):
+ network = self.create_network()
+ subnet = self.create_subnet(network)
+ name = rand_name('router-')
+ resp, create_body = self.client.create_router(name)
+ self.addCleanup(self.client.delete_router, create_body['router']['id'])
+        # Add router interface with subnet id
+ resp, interface = self.client.add_router_interface_with_subnet_id(
+ create_body['router']['id'], subnet['id'])
+ self.assertEqual('200', resp['status'])
+ self.addCleanup(self._remove_router_interface_with_subnet_id,
+ create_body['router']['id'], subnet['id'])
+ self.assertTrue('subnet_id' in interface.keys())
+ self.assertTrue('port_id' in interface.keys())
+ # Verify router id is equal to device id in port details
+ resp, show_port_body = self.client.show_port(
+ interface['port_id'])
+ self.assertEqual(show_port_body['port']['device_id'],
+ create_body['router']['id'])
+
+ @attr(type='gate')
+ def test_add_remove_router_interface_with_port_id(self):
+ network = self.create_network()
+ self.create_subnet(network)
+ name = rand_name('router-')
+ resp, create_body = self.client.create_router(name)
+ self.addCleanup(self.client.delete_router, create_body['router']['id'])
+ resp, port_body = self.client.create_port(network['id'])
+ # add router interface to port created above
+ resp, interface = self.client.add_router_interface_with_port_id(
+ create_body['router']['id'], port_body['port']['id'])
+ self.assertEqual('200', resp['status'])
+ self.addCleanup(self._remove_router_interface_with_port_id,
+ create_body['router']['id'], port_body['port']['id'])
+ self.assertTrue('subnet_id' in interface.keys())
+ self.assertTrue('port_id' in interface.keys())
+ # Verify router id is equal to device id in port details
+ resp, show_port_body = self.client.show_port(
+ interface['port_id'])
+ self.assertEqual(show_port_body['port']['device_id'],
+ create_body['router']['id'])
diff --git a/tempest/api/object_storage/base.py b/tempest/api/object_storage/base.py
index 820328c..e6e8d17 100644
--- a/tempest/api/object_storage/base.py
+++ b/tempest/api/object_storage/base.py
@@ -18,6 +18,7 @@
from tempest.api.identity.base import DataGenerator
from tempest import clients
+from tempest.common import isolated_creds
from tempest import exceptions
import tempest.test
@@ -30,16 +31,41 @@
if not cls.config.service_available.swift:
skip_msg = ("%s skipped as swift is not available" % cls.__name__)
raise cls.skipException(skip_msg)
- cls.os = clients.Manager()
+ cls.isolated_creds = isolated_creds.IsolatedCreds(cls.__name__)
+ if cls.config.compute.allow_tenant_isolation:
+ # Get isolated creds for normal user
+ creds = cls.isolated_creds.get_primary_creds()
+ username, tenant_name, password = creds
+ cls.os = clients.Manager(username=username,
+ password=password,
+ tenant_name=tenant_name)
+ # Get isolated creds for admin user
+ admin_creds = cls.isolated_creds.get_admin_creds()
+ admin_username, admin_tenant_name, admin_password = admin_creds
+ cls.os_admin = clients.Manager(username=admin_username,
+ password=admin_password,
+ tenant_name=admin_tenant_name)
+ # Get isolated creds for alt user
+ alt_creds = cls.isolated_creds.get_alt_creds()
+ alt_username, alt_tenant, alt_password = alt_creds
+ cls.os_alt = clients.Manager(username=alt_username,
+ password=alt_password,
+ tenant_name=alt_tenant)
+ # Add isolated users to operator role so that they can create a
+ # container in swift.
+ cls._assign_member_role()
+ else:
+ cls.os = clients.Manager()
+ cls.os_admin = clients.AdminManager()
+ cls.os_alt = clients.AltManager()
+
cls.object_client = cls.os.object_client
cls.container_client = cls.os.container_client
cls.account_client = cls.os.account_client
cls.custom_object_client = cls.os.custom_object_client
- cls.os_admin = clients.AdminManager()
cls.token_client = cls.os_admin.token_client
cls.identity_admin_client = cls.os_admin.identity_client
cls.custom_account_client = cls.os.custom_account_client
- cls.os_alt = clients.AltManager()
cls.object_client_alt = cls.os_alt.object_client
cls.container_client_alt = cls.os_alt.container_client
cls.identity_client_alt = cls.os_alt.identity_client
@@ -47,6 +73,22 @@
cls.data = DataGenerator(cls.identity_admin_client)
@classmethod
+ def _assign_member_role(cls):
+ primary_user = cls.isolated_creds.get_primary_user()
+ alt_user = cls.isolated_creds.get_alt_user()
+ swift_role = cls.config.object_storage.operator_role
+ try:
+ resp, roles = cls.os_admin.identity_client.list_roles()
+ role = next(r for r in roles if r['name'] == swift_role)
+ except StopIteration:
+ msg = "No role named %s found" % swift_role
+ raise exceptions.NotFound(msg)
+ for user in [primary_user, alt_user]:
+ cls.os_admin.identity_client.assign_user_role(user['tenantId'],
+ user['id'],
+ role['id'])
+
+ @classmethod
def delete_containers(cls, containers, container_client=None,
object_client=None):
"""Remove given containers and all objects in them.
diff --git a/tempest/api/object_storage/test_account_quotas.py b/tempest/api/object_storage/test_account_quotas.py
index bc050dc..65fe1ac 100644
--- a/tempest/api/object_storage/test_account_quotas.py
+++ b/tempest/api/object_storage/test_account_quotas.py
@@ -75,22 +75,27 @@
cls.data.test_password,
cls.data.test_tenant)
- headers = {"X-Auth-Token": cls.reselleradmin_token,
+ def setUp(self):
+ super(AccountQuotasTest, self).setUp()
+
+ # Set a quota of 20 bytes on the user's account before each test
+ headers = {"X-Auth-Token": self.reselleradmin_token,
"X-Account-Meta-Quota-Bytes": "20"}
- cls.os.custom_account_client.request("POST", "", headers, "")
+ self.os.custom_account_client.request("POST", "", headers, "")
+
+ def tearDown(self):
+        # Remove the quota from the user's account after each test
+ headers = {"X-Auth-Token": self.reselleradmin_token,
+ "X-Remove-Account-Meta-Quota-Bytes": "x"}
+
+ self.os.custom_account_client.request("POST", "", headers, "")
+ super(AccountQuotasTest, self).tearDown()
@classmethod
def tearDownClass(cls):
cls.delete_containers([cls.container_name])
cls.data.teardown_all()
-
- # remove the quota from the container
- headers = {"X-Auth-Token": cls.reselleradmin_token,
- "X-Remove-Account-Meta-Quota-Bytes": "x"}
-
- cls.os.custom_account_client.request("POST", "", headers, "")
-
super(AccountQuotasTest, cls).tearDownClass()
@testtools.skipIf(not accounts_quotas_available,
@@ -113,3 +118,45 @@
self.assertRaises(exceptions.OverLimit,
self.object_client.create_object,
self.container_name, object_name, data)
+
+ @testtools.skipIf(not accounts_quotas_available,
+ "Account Quotas middleware not available")
+ @attr(type=["smoke"])
+ def test_admin_modify_quota(self):
+ """Test that the ResellerAdmin is able to modify and remove the quota
+ on a user's account.
+
+ Using the custom_account client, the test modifies the quota
+ successively to:
+
+ * "25": a random value different from the initial quota value.
+ * "" : an empty value, equivalent to the removal of the quota.
+ * "20": set the quota to its initial value.
+ """
+ for quota in ("25", "", "20"):
+
+ headers = {"X-Auth-Token": self.reselleradmin_token,
+ "X-Account-Meta-Quota-Bytes": quota}
+
+ resp, _ = self.os.custom_account_client.request("POST", "",
+ headers, "")
+
+ self.assertEqual(resp["status"], "204")
+
+ @testtools.skipIf(not accounts_quotas_available,
+ "Account Quotas middleware not available")
+ @attr(type=["negative", "smoke"])
+ def test_user_modify_quota(self):
+ """Test that a user is not able to modify or remove a quota on
+ its account.
+ """
+
+ # Not able to remove quota
+ self.assertRaises(exceptions.Unauthorized,
+ self.account_client.create_account_metadata,
+ {"Quota-Bytes": ""})
+
+ # Not able to modify quota
+ self.assertRaises(exceptions.Unauthorized,
+ self.account_client.create_account_metadata,
+ {"Quota-Bytes": "100"})
diff --git a/tempest/api/object_storage/test_object_services.py b/tempest/api/object_storage/test_object_services.py
index c599562..dd724c7 100644
--- a/tempest/api/object_storage/test_object_services.py
+++ b/tempest/api/object_storage/test_object_services.py
@@ -211,24 +211,18 @@
object_name,
orig_metadata)
self.assertIn(int(resp['status']), HTTP_SUCCESS)
- try:
- # copy object from source container to destination container
- resp, _ = self.object_client.copy_object_across_containers(
- src_container_name, object_name, dst_container_name,
- object_name)
- self.assertEqual(resp['status'], '201')
-
- # check if object is present in destination container
- resp, body = self.object_client.get_object(dst_container_name,
- object_name)
- self.assertEqual(body, data)
- actual_meta_key = 'x-object-meta-' + meta_key
- self.assertTrue(actual_meta_key in resp)
- self.assertEqual(resp[actual_meta_key], meta_value)
-
- except Exception as e:
- self.fail("Got exception :%s ; while copying"
- " object across containers" % e)
+ # copy object from source container to destination container
+ resp, _ = self.object_client.copy_object_across_containers(
+ src_container_name, object_name, dst_container_name,
+ object_name)
+ self.assertEqual(resp['status'], '201')
+ # check if object is present in destination container
+ resp, body = self.object_client.get_object(dst_container_name,
+ object_name)
+ self.assertEqual(body, data)
+ actual_meta_key = 'x-object-meta-' + meta_key
+ self.assertTrue(actual_meta_key in resp)
+ self.assertEqual(resp[actual_meta_key], meta_value)
@attr(type='gate')
def test_get_object_using_temp_url(self):
@@ -367,36 +361,32 @@
def test_access_public_object_with_another_user_creds(self):
# make container public-readable and access an object in it using
# another user's credentials
- try:
- cont_headers = {'X-Container-Read': '.r:*,.rlistings'}
- resp_meta, body = self.container_client.update_container_metadata(
- self.container_name, metadata=cont_headers,
- metadata_prefix='')
- self.assertIn(int(resp_meta['status']), HTTP_SUCCESS)
- # create object
- object_name = rand_name(name='Object')
- data = arbitrary_string(size=len(object_name) * 1,
- base_text=object_name)
- resp, _ = self.object_client.create_object(self.container_name,
- object_name, data)
- self.assertEqual(resp['status'], '201')
+ cont_headers = {'X-Container-Read': '.r:*,.rlistings'}
+ resp_meta, body = self.container_client.update_container_metadata(
+ self.container_name, metadata=cont_headers,
+ metadata_prefix='')
+ self.assertIn(int(resp_meta['status']), HTTP_SUCCESS)
- # list container metadata
- resp, _ = self.container_client.list_container_metadata(
- self.container_name)
- self.assertIn(int(resp['status']), HTTP_SUCCESS)
- self.assertIn('x-container-read', resp)
- self.assertEqual(resp['x-container-read'], '.r:*,.rlistings')
+ # create object
+ object_name = rand_name(name='Object')
+ data = arbitrary_string(size=len(object_name) * 1,
+ base_text=object_name)
+ resp, _ = self.object_client.create_object(self.container_name,
+ object_name, data)
+ self.assertEqual(resp['status'], '201')
- # get auth token of alternative user
- token = self.identity_client_alt.get_auth()
- headers = {'X-Auth-Token': token}
- # access object using alternate user creds
- resp, body = self.custom_object_client.get_object(
- self.container_name, object_name,
- metadata=headers)
- self.assertEqual(body, data)
+ # list container metadata
+ resp, _ = self.container_client.list_container_metadata(
+ self.container_name)
+ self.assertIn(int(resp['status']), HTTP_SUCCESS)
+ self.assertIn('x-container-read', resp)
+ self.assertEqual(resp['x-container-read'], '.r:*,.rlistings')
- except Exception as e:
- self.fail("Failed to get public readable object with another"
- " user creds raised exception is %s" % e)
+ # get auth token of alternative user
+ token = self.identity_client_alt.get_auth()
+ headers = {'X-Auth-Token': token}
+ # access object using alternate user creds
+ resp, body = self.custom_object_client.get_object(
+ self.container_name, object_name,
+ metadata=headers)
+ self.assertEqual(body, data)
diff --git a/tempest/api/orchestration/base.py b/tempest/api/orchestration/base.py
index 745dd87..2a72c95 100644
--- a/tempest/api/orchestration/base.py
+++ b/tempest/api/orchestration/base.py
@@ -89,8 +89,8 @@
pass
@classmethod
- def _create_keypair(cls, namestart='keypair-heat-'):
- kp_name = rand_name(namestart)
+ def _create_keypair(cls, name_start='keypair-heat-'):
+ kp_name = rand_name(name_start)
resp, body = cls.keypairs_client.create_keypair(kp_name)
cls.keypairs.append(kp_name)
return body
diff --git a/tempest/api/orchestration/stacks/test_neutron_resources.py b/tempest/api/orchestration/stacks/test_neutron_resources.py
new file mode 100644
index 0000000..c934020
--- /dev/null
+++ b/tempest/api/orchestration/stacks/test_neutron_resources.py
@@ -0,0 +1,211 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import logging
+
+from tempest.api.orchestration import base
+from tempest import clients
+from tempest.common.utils.data_utils import rand_name
+from tempest.test import attr
+
+
+LOG = logging.getLogger(__name__)
+
+
+class NeutronResourcesTestJSON(base.BaseOrchestrationTest):
+ _interface = 'json'
+
+ template = """
+HeatTemplateFormatVersion: '2012-12-12'
+Description: |
+ Template which creates single EC2 instance
+Parameters:
+ KeyName:
+ Type: String
+ InstanceType:
+ Type: String
+ ImageId:
+ Type: String
+ ExternalRouterId:
+ Type: String
+Resources:
+ Network:
+ Type: OS::Quantum::Net
+ Properties: {name: NewNetwork}
+ Subnet:
+ Type: OS::Quantum::Subnet
+ Properties:
+ network_id: {Ref: Network}
+ name: NewSubnet
+ ip_version: 4
+ cidr: 10.0.3.0/24
+ dns_nameservers: ["8.8.8.8"]
+ allocation_pools:
+ - {end: 10.0.3.150, start: 10.0.3.20}
+ RouterInterface:
+ Type: OS::Quantum::RouterInterface
+ Properties:
+ router_id: {Ref: ExternalRouterId}
+ subnet_id: {Ref: Subnet}
+ Server:
+ Type: AWS::EC2::Instance
+ Metadata:
+ Name: SmokeServer
+ Properties:
+ ImageId: {Ref: ImageId}
+ InstanceType: {Ref: InstanceType}
+ KeyName: {Ref: KeyName}
+ SubnetId: {Ref: Subnet}
+ UserData:
+ Fn::Base64:
+ Fn::Join:
+ - ''
+ - - '#!/bin/bash -v
+
+ '
+ - /opt/aws/bin/cfn-signal -e 0 -r "SmokeServer created" '
+ - {Ref: WaitHandle}
+ - '''
+
+ '
+ WaitHandle:
+ Type: AWS::CloudFormation::WaitConditionHandle
+ WaitCondition:
+ Type: AWS::CloudFormation::WaitCondition
+ DependsOn: Server
+ Properties:
+ Handle: {Ref: WaitHandle}
+ Timeout: '600'
+"""
+
+ @classmethod
+ def setUpClass(cls):
+ super(NeutronResourcesTestJSON, cls).setUpClass()
+ if not cls.orchestration_cfg.image_ref:
+ raise cls.skipException("No image available to test")
+ cls.client = cls.orchestration_client
+ os = clients.Manager()
+ cls.network_cfg = os.config.network
+ if not cls.config.service_available.neutron:
+ raise cls.skipException("Neutron support is required")
+ cls.network_client = os.network_client
+ cls.stack_name = rand_name('heat')
+ cls.keypair_name = (cls.orchestration_cfg.keypair_name or
+ cls._create_keypair()['name'])
+ cls.external_router_id = cls._get_external_router_id()
+
+ # create the stack
+ cls.stack_identifier = cls.create_stack(
+ cls.stack_name,
+ cls.template,
+ parameters={
+ 'KeyName': cls.keypair_name,
+ 'InstanceType': cls.orchestration_cfg.instance_type,
+ 'ImageId': cls.orchestration_cfg.image_ref,
+ 'ExternalRouterId': cls.external_router_id
+ })
+ cls.stack_id = cls.stack_identifier.split('/')[1]
+ cls.client.wait_for_stack_status(cls.stack_id, 'CREATE_COMPLETE')
+ _, resources = cls.client.list_resources(cls.stack_identifier)
+ cls.test_resources = {}
+ for resource in resources:
+ cls.test_resources[resource['logical_resource_id']] = resource
+
+ @classmethod
+ def _get_external_router_id(cls):
+ resp, body = cls.network_client.list_ports()
+ ports = body['ports']
+ router_ports = filter(lambda port: port['device_owner'] ==
+ 'network:router_interface', ports)
+ return router_ports[0]['device_id']
+
+ @attr(type='slow')
+ def test_created_resources(self):
+ """Verifies created neutron resources."""
+ resources = [('Network', 'OS::Quantum::Net'),
+ ('Subnet', 'OS::Quantum::Subnet'),
+ ('RouterInterface', 'OS::Quantum::RouterInterface'),
+ ('Server', 'AWS::EC2::Instance')]
+ for resource_name, resource_type in resources:
+ resource = self.test_resources.get(resource_name, None)
+ self.assertIsInstance(resource, dict)
+ self.assertEqual(resource_name, resource['logical_resource_id'])
+ self.assertEqual(resource_type, resource['resource_type'])
+ self.assertEqual('CREATE_COMPLETE', resource['resource_status'])
+
+ @attr(type='slow')
+ def test_created_network(self):
+        """Verifies created network."""
+ network_id = self.test_resources.get('Network')['physical_resource_id']
+ resp, body = self.network_client.show_network(network_id)
+ self.assertEqual('200', resp['status'])
+ network = body['network']
+ self.assertIsInstance(network, dict)
+ self.assertEqual(network_id, network['id'])
+ self.assertEqual('NewNetwork', network['name'])
+
+ @attr(type='slow')
+ def test_created_subnet(self):
+ """Verifies created subnet."""
+ subnet_id = self.test_resources.get('Subnet')['physical_resource_id']
+ resp, body = self.network_client.show_subnet(subnet_id)
+ self.assertEqual('200', resp['status'])
+ subnet = body['subnet']
+ network_id = self.test_resources.get('Network')['physical_resource_id']
+ self.assertEqual(subnet_id, subnet['id'])
+ self.assertEqual(network_id, subnet['network_id'])
+ self.assertEqual('NewSubnet', subnet['name'])
+ self.assertEqual('8.8.8.8', subnet['dns_nameservers'][0])
+ self.assertEqual('10.0.3.20', subnet['allocation_pools'][0]['start'])
+ self.assertEqual('10.0.3.150', subnet['allocation_pools'][0]['end'])
+ self.assertEqual(4, subnet['ip_version'])
+ self.assertEqual('10.0.3.0/24', subnet['cidr'])
+
+ @attr(type='slow')
+ def test_created_router_interface(self):
+ """Verifies created router interface."""
+ network_id = self.test_resources.get('Network')['physical_resource_id']
+ subnet_id = self.test_resources.get('Subnet')['physical_resource_id']
+ resp, body = self.network_client.list_ports()
+ self.assertEqual('200', resp['status'])
+ ports = body['ports']
+ router_ports = filter(lambda port: port['device_id'] ==
+ self.external_router_id, ports)
+ created_network_ports = filter(lambda port: port['network_id'] ==
+ network_id, router_ports)
+ self.assertEqual(1, len(created_network_ports))
+ router_interface = created_network_ports[0]
+ fixed_ips = router_interface['fixed_ips']
+ subnet_fixed_ips = filter(lambda port: port['subnet_id'] ==
+ subnet_id, fixed_ips)
+ self.assertEqual(1, len(subnet_fixed_ips))
+ router_interface_ip = subnet_fixed_ips[0]['ip_address']
+ self.assertEqual('10.0.3.1', router_interface_ip)
+
+ @attr(type='slow')
+ def test_created_server(self):
+        """Verifies created server."""
+ server_id = self.test_resources.get('Server')['physical_resource_id']
+ resp, server = self.servers_client.get_server(server_id)
+ self.assertEqual('200', resp['status'])
+ self.assertEqual(self.keypair_name, server['key_name'])
+ self.assertEqual('ACTIVE', server['status'])
+ network = server['addresses']['NewNetwork'][0]
+ self.assertEqual(4, network['version'])
+ ip_addr_prefix = network['addr'][:7]
+ ip_addr_suffix = int(network['addr'].split('.')[3])
+ self.assertEqual('10.0.3.', ip_addr_prefix)
+ self.assertTrue(ip_addr_suffix >= 20)
+ self.assertTrue(ip_addr_suffix <= 150)
diff --git a/tempest/api/orchestration/stacks/test_non_empty_stack.py b/tempest/api/orchestration/stacks/test_non_empty_stack.py
new file mode 100644
index 0000000..defb910
--- /dev/null
+++ b/tempest/api/orchestration/stacks/test_non_empty_stack.py
@@ -0,0 +1,169 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+
+from tempest.api.orchestration import base
+from tempest.common.utils.data_utils import rand_name
+from tempest.test import attr
+
+
+LOG = logging.getLogger(__name__)
+
+
+class StacksTestJSON(base.BaseOrchestrationTest):
+ _interface = 'json'
+
+ template = """
+HeatTemplateFormatVersion: '2012-12-12'
+Description: |
+ Template which creates single EC2 instance
+Parameters:
+ KeyName:
+ Type: String
+ InstanceType:
+ Type: String
+ ImageId:
+ Type: String
+Resources:
+ SmokeServer:
+ Type: AWS::EC2::Instance
+ Metadata:
+ Name: SmokeServer
+ Properties:
+ ImageId: {Ref: ImageId}
+ InstanceType: {Ref: InstanceType}
+ KeyName: {Ref: KeyName}
+ UserData:
+ Fn::Base64:
+ Fn::Join:
+ - ''
+ - - '#!/bin/bash -v
+
+ '
+ - /opt/aws/bin/cfn-signal -e 0 -r "SmokeServer created" '
+ - {Ref: WaitHandle}
+ - '''
+
+ '
+ WaitHandle:
+ Type: AWS::CloudFormation::WaitConditionHandle
+ WaitCondition:
+ Type: AWS::CloudFormation::WaitCondition
+ DependsOn: SmokeServer
+ Properties:
+ Handle: {Ref: WaitHandle}
+ Timeout: '600'
+"""
+
+ @classmethod
+ def setUpClass(cls):
+ super(StacksTestJSON, cls).setUpClass()
+ if not cls.orchestration_cfg.image_ref:
+ raise cls.skipException("No image available to test")
+ cls.client = cls.orchestration_client
+ cls.stack_name = rand_name('heat')
+ keypair_name = (cls.orchestration_cfg.keypair_name or
+ cls._create_keypair()['name'])
+
+ # create the stack
+ cls.stack_identifier = cls.create_stack(
+ cls.stack_name,
+ cls.template,
+ parameters={
+ 'KeyName': keypair_name,
+ 'InstanceType': cls.orchestration_cfg.instance_type,
+ 'ImageId': cls.orchestration_cfg.image_ref
+ })
+ cls.stack_id = cls.stack_identifier.split('/')[1]
+ cls.resource_name = 'SmokeServer'
+ cls.resource_type = 'AWS::EC2::Instance'
+ cls.client.wait_for_stack_status(cls.stack_id, 'CREATE_COMPLETE')
+
+ @attr(type='slow')
+ def test_stack_list(self):
+ """Created stack should be on the list of existing stacks."""
+ resp, stacks = self.client.list_stacks()
+ self.assertEqual('200', resp['status'])
+ self.assertIsInstance(stacks, list)
+ stacks_names = map(lambda stack: stack['stack_name'], stacks)
+ self.assertIn(self.stack_name, stacks_names)
+
+ @attr(type='slow')
+ def test_stack_show(self):
+ """Getting details about created stack should be possible."""
+ resp, stack = self.client.get_stack(self.stack_name)
+ self.assertEqual('200', resp['status'])
+ self.assertIsInstance(stack, dict)
+ self.assertEqual(self.stack_name, stack['stack_name'])
+ self.assertEqual(self.stack_id, stack['id'])
+
+ @attr(type='slow')
+ def test_list_resources(self):
+ """Getting list of created resources for the stack should be possible.
+ """
+ resp, resources = self.client.list_resources(self.stack_identifier)
+ self.assertEqual('200', resp['status'])
+ self.assertIsInstance(resources, list)
+ resources_names = map(lambda resource: resource['logical_resource_id'],
+ resources)
+ self.assertIn(self.resource_name, resources_names)
+ resources_types = map(lambda resource: resource['resource_type'],
+ resources)
+ self.assertIn(self.resource_type, resources_types)
+
+ @attr(type='slow')
+ def test_show_resource(self):
+ """Getting details about created resource should be possible."""
+ resp, resource = self.client.get_resource(self.stack_identifier,
+ self.resource_name)
+ self.assertIsInstance(resource, dict)
+ self.assertEqual(self.resource_name, resource['logical_resource_id'])
+ self.assertEqual(self.resource_type, resource['resource_type'])
+
+ @attr(type='slow')
+ def test_resource_metadata(self):
+ """Getting metadata for created resource should be possible."""
+ resp, metadata = self.client.show_resource_metadata(
+ self.stack_identifier,
+ self.resource_name)
+ self.assertEqual('200', resp['status'])
+ self.assertIsInstance(metadata, dict)
+ self.assertEqual(self.resource_name, metadata.get('Name', None))
+
+ @attr(type='slow')
+ def test_list_events(self):
+ """Getting list of created events for the stack should be possible."""
+ resp, events = self.client.list_events(self.stack_identifier)
+ self.assertEqual('200', resp['status'])
+ self.assertIsInstance(events, list)
+ resource_statuses = map(lambda event: event['resource_status'], events)
+ self.assertIn('CREATE_IN_PROGRESS', resource_statuses)
+ self.assertIn('CREATE_COMPLETE', resource_statuses)
+
+ @attr(type='slow')
+ def test_show_event(self):
+ """Getting details about existing event should be possible."""
+ resp, events = self.client.list_resource_events(self.stack_identifier,
+ self.resource_name)
+ self.assertNotEqual([], events)
+ events.sort(key=lambda event: event['event_time'])
+ event_id = events[0]['id']
+ resp, event = self.client.show_event(self.stack_identifier,
+ self.resource_name, event_id)
+ self.assertEqual('200', resp['status'])
+ self.assertEqual('CREATE_IN_PROGRESS', event['resource_status'])
+ self.assertEqual('state changed', event['resource_status_reason'])
+ self.assertEqual(self.resource_name, event['logical_resource_id'])
+ self.assertIsInstance(event, dict)
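
A template like the one above can be sanity-checked with PyYAML before it is handed to create_stack(); a minimal standalone sketch, assuming the yaml module is available (the trimmed template below is illustrative, not part of this change):

import yaml

# Trimmed to the Parameters block of the template above.
template = """
HeatTemplateFormatVersion: '2012-12-12'
Parameters:
  KeyName: {Type: String}
  InstanceType: {Type: String}
  ImageId: {Type: String}
"""

parsed = yaml.safe_load(template)
# create_stack(parameters=...) must supply exactly these keys.
assert sorted(parsed['Parameters'].keys()) == ['ImageId', 'InstanceType',
                                               'KeyName']
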
diff --git a/tempest/api/orchestration/stacks/test_stacks.py b/tempest/api/orchestration/stacks/test_stacks.py
index f1f1f7e..4bda5ab 100644
--- a/tempest/api/orchestration/stacks/test_stacks.py
+++ b/tempest/api/orchestration/stacks/test_stacks.py
@@ -33,8 +33,7 @@
@attr(type='smoke')
def test_stack_list_responds(self):
- resp, body = self.client.list_stacks()
- stacks = body['stacks']
+ resp, stacks = self.client.list_stacks()
self.assertEqual('200', resp['status'])
self.assertIsInstance(stacks, list)
@@ -42,9 +41,6 @@
def test_stack_crud_no_resources(self):
stack_name = rand_name('heat')
- # count how many stacks to start with
- resp, body = self.client.list_stacks()
-
# create the stack
stack_identifier = self.create_stack(
stack_name, self.empty_template)
@@ -54,21 +50,21 @@
self.client.wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
# check for stack in list
- resp, body = self.client.list_stacks()
- list_ids = list([stack['id'] for stack in body['stacks']])
+ resp, stacks = self.client.list_stacks()
+ list_ids = list([stack['id'] for stack in stacks])
self.assertIn(stack_id, list_ids)
# fetch the stack
- resp, body = self.client.get_stack(stack_identifier)
- self.assertEqual('CREATE_COMPLETE', body['stack_status'])
+ resp, stack = self.client.get_stack(stack_identifier)
+ self.assertEqual('CREATE_COMPLETE', stack['stack_status'])
# fetch the stack by name
- resp, body = self.client.get_stack(stack_name)
- self.assertEqual('CREATE_COMPLETE', body['stack_status'])
+ resp, stack = self.client.get_stack(stack_name)
+ self.assertEqual('CREATE_COMPLETE', stack['stack_status'])
# fetch the stack by id
- resp, body = self.client.get_stack(stack_id)
- self.assertEqual('CREATE_COMPLETE', body['stack_status'])
+ resp, stack = self.client.get_stack(stack_id)
+ self.assertEqual('CREATE_COMPLETE', stack['stack_status'])
# delete the stack
resp = self.client.delete_stack(stack_identifier)
diff --git a/tempest/api/orchestration/stacks/test_templates.py b/tempest/api/orchestration/stacks/test_templates.py
new file mode 100644
index 0000000..6a7c541
--- /dev/null
+++ b/tempest/api/orchestration/stacks/test_templates.py
@@ -0,0 +1,86 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+
+from tempest.api.orchestration import base
+from tempest.common.utils.data_utils import rand_name
+from tempest import exceptions
+from tempest.test import attr
+
+
+LOG = logging.getLogger(__name__)
+
+
+class TemplateYAMLTestJSON(base.BaseOrchestrationTest):
+ _interface = 'json'
+
+ template = """
+HeatTemplateFormatVersion: '2012-12-12'
+Description: |
+ Template which creates only a new user
+Resources:
+ CfnUser:
+ Type: AWS::IAM::User
+"""
+
+ invalid_template_url = 'http://www.example.com/template.yaml'
+
+ @classmethod
+ def setUpClass(cls):
+ super(TemplateYAMLTestJSON, cls).setUpClass()
+ cls.client = cls.orchestration_client
+ cls.stack_name = rand_name('heat')
+ cls.stack_identifier = cls.create_stack(cls.stack_name, cls.template)
+ cls.client.wait_for_stack_status(cls.stack_identifier,
+ 'CREATE_COMPLETE')
+ cls.stack_id = cls.stack_identifier.split('/')[1]
+ cls.parameters = {}
+
+ @attr(type='gate')
+ def test_show_template(self):
+ """Getting template used to create the stack."""
+ resp, template = self.client.show_template(self.stack_identifier)
+ self.assertEqual('200', resp['status'])
+
+ @attr(type='gate')
+ def test_validate_template(self):
+        """Validating template by passing its content."""
+ resp, parameters = self.client.validate_template(self.template,
+ self.parameters)
+ self.assertEqual('200', resp['status'])
+
+ @attr(type=['gate', 'negative'])
+ def test_validate_template_url(self):
+        """Validating template by passing a URL to it."""
+ self.assertRaises(exceptions.BadRequest,
+ self.client.validate_template_url,
+ template_url=self.invalid_template_url,
+ parameters=self.parameters)
+
+
+class TemplateAWSTestJSON(TemplateYAMLTestJSON):
+ template = """
+{
+ "AWSTemplateFormatVersion" : "2010-09-09",
+ "Description" : "Template which creates only a new user",
+ "Resources" : {
+ "CfnUser" : {
+ "Type" : "AWS::IAM::User"
+ }
+ }
+}
+"""
+
+ invalid_template_url = 'http://www.example.com/template.template'
diff --git a/tempest/api/volume/test_volumes_actions.py b/tempest/api/volume/test_volumes_actions.py
index 5861497..9fa86b6 100644
--- a/tempest/api/volume/test_volumes_actions.py
+++ b/tempest/api/volume/test_volumes_actions.py
@@ -18,6 +18,7 @@
from tempest.api.volume.base import BaseVolumeTest
from tempest.common.utils.data_utils import rand_name
from tempest.test import attr
+from tempest.test import stresstest
class VolumesActionsTest(BaseVolumeTest):
@@ -52,24 +53,21 @@
super(VolumesActionsTest, cls).tearDownClass()
+ @stresstest(class_setup_per='process')
@attr(type='smoke')
def test_attach_detach_volume_to_instance(self):
# Volume is attached and detached successfully from an instance
- try:
- mountpoint = '/dev/vdc'
- resp, body = self.client.attach_volume(self.volume['id'],
- self.server['id'],
- mountpoint)
- self.assertEqual(202, resp.status)
- self.client.wait_for_volume_status(self.volume['id'], 'in-use')
- except Exception:
- self.fail("Could not attach volume to instance")
- finally:
- # Detach the volume from the instance
- resp, body = self.client.detach_volume(self.volume['id'])
- self.assertEqual(202, resp.status)
- self.client.wait_for_volume_status(self.volume['id'], 'available')
+ mountpoint = '/dev/vdc'
+ resp, body = self.client.attach_volume(self.volume['id'],
+ self.server['id'],
+ mountpoint)
+ self.assertEqual(202, resp.status)
+ self.client.wait_for_volume_status(self.volume['id'], 'in-use')
+ resp, body = self.client.detach_volume(self.volume['id'])
+ self.assertEqual(202, resp.status)
+ self.client.wait_for_volume_status(self.volume['id'], 'available')
+ @stresstest(class_setup_per='process')
@attr(type='gate')
def test_get_volume_attachment(self):
# Verify that a volume's attachment information is retrieved
@@ -77,22 +75,22 @@
resp, body = self.client.attach_volume(self.volume['id'],
self.server['id'],
mountpoint)
- self.client.wait_for_volume_status(self.volume['id'], 'in-use')
self.assertEqual(202, resp.status)
- try:
- resp, volume = self.client.get_volume(self.volume['id'])
- self.assertEqual(200, resp.status)
- self.assertIn('attachments', volume)
- attachment = volume['attachments'][0]
- self.assertEqual(mountpoint, attachment['device'])
- self.assertEqual(self.server['id'], attachment['server_id'])
- self.assertEqual(self.volume['id'], attachment['id'])
- self.assertEqual(self.volume['id'], attachment['volume_id'])
- except Exception:
- self.fail("Could not get attachment details from volume")
- finally:
- self.client.detach_volume(self.volume['id'])
- self.client.wait_for_volume_status(self.volume['id'], 'available')
+ self.client.wait_for_volume_status(self.volume['id'], 'in-use')
+        # NOTE(gfidente): cleanups are registered in reverse order because
+        # addCleanup callbacks run in LIFO order (last added, first called)
+ self.addCleanup(self.client.wait_for_volume_status,
+ self.volume['id'],
+ 'available')
+ self.addCleanup(self.client.detach_volume, self.volume['id'])
+ resp, volume = self.client.get_volume(self.volume['id'])
+ self.assertEqual(200, resp.status)
+ self.assertIn('attachments', volume)
+ attachment = volume['attachments'][0]
+ self.assertEqual(mountpoint, attachment['device'])
+ self.assertEqual(self.server['id'], attachment['server_id'])
+ self.assertEqual(self.volume['id'], attachment['id'])
+ self.assertEqual(self.volume['id'], attachment['volume_id'])
@attr(type='gate')
def test_volume_upload(self):
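
The NOTE(gfidente) comment above relies on unittest's guarantee that addCleanup callbacks run in reverse registration order (LIFO); a self-contained illustration of that ordering, independent of Tempest:

import unittest


class CleanupOrderExample(unittest.TestCase):
    def test_cleanups_run_last_in_first_out(self):
        calls = []
        # Registered first, runs last -- like wait_for_volume_status above.
        self.addCleanup(calls.append, 'wait for available')
        # Registered last, runs first -- like detach_volume above.
        self.addCleanup(calls.append, 'detach volume')
        self.doCleanups()
        self.assertEqual(['detach volume', 'wait for available'], calls)


if __name__ == '__main__':
    unittest.main()
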
diff --git a/tempest/cli/__init__.py b/tempest/cli/__init__.py
index f04d23f..cbb8d08 100644
--- a/tempest/cli/__init__.py
+++ b/tempest/cli/__init__.py
@@ -93,7 +93,7 @@
def cmd_with_auth(self, cmd, action, flags='', params='',
admin=True, fail_ok=False):
"""Executes given command with auth attributes appended."""
- #TODO(jogo) make admin=False work
+ # TODO(jogo) make admin=False work
creds = ('--os-username %s --os-tenant-name %s --os-password %s '
'--os-auth-url %s ' % (self.identity.admin_username,
self.identity.admin_tenant_name, self.identity.admin_password,
@@ -101,31 +101,30 @@
flags = creds + ' ' + flags
return self.cmd(cmd, action, flags, params, fail_ok)
- def check_output(self, cmd, **kwargs):
- # substitutes subprocess.check_output which is not in python2.6
- kwargs['stdout'] = subprocess.PIPE
- proc = subprocess.Popen(cmd, **kwargs)
- output = proc.communicate()[0]
- if proc.returncode != 0:
- raise CommandFailed(proc.returncode, cmd, output)
- return output
-
def cmd(self, cmd, action, flags='', params='', fail_ok=False,
merge_stderr=False):
"""Executes specified command for the given action."""
cmd = ' '.join([os.path.join(CONF.cli.cli_dir, cmd),
flags, action, params])
LOG.info("running: '%s'" % cmd)
+ cmd_str = cmd
cmd = shlex.split(cmd)
+ result = ''
+ result_err = ''
try:
- if merge_stderr:
- result = self.check_output(cmd, stderr=subprocess.STDOUT)
- else:
- with open('/dev/null', 'w') as devnull:
- result = self.check_output(cmd, stderr=devnull)
- except subprocess.CalledProcessError as e:
- LOG.error("command output:\n%s" % e.output)
- raise
+ stdout = subprocess.PIPE
+ stderr = subprocess.STDOUT if merge_stderr else subprocess.PIPE
+ proc = subprocess.Popen(
+ cmd, stdout=stdout, stderr=stderr)
+ result, result_err = proc.communicate()
+ if not fail_ok and proc.returncode != 0:
+ raise CommandFailed(proc.returncode,
+ cmd,
+ result)
+ finally:
+ LOG.debug('output of %s:\n%s' % (cmd_str, result))
+ if not merge_stderr and result_err:
+ LOG.debug('error output of %s:\n%s' % (cmd_str, result_err))
return result
def assertTableStruct(self, items, field_names):
@@ -134,6 +133,11 @@
for field in field_names:
self.assertIn(field, item)
+ def assertFirstLineStartsWith(self, lines, beginning):
+ self.assertTrue(lines[0].startswith(beginning),
+ msg=('Beginning of first line has invalid content: %s'
+ % lines[:3]))
+
class CommandFailed(subprocess.CalledProcessError):
# adds output attribute for python2.6
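
The rewritten cmd() above swaps the check_output() shim for subprocess.Popen plus communicate(), which drains stdout and stderr together and keeps the failure path explicit; a stripped-down sketch of the same pattern outside of Tempest (the echo command is illustrative):

import shlex
import subprocess


def run(cmd_str, merge_stderr=False, fail_ok=False):
    stderr = subprocess.STDOUT if merge_stderr else subprocess.PIPE
    proc = subprocess.Popen(shlex.split(cmd_str),
                            stdout=subprocess.PIPE, stderr=stderr)
    # communicate() reads both streams to completion, so a child that is
    # noisy on stderr cannot deadlock on a full pipe.
    out, err = proc.communicate()
    if not fail_ok and proc.returncode != 0:
        raise subprocess.CalledProcessError(proc.returncode, cmd_str, out)
    return out


print(run('echo hello'))
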
diff --git a/tempest/cli/output_parser.py b/tempest/cli/output_parser.py
index bfd7f9e..f22ec4e 100644
--- a/tempest/cli/output_parser.py
+++ b/tempest/cli/output_parser.py
@@ -133,6 +133,10 @@
if not isinstance(output_lines, list):
output_lines = output_lines.split('\n')
+ if not output_lines[-1]:
+ # skip last line if empty (just newline at the end)
+ output_lines = output_lines[:-1]
+
for line in output_lines:
if delimiter_line.match(line):
columns = _table_columns(line)
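
The guard added above exists because splitting command output on '\n' always leaves a trailing empty string when the output ends with a newline; a two-assert illustration:

output = "+----+\n| id |\n+----+\n"
lines = output.split('\n')
assert lines[-1] == ''  # the final newline yields an empty trailing element
assert lines[:-1] == ['+----+', '| id |', '+----+']
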
diff --git a/tempest/cli/simple_read_only/test_cinder.py b/tempest/cli/simple_read_only/test_cinder.py
index e9ce87b..21acae8 100644
--- a/tempest/cli/simple_read_only/test_cinder.py
+++ b/tempest/cli/simple_read_only/test_cinder.py
@@ -87,7 +87,7 @@
def test_admin_help(self):
help_text = self.cinder('help')
lines = help_text.split('\n')
- self.assertTrue(lines[0].startswith('usage: cinder'))
+ self.assertFirstLineStartsWith(lines, 'usage: cinder')
commands = []
cmds_start = lines.index('Positional arguments:')
diff --git a/tempest/cli/simple_read_only/test_compute.py b/tempest/cli/simple_read_only/test_compute.py
index 4c7f604..9b358e6 100644
--- a/tempest/cli/simple_read_only/test_compute.py
+++ b/tempest/cli/simple_read_only/test_compute.py
@@ -48,7 +48,7 @@
self.nova,
'this-does-nova-exist')
- #NOTE(jogo): Commands in order listed in 'nova help'
+ # NOTE(jogo): Commands in order listed in 'nova help'
# Positional arguments:
diff --git a/tempest/cli/simple_read_only/test_compute_manage.py b/tempest/cli/simple_read_only/test_compute_manage.py
index 1848827..523c65f 100644
--- a/tempest/cli/simple_read_only/test_compute_manage.py
+++ b/tempest/cli/simple_read_only/test_compute_manage.py
@@ -41,7 +41,7 @@
self.nova_manage,
'this-does-nova-exist')
- #NOTE(jogo): Commands in order listed in 'nova-manage -h'
+ # NOTE(jogo): Commands in order listed in 'nova-manage -h'
# test flags
def test_help_flag(self):
diff --git a/tempest/cli/simple_read_only/test_glance.py b/tempest/cli/simple_read_only/test_glance.py
index 3d58451..d02c60b 100644
--- a/tempest/cli/simple_read_only/test_glance.py
+++ b/tempest/cli/simple_read_only/test_glance.py
@@ -48,7 +48,7 @@
def test_glance_help(self):
help_text = self.glance('help')
lines = help_text.split('\n')
- self.assertTrue(lines[0].startswith('usage: glance'))
+ self.assertFirstLineStartsWith(lines, 'usage: glance')
commands = []
cmds_start = lines.index('Positional arguments:')
diff --git a/tempest/cli/simple_read_only/test_keystone.py b/tempest/cli/simple_read_only/test_keystone.py
index 4c7982b..4c1c27f 100644
--- a/tempest/cli/simple_read_only/test_keystone.py
+++ b/tempest/cli/simple_read_only/test_keystone.py
@@ -46,7 +46,12 @@
out = self.keystone('catalog')
catalog = self.parser.details_multiple(out, with_label=True)
for svc in catalog:
- self.assertTrue(svc['__label'].startswith('Service:'))
+ if svc.get('__label'):
+ self.assertTrue(svc['__label'].startswith('Service:'),
+ msg=('Invalid beginning of service block: '
+ '%s' % svc['__label']))
+ self.assertIn('id', svc.keys())
+ self.assertIn('region', svc.keys())
def test_admin_endpoint_list(self):
out = self.keystone('endpoint-list')
@@ -94,7 +99,7 @@
def test_admin_help(self):
help_text = self.keystone('help')
lines = help_text.split('\n')
- self.assertTrue(lines[0].startswith('usage: keystone'))
+ self.assertFirstLineStartsWith(lines, 'usage: keystone')
commands = []
cmds_start = lines.index('Positional arguments:')
diff --git a/tempest/cli/simple_read_only/test_neutron.py b/tempest/cli/simple_read_only/test_neutron.py
index 7b8340d..ae3a1a7 100644
--- a/tempest/cli/simple_read_only/test_neutron.py
+++ b/tempest/cli/simple_read_only/test_neutron.py
@@ -92,7 +92,7 @@
def test_neutron_help(self):
help_text = self.neutron('help')
lines = help_text.split('\n')
- self.assertTrue(lines[0].startswith('usage: neutron'))
+ self.assertFirstLineStartsWith(lines, 'usage: neutron')
commands = []
cmds_start = lines.index('Commands for API v2.0:')
diff --git a/tempest/clients.py b/tempest/clients.py
index 195cb89..48e4939 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -90,7 +90,8 @@
from tempest.services.identity.xml.identity_client import TokenClientXML
from tempest.services.image.v1.json.image_client import ImageClientJSON
from tempest.services.image.v2.json.image_client import ImageClientV2JSON
-from tempest.services.network.json.network_client import NetworkClient
+from tempest.services.network.json.network_client import NetworkClientJSON
+from tempest.services.network.xml.network_client import NetworkClientXML
from tempest.services.object_storage.account_client import AccountClient
from tempest.services.object_storage.account_client import \
AccountClientCustomizedHeader
@@ -116,6 +117,11 @@
"xml": ImagesClientXML,
}
+NETWORKS_CLIENTS = {
+ "json": NetworkClientJSON,
+ "xml": NetworkClientXML,
+}
+
KEYPAIRS_CLIENTS = {
"json": KeyPairsClientJSON,
"xml": KeyPairsClientXML,
@@ -295,6 +301,7 @@
try:
self.servers_client = SERVERS_CLIENTS[interface](*client_args)
+ self.network_client = NETWORKS_CLIENTS[interface](*client_args)
self.limits_client = LIMITS_CLIENTS[interface](*client_args)
if self.config.service_available.glance:
self.images_client = IMAGES_CLIENTS[interface](*client_args)
@@ -339,7 +346,6 @@
except KeyError:
msg = "Unsupported interface type `%s'" % interface
raise exceptions.InvalidConfiguration(msg)
- self.network_client = NetworkClient(*client_args)
self.hosts_client = HostsClientJSON(*client_args)
self.account_client = AccountClient(*client_args)
if self.config.service_available.glance:
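
With the network client now registered per interface like the other clients, picking the JSON or XML implementation is a single dictionary lookup, and an unknown interface surfaces as a KeyError that the manager converts into InvalidConfiguration; a minimal sketch of that dispatch pattern, with placeholder classes standing in for NetworkClientJSON and NetworkClientXML:

class FakeNetworkClientJSON(object):
    def __init__(self, *client_args):
        self.client_args = client_args


class FakeNetworkClientXML(FakeNetworkClientJSON):
    pass


NETWORKS_CLIENTS = {
    "json": FakeNetworkClientJSON,
    "xml": FakeNetworkClientXML,
}


def network_client_for(interface, *client_args):
    try:
        return NETWORKS_CLIENTS[interface](*client_args)
    except KeyError:
        raise ValueError("Unsupported interface type `%s'" % interface)


print(type(network_client_for('xml', 'config', 'user', 'password')).__name__)
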
diff --git a/tempest/common/http.py b/tempest/common/http.py
new file mode 100644
index 0000000..49dca18
--- /dev/null
+++ b/tempest/common/http.py
@@ -0,0 +1,27 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 OpenStack, LLC
+# Copyright 2013 Citrix Systems, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import httplib2
+
+
+class ClosingHttp(httplib2.Http):
+ def request(self, *args, **kwargs):
+ original_headers = kwargs.get('headers', {})
+ new_headers = dict(original_headers, connection='close')
+ new_kwargs = dict(kwargs, headers=new_headers)
+ return super(ClosingHttp, self).request(*args, **new_kwargs)
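
The new ClosingHttp class is a drop-in replacement for httplib2.Http that merges a 'connection: close' header into every request, so the server tears the connection down after responding; usage mirrors httplib2, as in this hedged sketch (the URL is illustrative and assumes tempest is importable):

from tempest.common import http

client = http.ClosingHttp(disable_ssl_certificate_validation=True)
# Caller-supplied headers are preserved; 'connection: close' is merged in.
resp, body = client.request('http://example.com/', 'GET',
                            headers={'accept': 'application/json'})
print(resp.status, len(body))
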
diff --git a/tempest/common/rest_client.py b/tempest/common/rest_client.py
index 4c4d61b..8dfff6e 100644
--- a/tempest/common/rest_client.py
+++ b/tempest/common/rest_client.py
@@ -18,12 +18,12 @@
import collections
import hashlib
-import httplib2
import json
from lxml import etree
import re
import time
+from tempest.common import http
from tempest import exceptions
from tempest.openstack.common import log as logging
from tempest.services.compute.xml.common import xml_to_json
@@ -64,7 +64,8 @@
'retry-after', 'server',
'vary', 'www-authenticate'))
dscv = self.config.identity.disable_ssl_certificate_validation
- self.http_obj = httplib2.Http(disable_ssl_certificate_validation=dscv)
+ self.http_obj = http.ClosingHttp(
+ disable_ssl_certificate_validation=dscv)
def _set_auth(self):
"""
@@ -329,7 +330,7 @@
if (resp.status in set((204, 205, 304)) or resp.status < 200 or
method.upper() == 'HEAD') and resp_body:
raise exceptions.ResponseWithNonEmptyBody(status=resp.status)
- #NOTE(afazekas):
+ # NOTE(afazekas):
# If the HTTP Status Code is 205
# 'The response MUST NOT include an entity.'
# A HTTP entity has an entity-body and an 'entity-header'.
@@ -342,7 +343,7 @@
0 != len(set(resp.keys()) - set(('status',)) -
self.response_header_lc - self.general_header_lc)):
raise exceptions.ResponseWithEntity()
- #NOTE(afazekas)
+ # NOTE(afazekas)
# Now the swift sometimes (delete not empty container)
# returns with non json error response, we can create new rest class
# for swift.
@@ -464,8 +465,8 @@
message = resp_body
if parse_resp:
resp_body = self._parse_resp(resp_body)
- #I'm seeing both computeFault and cloudServersFault come back.
- #Will file a bug to fix, but leave as is for now.
+ # I'm seeing both computeFault and cloudServersFault come back.
+ # Will file a bug to fix, but leave as is for now.
if 'cloudServersFault' in resp_body:
message = resp_body['cloudServersFault']['message']
elif 'computeFault' in resp_body:
diff --git a/tempest/common/ssh.py b/tempest/common/ssh.py
index be350c8..2ed1057 100644
--- a/tempest/common/ssh.py
+++ b/tempest/common/ssh.py
@@ -114,9 +114,13 @@
err_data = []
poll = select.poll()
poll.register(channel, select.POLLIN)
+ start_time = time.time()
+
while True:
ready = poll.poll(self.channel_timeout)
if not any(ready):
+ if not self._is_timed_out(self.timeout, start_time):
+ continue
raise exceptions.TimeoutException(
"Command: '{0}' executed on host '{1}'.".format(
cmd, self.host))
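
The start_time bookkeeping added above lets the polling loop tell a momentarily quiet channel (keep polling) apart from a command that has exceeded the overall timeout (raise); the control flow, reduced to a standalone skeleton with illustrative names and values:

import time


def poll_until_ready(poll_once, channel_timeout=5.0, overall_timeout=300.0):
    # poll_once() should return True once data is ready on the channel.
    start_time = time.time()
    while True:
        if poll_once():
            return True
        # Nothing ready during this interval: only give up once the overall
        # timeout has elapsed, not after a single quiet poll.
        if time.time() - start_time > overall_timeout:
            raise RuntimeError('command timed out')
        time.sleep(channel_timeout)
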
diff --git a/tempest/common/utils/linux/remote_client.py b/tempest/common/utils/linux/remote_client.py
index de2bf43..2cbb74d 100644
--- a/tempest/common/utils/linux/remote_client.py
+++ b/tempest/common/utils/linux/remote_client.py
@@ -24,7 +24,7 @@
class RemoteClient():
- #Note(afazekas): It should always get an address instead of server
+ # NOTE(afazekas): It should always get an address instead of server
def __init__(self, server, username, password=None, pkey=None):
ssh_timeout = TempestConfig().compute.ssh_timeout
network = TempestConfig().compute.network_for_ssh
diff --git a/tempest/config.py b/tempest/config.py
index 9b1a91e..3b09b5e 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -369,6 +369,10 @@
cfg.BoolOpt('accounts_quotas_available',
default=True,
help="Set to True if the Account Quota middleware is enabled"),
+ cfg.StrOpt('operator_role',
+ default='Member',
+ help="Role to add to users created for swift tests to "
+ "enable creating containers"),
]
@@ -515,7 +519,10 @@
help='regexp for list of log files.'),
cfg.StrOpt('log_check_interval',
default=60,
- help='time between log file error checks.')
+ help='time (in seconds) between log file error checks.'),
+ cfg.StrOpt('default_thread_number_per_action',
+ default=4,
+                   help='The default number of threads per stress action.')
]
diff --git a/tempest/hacking/checks.py b/tempest/hacking/checks.py
index f9eb968..8cfd548 100644
--- a/tempest/hacking/checks.py
+++ b/tempest/hacking/checks.py
@@ -53,17 +53,6 @@
" in tempest/api/* tests"))
-def import_no_files_in_tests(physical_line, filename):
- """Check for merges that try to land into tempest/tests
-
- T103: tempest/tests directory is deprecated
- """
-
- if "tempest/tests" in filename:
- return (0, ("T103: tempest/tests is deprecated"))
-
-
def factory(register):
register(skip_bugs)
register(import_no_clients_in_api)
- register(import_no_files_in_tests)
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index d0f0127..7681f04 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -16,22 +16,25 @@
# License for the specific language governing permissions and limitations
# under the License.
+import os
import subprocess
# Default client libs
import cinderclient.client
import glanceclient
+import heatclient.client
import keystoneclient.v2_0.client
import netaddr
from neutronclient.common import exceptions as exc
import neutronclient.v2_0.client
import novaclient.client
-
from tempest.api.network import common as net_common
from tempest.common import isolated_creds
from tempest.common import ssh
from tempest.common.utils.data_utils import rand_name
+from tempest.common.utils.linux.remote_client import RemoteClient
+from tempest import exceptions
import tempest.manager
from tempest.openstack.common import log as logging
import tempest.test
@@ -48,6 +51,7 @@
NOVACLIENT_VERSION = '2'
CINDERCLIENT_VERSION = '1'
+ HEATCLIENT_VERSION = '1'
def __init__(self, username, password, tenant_name):
super(OfficialClientManager, self).__init__()
@@ -62,6 +66,10 @@
self.volume_client = self._get_volume_client(username,
password,
tenant_name)
+ self.orchestration_client = self._get_orchestration_client(
+ username,
+ password,
+ tenant_name)
def _get_compute_client(self, username, password, tenant_name):
# Novaclient will not execute operations for anyone but the
@@ -98,6 +106,32 @@
tenant_name,
auth_url)
+ def _get_orchestration_client(self, username=None, password=None,
+ tenant_name=None):
+ if not username:
+ username = self.config.identity.admin_username
+ if not password:
+ password = self.config.identity.admin_password
+ if not tenant_name:
+ tenant_name = self.config.identity.tenant_name
+
+ self._validate_credentials(username, password, tenant_name)
+
+ keystone = self._get_identity_client(username, password, tenant_name)
+ token = keystone.auth_token
+ try:
+ endpoint = keystone.service_catalog.url_for(
+ service_type='orchestration',
+ endpoint_type='publicURL')
+ except keystoneclient.exceptions.EndpointNotFound:
+ return None
+ else:
+ return heatclient.client.Client(self.HEATCLIENT_VERSION,
+ endpoint,
+ token=token,
+ username=username,
+ password=password)
+
def _get_identity_client(self, username, password, tenant_name):
# This identity client is not intended to check the security
# of the identity service, so use admin credentials by default.
@@ -153,13 +187,8 @@
super(OfficialClientTest, cls).setUpClass()
cls.isolated_creds = isolated_creds.IsolatedCreds(
__name__, tempest_client=False)
- if cls.config.compute.allow_tenant_isolation:
- creds = cls.isolated_creds.get_primary_creds()
- username, tenant_name, password = creds
- else:
- username = cls.config.identity.username
- password = cls.config.identity.password
- tenant_name = cls.config.identity.tenant_name
+
+ username, tenant_name, password = cls.credentials()
cls.manager = OfficialClientManager(username, password, tenant_name)
cls.compute_client = cls.manager.compute_client
@@ -167,10 +196,21 @@
cls.identity_client = cls.manager.identity_client
cls.network_client = cls.manager.network_client
cls.volume_client = cls.manager.volume_client
+ cls.orchestration_client = cls.manager.orchestration_client
cls.resource_keys = {}
cls.os_resources = []
@classmethod
+ def credentials(cls):
+ if cls.config.compute.allow_tenant_isolation:
+ return cls.isolated_creds.get_primary_creds()
+
+ username = cls.config.identity.username
+ password = cls.config.identity.password
+ tenant_name = cls.config.identity.tenant_name
+ return username, tenant_name, password
+
+ @classmethod
def tearDownClass(cls):
# NOTE(jaypipes): Because scenario tests are typically run in a
# specific order, and because test methods in scenario tests
@@ -243,9 +283,9 @@
thing = things.get(thing_id)
new_status = thing.status
if new_status == 'ERROR':
- self.fail("%s failed to get to expected status. "
- "In ERROR state."
- % thing)
+                message = ("%s failed to get to expected status. "
+                           "In ERROR state." % thing)
+                raise exceptions.BuildErrorException(message)
elif new_status == expected_status:
return True # All good.
LOG.debug("Waiting for %s to get to %s status. "
@@ -255,61 +295,18 @@
check_status,
self.config.compute.build_timeout,
self.config.compute.build_interval):
- self.fail("Timed out waiting for thing %s to become %s"
- % (thing_id, expected_status))
+            message = ("Timed out waiting for thing %s to become %s"
+                       % (thing_id, expected_status))
+ raise exceptions.TimeoutException(message)
-
-class NetworkScenarioTest(OfficialClientTest):
- """
- Base class for network scenario tests
- """
-
- @classmethod
- def check_preconditions(cls):
- if (cls.config.service_available.neutron):
- cls.enabled = True
- #verify that neutron_available is telling the truth
- try:
- cls.network_client.list_networks()
- except exc.EndpointNotFound:
- cls.enabled = False
- raise
- else:
- cls.enabled = False
- msg = 'Neutron not available'
- raise cls.skipException(msg)
-
- @classmethod
- def setUpClass(cls):
- super(NetworkScenarioTest, cls).setUpClass()
- cls.tenant_id = cls.manager._get_identity_client(
- cls.config.identity.username,
- cls.config.identity.password,
- cls.config.identity.tenant_name).tenant_id
-
- def _create_keypair(self, client, namestart='keypair-smoke-'):
- kp_name = rand_name(namestart)
- keypair = client.keypairs.create(kp_name)
- try:
- self.assertEqual(keypair.id, kp_name)
- self.set_resource(kp_name, keypair)
- except AttributeError:
- self.fail("Keypair object not successfully created.")
- return keypair
-
- def _create_security_group(self, client, namestart='secgroup-smoke-'):
- # Create security group
- sg_name = rand_name(namestart)
- sg_desc = sg_name + " description"
- secgroup = client.security_groups.create(sg_name, sg_desc)
- try:
- self.assertEqual(secgroup.name, sg_name)
- self.assertEqual(secgroup.description, sg_desc)
- self.set_resource(sg_name, secgroup)
- except AttributeError:
- self.fail("SecurityGroup object not successfully created.")
-
- # Add rules to the security group
+ def create_loginable_secgroup_rule(self, client=None, secgroup_id=None):
+ if client is None:
+ client = self.compute_client
+ if secgroup_id is None:
+ sgs = client.security_groups.list()
+ for sg in sgs:
+ if sg.name == 'default':
+ secgroup_id = sg.id
# These rules are intended to permit inbound ssh and icmp
# traffic from all sources, so no group_id is provided.
@@ -332,10 +329,128 @@
}
]
for ruleset in rulesets:
+ sg_rule = client.security_group_rules.create(secgroup_id,
+ **ruleset)
+ self.set_resource(sg_rule.id, sg_rule)
+
+ def create_server(self, client, name=None, image=None, flavor=None,
+ create_kwargs={}):
+ if name is None:
+ name = rand_name('scenario-server-')
+ if image is None:
+ image = self.config.compute.image_ref
+ if flavor is None:
+ flavor = self.config.compute.flavor_ref
+ LOG.debug("Creating a server (name: %s, image: %s, flavor: %s)",
+ name, image, flavor)
+ server = client.servers.create(name, image, flavor, **create_kwargs)
+ self.assertEqual(server.name, name)
+ self.set_resource(name, server)
+ self.status_timeout(client.servers, server.id, 'ACTIVE')
+ # The instance retrieved on creation is missing network
+ # details, necessitating retrieval after it becomes active to
+ # ensure correct details.
+ server = client.servers.get(server.id)
+ self.set_resource(name, server)
+ LOG.debug("Created server: %s", server)
+ return server
+
+ def create_volume(self, client=None, size=1, name=None,
+ snapshot_id=None, imageRef=None):
+ if client is None:
+ client = self.volume_client
+ if name is None:
+ name = rand_name('scenario-volume-')
+        LOG.debug("Creating a volume (size: %s, name: %s)", size, name)
+ volume = client.volumes.create(size=size, display_name=name,
+ snapshot_id=snapshot_id,
+ imageRef=imageRef)
+ self.set_resource(name, volume)
+ self.assertEqual(name, volume.display_name)
+ self.status_timeout(client.volumes, volume.id, 'available')
+ LOG.debug("Created volume: %s", volume)
+ return volume
+
+ def create_server_snapshot(self, server, compute_client=None,
+ image_client=None, name=None):
+ if compute_client is None:
+ compute_client = self.compute_client
+ if image_client is None:
+ image_client = self.image_client
+ if name is None:
+ name = rand_name('scenario-snapshot-')
+ LOG.debug("Creating a snapshot image for server: %s", server.name)
+ image_id = compute_client.servers.create_image(server, name)
+ self.addCleanup(image_client.images.delete, image_id)
+ self.status_timeout(image_client.images, image_id, 'active')
+ snapshot_image = image_client.images.get(image_id)
+ self.assertEquals(name, snapshot_image.name)
+ LOG.debug("Created snapshot image %s for server %s",
+ snapshot_image.name, server.name)
+ return snapshot_image
+
+ def create_keypair(self, client=None, name=None):
+ if client is None:
+ client = self.compute_client
+ if name is None:
+ name = rand_name('scenario-keypair-')
+ keypair = client.keypairs.create(name)
+ self.assertEqual(keypair.name, name)
+ self.set_resource(name, keypair)
+ return keypair
+
+ def get_remote_client(self, server_or_ip, username=None, private_key=None):
+ if isinstance(server_or_ip, basestring):
+ ip = server_or_ip
+ else:
+ network_name_for_ssh = self.config.compute.network_for_ssh
+ ip = server_or_ip.networks[network_name_for_ssh][0]
+ if username is None:
+ username = self.config.scenario.ssh_user
+ if private_key is None:
+ private_key = self.keypair.private_key
+ return RemoteClient(ip, username, pkey=private_key)
+
+
+class NetworkScenarioTest(OfficialClientTest):
+ """
+ Base class for network scenario tests
+ """
+
+ @classmethod
+ def check_preconditions(cls):
+ if (cls.config.service_available.neutron):
+ cls.enabled = True
+ # verify that neutron_available is telling the truth
try:
- client.security_group_rules.create(secgroup.id, **ruleset)
- except Exception:
- self.fail("Failed to create rule in security group.")
+ cls.network_client.list_networks()
+ except exc.EndpointNotFound:
+ cls.enabled = False
+ raise
+ else:
+ cls.enabled = False
+ msg = 'Neutron not available'
+ raise cls.skipException(msg)
+
+ @classmethod
+ def setUpClass(cls):
+ super(NetworkScenarioTest, cls).setUpClass()
+ cls.tenant_id = cls.manager._get_identity_client(
+ cls.config.identity.username,
+ cls.config.identity.password,
+ cls.config.identity.tenant_name).tenant_id
+
+ def _create_security_group(self, client, namestart='secgroup-smoke-'):
+ # Create security group
+ sg_name = rand_name(namestart)
+ sg_desc = sg_name + " description"
+ secgroup = client.security_groups.create(sg_name, sg_desc)
+ self.assertEqual(secgroup.name, sg_name)
+ self.assertEqual(secgroup.description, sg_desc)
+ self.set_resource(sg_name, secgroup)
+
+ # Add rules to the security group
+ self.create_loginable_secgroup_rule(client, secgroup.id)
return secgroup
@@ -412,31 +527,6 @@
self.set_resource(name, port)
return port
- def _create_server(self, client, network, name, key_name, security_groups):
- flavor_id = self.config.compute.flavor_ref
- base_image_id = self.config.compute.image_ref
- create_kwargs = {
- 'nics': [
- {'net-id': network.id},
- ],
- 'key_name': key_name,
- 'security_groups': security_groups,
- }
- server = client.servers.create(name, base_image_id, flavor_id,
- **create_kwargs)
- try:
- self.assertEqual(server.name, name)
- self.set_resource(name, server)
- except AttributeError:
- self.fail("Server not successfully created.")
- self.status_timeout(client.servers, server.id, 'ACTIVE')
- # The instance retrieved on creation is missing network
- # details, necessitating retrieval after it becomes active to
- # ensure correct details.
- server = client.servers.get(server.id)
- self.set_resource(name, server)
- return server
-
def _create_floating_ip(self, server, external_network_id):
result = self.network_client.list_ports(device_id=server.id)
ports = result.get('ports', [])
@@ -489,3 +579,32 @@
timeout=self.config.compute.ssh_timeout),
'Auth failure in connecting to %s@%s via ssh' %
(username, ip_address))
+
+
+class OrchestrationScenarioTest(OfficialClientTest):
+ """
+ Base class for orchestration scenario tests
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ super(OrchestrationScenarioTest, cls).setUpClass()
+ if not cls.config.service_available.heat:
+ raise cls.skipException("Heat support is required")
+
+ @classmethod
+ def credentials(cls):
+ username = cls.config.identity.admin_username
+ password = cls.config.identity.admin_password
+ tenant_name = cls.config.identity.tenant_name
+ return username, tenant_name, password
+
+ def _load_template(self, base_file, file_name):
+ filepath = os.path.join(os.path.dirname(os.path.realpath(base_file)),
+ file_name)
+ with open(filepath) as f:
+ return f.read()
+
+ @classmethod
+ def _stack_rand_name(cls):
+ return rand_name(cls.__name__ + '-')
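
Taken together, the new helpers (create_keypair, create_loginable_secgroup_rule, create_server, get_remote_client) let the scenario tests modified later in this change boot and reach an instance in a few lines; a hedged sketch of the intended call pattern inside an OfficialClientTest subclass (the method name is illustrative):

# Sketch only: assumes self is an OfficialClientTest subclass with a working
# compute_client, mirroring the scenario tests updated below.
def boot_and_ssh(self):
    self.keypair = self.create_keypair()
    self.create_loginable_secgroup_rule()
    server = self.create_server(self.compute_client,
                                create_kwargs={'key_name': self.keypair.name})
    linux_client = self.get_remote_client(server)
    return linux_client.ssh_client
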
diff --git a/tempest/scenario/orchestration/__init__.py b/tempest/scenario/orchestration/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/scenario/orchestration/__init__.py
diff --git a/tempest/scenario/orchestration/test_autoscaling.py b/tempest/scenario/orchestration/test_autoscaling.py
new file mode 100644
index 0000000..17870a1
--- /dev/null
+++ b/tempest/scenario/orchestration/test_autoscaling.py
@@ -0,0 +1,107 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.openstack.common import log as logging
+from tempest.scenario import manager
+from tempest.test import attr
+from tempest.test import call_until_true
+import time
+
+
+LOG = logging.getLogger(__name__)
+
+
+class AutoScalingTest(manager.OrchestrationScenarioTest):
+
+ def setUp(self):
+ super(AutoScalingTest, self).setUp()
+ if not self.config.orchestration.image_ref:
+ raise self.skipException("No image available to test")
+ self.client = self.orchestration_client
+
+ def assign_keypair(self):
+ self.stack_name = self._stack_rand_name()
+ if self.config.orchestration.keypair_name:
+ self.keypair_name = self.config.orchestration.keypair_name
+ else:
+ self.keypair = self.create_keypair()
+ self.keypair_name = self.keypair.id
+
+ def launch_stack(self):
+ self.parameters = {
+ 'KeyName': self.keypair_name,
+ 'InstanceType': self.config.orchestration.instance_type,
+ 'ImageId': self.config.orchestration.image_ref,
+ 'StackStart': str(time.time())
+ }
+
+ # create the stack
+ self.template = self._load_template(__file__, 'test_autoscaling.yaml')
+ self.client.stacks.create(
+ stack_name=self.stack_name,
+ template=self.template,
+ parameters=self.parameters)
+
+ self.stack = self.client.stacks.get(self.stack_name)
+ self.stack_identifier = '%s/%s' % (self.stack_name, self.stack.id)
+
+ # if a keypair was set, do not delete the stack on exit to allow
+        # for manual post-mortems
+ if not self.config.orchestration.keypair_name:
+ self.set_resource('stack', self.stack)
+
+ @attr(type='slow')
+ def test_scale_up_then_down(self):
+
+ self.assign_keypair()
+ self.launch_stack()
+
+ sid = self.stack_identifier
+ timeout = self.config.orchestration.build_timeout
+ interval = 10
+
+ self.assertEqual('CREATE', self.stack.action)
+ # wait for create to complete.
+ self.status_timeout(self.client.stacks, sid, 'COMPLETE')
+
+ self.stack.get()
+ self.assertEqual('CREATE_COMPLETE', self.stack.stack_status)
+
+ # the resource SmokeServerGroup is implemented as a nested
+ # stack, so servers can be counted by counting the resources
+ # inside that nested stack
+ resource = self.client.resources.get(sid, 'SmokeServerGroup')
+ nested_stack_id = resource.physical_resource_id
+
+ def server_count():
+ # the number of servers is the number of resources
+            # in the nested stack
+ self.server_count = len(
+ self.client.resources.list(nested_stack_id))
+ return self.server_count
+
+ def assertScale(from_servers, to_servers):
+ call_until_true(lambda: server_count() == to_servers,
+ timeout, interval)
+ self.assertEqual(to_servers, self.server_count,
+ 'Failed scaling from %d to %d servers' % (
+ from_servers, to_servers))
+
+ # he marched them up to the top of the hill
+ assertScale(1, 2)
+ assertScale(2, 3)
+
+ # and he marched them down again
+ assertScale(3, 2)
+ assertScale(2, 1)
diff --git a/tempest/scenario/orchestration/test_autoscaling.yaml b/tempest/scenario/orchestration/test_autoscaling.yaml
new file mode 100644
index 0000000..045b3bc
--- /dev/null
+++ b/tempest/scenario/orchestration/test_autoscaling.yaml
@@ -0,0 +1,182 @@
+HeatTemplateFormatVersion: '2012-12-12'
+Description: |
+ Template which tests autoscaling and load balancing
+Parameters:
+ KeyName:
+ Type: String
+ InstanceType:
+ Type: String
+ ImageId:
+ Type: String
+ StackStart:
+ Description: Epoch seconds when the stack was launched
+ Type: Number
+ ConsumeStartSeconds:
+ Description: Seconds after invocation when memory should be consumed
+ Type: Number
+ Default: '60'
+ ConsumeStopSeconds:
+ Description: Seconds after StackStart when memory should be released
+ Type: Number
+ Default: '420'
+ ScaleUpThreshold:
+ Description: Memory percentage threshold to scale up on
+ Type: Number
+ Default: '70'
+ ScaleDownThreshold:
+ Description: Memory percentage threshold to scale down on
+ Type: Number
+ Default: '60'
+ ConsumeMemoryLimit:
+ Description: Memory percentage threshold to consume
+ Type: Number
+ Default: '71'
+Resources:
+ SmokeServerGroup:
+ Type: AWS::AutoScaling::AutoScalingGroup
+ Properties:
+ AvailabilityZones: {'Fn::GetAZs': ''}
+ LaunchConfigurationName: {Ref: LaunchConfig}
+ MinSize: '1'
+ MaxSize: '3'
+ SmokeServerScaleUpPolicy:
+ Type: AWS::AutoScaling::ScalingPolicy
+ Properties:
+ AdjustmentType: ChangeInCapacity
+ AutoScalingGroupName: {Ref: SmokeServerGroup}
+ Cooldown: '60'
+ ScalingAdjustment: '1'
+ SmokeServerScaleDownPolicy:
+ Type: AWS::AutoScaling::ScalingPolicy
+ Properties:
+ AdjustmentType: ChangeInCapacity
+ AutoScalingGroupName: {Ref: SmokeServerGroup}
+ Cooldown: '60'
+ ScalingAdjustment: '-1'
+ MEMAlarmHigh:
+ Type: AWS::CloudWatch::Alarm
+ Properties:
+ AlarmDescription: Scale-up if MEM > ScaleUpThreshold% for 10 seconds
+ MetricName: MemoryUtilization
+ Namespace: system/linux
+ Statistic: Average
+ Period: '10'
+ EvaluationPeriods: '1'
+ Threshold: {Ref: ScaleUpThreshold}
+ AlarmActions: [{Ref: SmokeServerScaleUpPolicy}]
+ Dimensions:
+ - Name: AutoScalingGroupName
+ Value: {Ref: SmokeServerGroup}
+ ComparisonOperator: GreaterThanThreshold
+ MEMAlarmLow:
+ Type: AWS::CloudWatch::Alarm
+ Properties:
+ AlarmDescription: Scale-down if MEM < ScaleDownThreshold% for 10 seconds
+ MetricName: MemoryUtilization
+ Namespace: system/linux
+ Statistic: Average
+ Period: '10'
+ EvaluationPeriods: '1'
+ Threshold: {Ref: ScaleDownThreshold}
+ AlarmActions: [{Ref: SmokeServerScaleDownPolicy}]
+ Dimensions:
+ - Name: AutoScalingGroupName
+ Value: {Ref: SmokeServerGroup}
+ ComparisonOperator: LessThanThreshold
+ CfnUser:
+ Type: AWS::IAM::User
+ SmokeKeys:
+ Type: AWS::IAM::AccessKey
+ Properties:
+ UserName: {Ref: CfnUser}
+ SmokeSecurityGroup:
+ Type: AWS::EC2::SecurityGroup
+ Properties:
+ GroupDescription: Standard firewall rules
+ SecurityGroupIngress:
+ - {IpProtocol: tcp, FromPort: '22', ToPort: '22', CidrIp: 0.0.0.0/0}
+ - {IpProtocol: tcp, FromPort: '80', ToPort: '80', CidrIp: 0.0.0.0/0}
+ LaunchConfig:
+ Type: AWS::AutoScaling::LaunchConfiguration
+ Metadata:
+ AWS::CloudFormation::Init:
+ config:
+ files:
+ /etc/cfn/cfn-credentials:
+ content:
+ Fn::Replace:
+ - $AWSAccessKeyId: {Ref: SmokeKeys}
+ $AWSSecretKey: {'Fn::GetAtt': [SmokeKeys, SecretAccessKey]}
+ - |
+ AWSAccessKeyId=$AWSAccessKeyId
+ AWSSecretKey=$AWSSecretKey
+ mode: '000400'
+ owner: root
+ group: root
+ /root/watch_loop:
+ content:
+ Fn::Replace:
+ - _hi_: {Ref: MEMAlarmHigh}
+ _lo_: {Ref: MEMAlarmLow}
+ - |
+ #!/bin/bash
+ while :
+ do
+ /opt/aws/bin/cfn-push-stats --watch _hi_ --mem-util
+ /opt/aws/bin/cfn-push-stats --watch _lo_ --mem-util
+ sleep 4
+ done
+ mode: '000700'
+ owner: root
+ group: root
+ /root/consume_memory:
+ content:
+ Fn::Replace:
+ - StackStart: {Ref: StackStart}
+ ConsumeStopSeconds: {Ref: ConsumeStopSeconds}
+ ConsumeStartSeconds: {Ref: ConsumeStartSeconds}
+ ConsumeMemoryLimit: {Ref: ConsumeMemoryLimit}
+ - |
+ #!/usr/bin/env python
+ import psutil
+ import time
+ import datetime
+ import sys
+ a = []
+ sleep_until_consume = ConsumeStartSeconds
+ stack_start = StackStart
+ consume_stop_time = stack_start + ConsumeStopSeconds
+ memory_limit = ConsumeMemoryLimit
+ if sleep_until_consume > 0:
+ sys.stdout.flush()
+ time.sleep(sleep_until_consume)
+ while psutil.virtual_memory().percent < memory_limit:
+ sys.stdout.flush()
+ a.append(' ' * 10**5)
+ time.sleep(0.1)
+ sleep_until_exit = consume_stop_time - time.time()
+ if sleep_until_exit > 0:
+ time.sleep(sleep_until_exit)
+ mode: '000700'
+ owner: root
+ group: root
+ Properties:
+ ImageId: {Ref: ImageId}
+ InstanceType: {Ref: InstanceType}
+ KeyName: {Ref: KeyName}
+ SecurityGroups: [{Ref: SmokeSecurityGroup}]
+ UserData:
+ Fn::Base64:
+ Fn::Replace:
+ - ConsumeStopSeconds: {Ref: ConsumeStopSeconds}
+ ConsumeStartSeconds: {Ref: ConsumeStartSeconds}
+ ConsumeMemoryLimit: {Ref: ConsumeMemoryLimit}
+ - |
+ #!/bin/bash -v
+ /opt/aws/bin/cfn-init
+ # report on memory consumption every 4 seconds
+ /root/watch_loop &
+ # wait ConsumeStartSeconds then ramp up memory consumption
+ # until it is over ConsumeMemoryLimit%
+ # then exits ConsumeStopSeconds seconds after stack launch
+ /root/consume_memory > /root/consume_memory.log &
\ No newline at end of file
diff --git a/tempest/scenario/test_minimum_basic.py b/tempest/scenario/test_minimum_basic.py
index 13b31ec..5cddde2 100644
--- a/tempest/scenario/test_minimum_basic.py
+++ b/tempest/scenario/test_minimum_basic.py
@@ -16,7 +16,6 @@
# under the License.
from tempest.common.utils.data_utils import rand_name
-from tempest.common.utils.linux.remote_client import RemoteClient
from tempest.openstack.common import log as logging
from tempest.scenario import manager
@@ -83,22 +82,13 @@
properties=properties)
def nova_keypair_add(self):
- name = rand_name('scenario-keypair-')
-
- self.keypair = self.compute_client.keypairs.create(name=name)
- self.addCleanup(self.compute_client.keypairs.delete, self.keypair)
- self.assertEqual(name, self.keypair.name)
+ self.keypair = self.create_keypair()
def nova_boot(self):
- name = rand_name('scenario-server-')
- client = self.compute_client
- flavor_id = self.config.compute.flavor_ref
- self.server = client.servers.create(name=name, image=self.image,
- flavor=flavor_id,
- key_name=self.keypair.name)
- self.addCleanup(self.compute_client.servers.delete, self.server)
- self.assertEqual(name, self.server.name)
- self._wait_for_server_status('ACTIVE')
+ create_kwargs = {'key_name': self.keypair.name}
+ self.server = self.create_server(self.compute_client,
+ image=self.image,
+ create_kwargs=create_kwargs)
def nova_list(self):
servers = self.compute_client.servers.list()
@@ -111,15 +101,7 @@
self.assertEqual(self.server, got_server)
def cinder_create(self):
- name = rand_name('scenario-volume-')
- LOG.debug("volume display-name:%s" % name)
- self.volume = self.volume_client.volumes.create(size=1,
- display_name=name)
- LOG.debug("volume created:%s" % self.volume.display_name)
- self._wait_for_volume_status('available')
-
- self.addCleanup(self.volume_client.volumes.delete, self.volume)
- self.assertEqual(name, self.volume.display_name)
+ self.volume = self.create_volume()
def cinder_list(self):
volumes = self.volume_client.volumes.list()
@@ -148,30 +130,8 @@
def nova_floating_ip_add(self):
self.server.add_floating_ip(self.floating_ip)
- def nova_security_group_rule_create(self):
- sgs = self.compute_client.security_groups.list()
- for sg in sgs:
- if sg.name == 'default':
- secgroup = sg
-
- ruleset = {
- # ssh
- 'ip_protocol': 'tcp',
- 'from_port': 22,
- 'to_port': 22,
- 'cidr': '0.0.0.0/0',
- 'group_id': None
- }
- sg_rule = self.compute_client.security_group_rules.create(secgroup.id,
- **ruleset)
- self.addCleanup(self.compute_client.security_group_rules.delete,
- sg_rule.id)
-
def ssh_to_server(self):
- username = self.config.scenario.ssh_user
- self.linux_client = RemoteClient(self.floating_ip.ip,
- username,
- pkey=self.keypair.private_key)
+ self.linux_client = self.get_remote_client(self.floating_ip.ip)
def check_partitions(self):
partitions = self.linux_client.get_partitions()
@@ -200,7 +160,7 @@
self.nova_floating_ip_create()
self.nova_floating_ip_add()
- self.nova_security_group_rule_create()
+ self.create_loginable_secgroup_rule()
self.ssh_to_server()
self.check_partitions()
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index 5311eae..70939f6 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -16,8 +16,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import testtools
-
from tempest.api.network import common as net_common
from tempest.common.utils.data_utils import rand_name
from tempest import config
@@ -43,7 +41,7 @@
ssh server hosted at the IP address. This check guarantees
that the IP address is associated with the target VM.
- #TODO(mnewby) - Need to implement the following:
+ # TODO(mnewby) - Need to implement the following:
- the Tempest host can ssh into the VM via the IP address and
successfully execute the following:
@@ -162,8 +160,8 @@
@attr(type='smoke')
def test_001_create_keypairs(self):
- self.keypairs[self.tenant_id] = self._create_keypair(
- self.compute_client)
+ self.keypairs[self.tenant_id] = self.create_keypair(
+ name=rand_name('keypair-smoke-'))
@attr(type='smoke')
def test_002_create_security_groups(self):
@@ -182,8 +180,8 @@
@attr(type='smoke')
def test_004_check_networks(self):
- #Checks that we see the newly created network/subnet/router via
- #checking the result of list_[networks,routers,subnets]
+ # Checks that we see the newly created network/subnet/router via
+ # checking the result of list_[networks,routers,subnets]
seen_nets = self._list_networks()
seen_names = [n['name'] for n in seen_nets]
seen_ids = [n['id'] for n in seen_nets]
@@ -213,8 +211,15 @@
name = rand_name('server-smoke-%d-' % i)
keypair_name = self.keypairs[tenant_id].name
security_groups = [self.security_groups[tenant_id].name]
- server = self._create_server(self.compute_client, network,
- name, keypair_name, security_groups)
+ create_kwargs = {
+ 'nics': [
+ {'net-id': network.id},
+ ],
+ 'key_name': keypair_name,
+ 'security_groups': security_groups,
+ }
+ server = self.create_server(self.compute_client, name=name,
+ create_kwargs=create_kwargs)
self.servers.append(server)
@attr(type='smoke')
@@ -247,8 +252,6 @@
self.floating_ips[server].append(floating_ip)
@attr(type='smoke')
- @testtools.skipIf(CONF.service_available.neutron,
- "Skipped unti bug #1210664 is resolved")
def test_008_check_public_network_connectivity(self):
if not self.floating_ips:
raise self.skipTest('No floating ips have been allocated.')
diff --git a/tempest/scenario/test_server_basic_ops.py b/tempest/scenario/test_server_basic_ops.py
index 0ec3a1d..8e14b06 100644
--- a/tempest/scenario/test_server_basic_ops.py
+++ b/tempest/scenario/test_server_basic_ops.py
@@ -36,72 +36,28 @@
* Terminate the instance
"""
- def create_keypair(self):
- kp_name = rand_name('keypair-smoke')
- self.keypair = self.compute_client.keypairs.create(kp_name)
- try:
- self.assertEqual(self.keypair.id, kp_name)
- self.set_resource('keypair', self.keypair)
- except AttributeError:
- self.fail("Keypair object not successfully created.")
+ def add_keypair(self):
+ self.keypair = self.create_keypair()
def create_security_group(self):
sg_name = rand_name('secgroup-smoke')
sg_desc = sg_name + " description"
self.secgroup = self.compute_client.security_groups.create(sg_name,
sg_desc)
- try:
- self.assertEqual(self.secgroup.name, sg_name)
- self.assertEqual(self.secgroup.description, sg_desc)
- self.set_resource('secgroup', self.secgroup)
- except AttributeError:
- self.fail("SecurityGroup object not successfully created.")
+ self.assertEqual(self.secgroup.name, sg_name)
+ self.assertEqual(self.secgroup.description, sg_desc)
+ self.set_resource('secgroup', self.secgroup)
# Add rules to the security group
- rulesets = [
- {
- 'ip_protocol': 'tcp',
- 'from_port': 1,
- 'to_port': 65535,
- 'cidr': '0.0.0.0/0',
- 'group_id': self.secgroup.id
- },
- {
- 'ip_protocol': 'icmp',
- 'from_port': -1,
- 'to_port': -1,
- 'cidr': '0.0.0.0/0',
- 'group_id': self.secgroup.id
- }
- ]
- for ruleset in rulesets:
- try:
- self.compute_client.security_group_rules.create(
- self.secgroup.id, **ruleset)
- except Exception:
- self.fail("Failed to create rule in security group.")
+ self.create_loginable_secgroup_rule(secgroup_id=self.secgroup.id)
def boot_instance(self):
- i_name = rand_name('instance')
- flavor_id = self.config.compute.flavor_ref
- base_image_id = self.config.compute.image_ref
create_kwargs = {
- 'key_name': self.get_resource('keypair').id
+ 'key_name': self.keypair.id
}
- self.instance = self.compute_client.servers.create(
- i_name, base_image_id, flavor_id, **create_kwargs)
- try:
- self.assertEqual(self.instance.name, i_name)
- self.set_resource('instance', self.instance)
- except AttributeError:
- self.fail("Instance not successfully created.")
-
- self.assertEqual(self.instance.status, 'BUILD')
-
- def wait_on_active(self):
- instance_id = self.get_resource('instance').id
- self.status_timeout(
- self.compute_client.servers, instance_id, 'ACTIVE')
+ instance = self.create_server(self.compute_client,
+ create_kwargs=create_kwargs)
+ self.set_resource('instance', instance)
def pause_server(self):
instance = self.get_resource('instance')
@@ -145,10 +101,9 @@
self.remove_resource('instance')
def test_server_basicops(self):
- self.create_keypair()
+ self.add_keypair()
self.create_security_group()
self.boot_instance()
- self.wait_on_active()
self.pause_server()
self.unpause_server()
self.suspend_server()
diff --git a/tempest/scenario/test_snapshot_pattern.py b/tempest/scenario/test_snapshot_pattern.py
index 6e305c1..95d2862 100644
--- a/tempest/scenario/test_snapshot_pattern.py
+++ b/tempest/scenario/test_snapshot_pattern.py
@@ -15,8 +15,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest.common.utils.data_utils import rand_name
-from tempest.common.utils.linux.remote_client import RemoteClient
from tempest.openstack.common import log as logging
from tempest.scenario import manager
@@ -35,66 +33,18 @@
"""
- def _wait_for_server_status(self, server, status):
- self.status_timeout(self.compute_client.servers,
- server.id,
- status)
-
- def _wait_for_image_status(self, image_id, status):
- self.status_timeout(self.image_client.images, image_id, status)
-
def _boot_image(self, image_id):
- name = rand_name('scenario-server-')
- client = self.compute_client
- flavor_id = self.config.compute.flavor_ref
- LOG.debug("name:%s, image:%s" % (name, image_id))
- server = client.servers.create(name=name,
- image=image_id,
- flavor=flavor_id,
- key_name=self.keypair.name)
- self.addCleanup(self.compute_client.servers.delete, server)
- self.assertEqual(name, server.name)
- self._wait_for_server_status(server, 'ACTIVE')
- server = client.servers.get(server) # getting network information
- LOG.debug("server:%s" % server)
- return server
+ create_kwargs = {
+ 'key_name': self.keypair.name
+ }
+ return self.create_server(self.compute_client, image=image_id,
+ create_kwargs=create_kwargs)
def _add_keypair(self):
- name = rand_name('scenario-keypair-')
- self.keypair = self.compute_client.keypairs.create(name=name)
- self.addCleanup(self.compute_client.keypairs.delete, self.keypair)
- self.assertEqual(name, self.keypair.name)
-
- def _create_security_group_rule(self):
- sgs = self.compute_client.security_groups.list()
- for sg in sgs:
- if sg.name == 'default':
- secgroup = sg
-
- ruleset = {
- # ssh
- 'ip_protocol': 'tcp',
- 'from_port': 22,
- 'to_port': 22,
- 'cidr': '0.0.0.0/0',
- 'group_id': None
- }
- sg_rule = self.compute_client.security_group_rules.create(secgroup.id,
- **ruleset)
- self.addCleanup(self.compute_client.security_group_rules.delete,
- sg_rule.id)
+ self.keypair = self.create_keypair()
def _ssh_to_server(self, server_or_ip):
- if isinstance(server_or_ip, basestring):
- ip = server_or_ip
- else:
- network_name_for_ssh = self.config.compute.network_for_ssh
- ip = server_or_ip.networks[network_name_for_ssh][0]
- username = self.config.scenario.ssh_user
- linux_client = RemoteClient(ip,
- username,
- pkey=self.keypair.private_key)
-
+ linux_client = self.get_remote_client(server_or_ip)
return linux_client.ssh_client
def _write_timestamp(self, server_or_ip):
@@ -102,17 +52,6 @@
ssh_client.exec_command('date > /tmp/timestamp; sync')
self.timestamp = ssh_client.exec_command('cat /tmp/timestamp')
- def _create_image(self, server):
- snapshot_name = rand_name('scenario-snapshot-')
- create_image_client = self.compute_client.servers.create_image
- image_id = create_image_client(server, snapshot_name)
- self.addCleanup(self.image_client.images.delete, image_id)
- self._wait_for_server_status(server, 'ACTIVE')
- self._wait_for_image_status(image_id, 'active')
- snapshot_image = self.image_client.images.get(image_id)
- self.assertEquals(snapshot_name, snapshot_image.name)
- return image_id
-
def _check_timestamp(self, server_or_ip):
ssh_client = self._ssh_to_server(server_or_ip)
got_timestamp = ssh_client.exec_command('cat /tmp/timestamp')
@@ -129,7 +68,7 @@
def test_snapshot_pattern(self):
# prepare for booting a instance
self._add_keypair()
- self._create_security_group_rule()
+ self.create_loginable_secgroup_rule()
# boot a instance and create a timestamp file in it
server = self._boot_image(self.config.compute.image_ref)
@@ -141,10 +80,10 @@
self._write_timestamp(server)
# snapshot the instance
- snapshot_image_id = self._create_image(server)
+ snapshot_image = self.create_server_snapshot(server=server)
# boot a second instance from the snapshot
- server_from_snapshot = self._boot_image(snapshot_image_id)
+ server_from_snapshot = self._boot_image(snapshot_image.id)
# check the existence of the timestamp file in the second instance
if self.config.compute.use_floatingip_for_ssh:
diff --git a/tempest/scenario/test_stamp_pattern.py b/tempest/scenario/test_stamp_pattern.py
index 4434604..3cbd1fa 100644
--- a/tempest/scenario/test_stamp_pattern.py
+++ b/tempest/scenario/test_stamp_pattern.py
@@ -21,7 +21,6 @@
import testtools
from tempest.common.utils.data_utils import rand_name
-from tempest.common.utils.linux.remote_client import RemoteClient
from tempest import exceptions
from tempest.openstack.common import log as logging
from tempest.scenario import manager
@@ -51,39 +50,19 @@
14. Check the existence of a file which created at 6. in volume2
"""
- def _wait_for_server_status(self, server, status):
- self.status_timeout(self.compute_client.servers,
- server.id,
- status)
-
- def _wait_for_image_status(self, image_id, status):
- self.status_timeout(self.image_client.images, image_id, status)
-
def _wait_for_volume_snapshot_status(self, volume_snapshot, status):
self.status_timeout(self.volume_client.volume_snapshots,
volume_snapshot.id, status)
def _boot_image(self, image_id):
- name = rand_name('scenario-server-')
- client = self.compute_client
- flavor_id = self.config.compute.flavor_ref
- LOG.debug("name:%s, image:%s" % (name, image_id))
- server = client.servers.create(name=name,
- image=image_id,
- flavor=flavor_id,
- key_name=self.keypair.name)
- self.addCleanup(self.compute_client.servers.delete, server)
- self.assertEqual(name, server.name)
- self._wait_for_server_status(server, 'ACTIVE')
- server = client.servers.get(server) # getting network information
- LOG.debug("server:%s" % server)
- return server
+ create_kwargs = {
+ 'key_name': self.keypair.name
+ }
+ return self.create_server(self.compute_client, image=image_id,
+ create_kwargs=create_kwargs)
def _add_keypair(self):
- name = rand_name('scenario-keypair-')
- self.keypair = self.compute_client.keypairs.create(name=name)
- self.addCleanup(self.compute_client.keypairs.delete, self.keypair)
- self.assertEqual(name, self.keypair.name)
+ self.keypair = self.create_keypair()
def _create_floating_ip(self):
floating_ip = self.compute_client.floating_ips.create()
@@ -93,52 +72,10 @@
def _add_floating_ip(self, server, floating_ip):
server.add_floating_ip(floating_ip)
- def _create_security_group_rule(self):
- sgs = self.compute_client.security_groups.list()
- for sg in sgs:
- if sg.name == 'default':
- secgroup = sg
-
- ruleset = {
- # ssh
- 'ip_protocol': 'tcp',
- 'from_port': 22,
- 'to_port': 22,
- 'cidr': '0.0.0.0/0',
- 'group_id': None
- }
- sg_rule = self.compute_client.security_group_rules.create(secgroup.id,
- **ruleset)
- self.addCleanup(self.compute_client.security_group_rules.delete,
- sg_rule.id)
-
- def _remote_client_to_server(self, server_or_ip):
- if isinstance(server_or_ip, basestring):
- ip = server_or_ip
- else:
- network_name_for_ssh = self.config.compute.network_for_ssh
- ip = server_or_ip.networks[network_name_for_ssh][0]
- username = self.config.scenario.ssh_user
- linux_client = RemoteClient(ip,
- username,
- pkey=self.keypair.private_key)
- return linux_client
-
def _ssh_to_server(self, server_or_ip):
- linux_client = self._remote_client_to_server(server_or_ip)
+ linux_client = self.get_remote_client(server_or_ip)
return linux_client.ssh_client
- def _create_image(self, server):
- snapshot_name = rand_name('scenario-snapshot-')
- create_image_client = self.compute_client.servers.create_image
- image_id = create_image_client(server, snapshot_name)
- self.addCleanup(self.image_client.images.delete, image_id)
- self._wait_for_server_status(server, 'ACTIVE')
- self._wait_for_image_status(image_id, 'active')
- snapshot_image = self.image_client.images.get(image_id)
- self.assertEquals(snapshot_name, snapshot_image.name)
- return image_id
-
def _create_volume_snapshot(self, volume):
snapshot_name = rand_name('scenario-snapshot-')
volume_snapshots = self.volume_client.volume_snapshots
@@ -163,20 +100,7 @@
self.volume_client.volumes, volume.id, status)
def _create_volume(self, snapshot_id=None):
- name = rand_name('scenario-volume-')
- LOG.debug("volume display-name:%s" % name)
- volume = self.volume_client.volumes.create(size=1,
- display_name=name,
- snapshot_id=snapshot_id)
- LOG.debug("volume created:%s" % volume.display_name)
-
- def cleaner():
- self._wait_for_volume_status(volume, 'available')
- self.volume_client.volumes.delete(volume)
- self.addCleanup(cleaner)
- self._wait_for_volume_status(volume, 'available')
- self.assertEqual(name, volume.display_name)
- return volume
+ return self.create_volume(snapshot_id=snapshot_id)
def _attach_volume(self, server, volume):
attach_volume_client = self.compute_client.volumes.create_server_volume
@@ -192,7 +116,7 @@
self._wait_for_volume_status(volume, 'available')
def _wait_for_volume_availible_on_the_system(self, server_or_ip):
- ssh = self._remote_client_to_server(server_or_ip)
+ ssh = self.get_remote_client(server_or_ip)
conf = self.config
def _func():
@@ -223,7 +147,7 @@
def test_stamp_pattern(self):
# prepare for booting a instance
self._add_keypair()
- self._create_security_group_rule()
+ self.create_loginable_secgroup_rule()
# boot an instance and create a timestamp file in it
volume = self._create_volume()
@@ -246,14 +170,14 @@
volume_snapshot = self._create_volume_snapshot(volume)
# snapshot the instance
- snapshot_image_id = self._create_image(server)
+ snapshot_image = self.create_server_snapshot(server=server)
# create second volume from the snapshot(volume2)
volume_from_snapshot = self._create_volume(
snapshot_id=volume_snapshot.id)
# boot second instance from the snapshot(instance2)
- server_from_snapshot = self._boot_image(snapshot_image_id)
+ server_from_snapshot = self._boot_image(snapshot_image.id)
# create and add floating IP to server_from_snapshot
if self.config.compute.use_floatingip_for_ssh:
diff --git a/tempest/scenario/test_volume_snapshot_pattern.py b/tempest/scenario/test_volume_snapshot_pattern.py
index 4d8a400..8fa177e 100644
--- a/tempest/scenario/test_volume_snapshot_pattern.py
+++ b/tempest/scenario/test_volume_snapshot_pattern.py
@@ -34,21 +34,9 @@
def _create_volume_from_image(self):
img_uuid = self.config.compute.image_ref
vol_name = rand_name('volume-origin')
- vol = self.volume_client.volumes.create(size=1,
- display_name=vol_name,
- imageRef=img_uuid)
- self.set_resource(vol.id, vol)
- self.status_timeout(self.volume_client.volumes,
- vol.id,
- 'available')
- return vol
+ return self.create_volume(name=vol_name, imageRef=img_uuid)
def _boot_instance_from_volume(self, vol_id):
- # NOTE(gfidente): the img_uuid here is only needed because
- # the novaclient requires it to be passed as arg
- img_uuid = self.config.compute.image_ref
- i_name = rand_name('instance')
- flavor_id = self.config.compute.flavor_ref
# NOTE(gfidente): the syntax for block_device_mapping is
# dev_name=id:type:size:delete_on_terminate
# where type needs to be "snap" if the server is booted
@@ -59,15 +47,8 @@
create_kwargs = {
'block_device_mapping': bd_map
}
- i = self.compute_client.servers.create(name=i_name,
- image=img_uuid,
- flavor=flavor_id,
- **create_kwargs)
- self.set_resource(i.id, i)
- self.status_timeout(self.compute_client.servers,
- i.id,
- 'ACTIVE')
- return i
+ return self.create_server(self.compute_client,
+ create_kwargs=create_kwargs)
def _create_snapshot_from_volume(self, vol_id):
volume_snapshots = self.volume_client.volume_snapshots
@@ -83,14 +64,7 @@
def _create_volume_from_snapshot(self, snap_id):
vol_name = rand_name('volume')
- vol = self.volume_client.volumes.create(size=1,
- display_name=vol_name,
- snapshot_id=snap_id)
- self.set_resource(vol.id, vol)
- self.status_timeout(self.volume_client.volumes,
- vol.id,
- 'available')
- return vol
+ return self.create_volume(name=vol_name, snapshot_id=snap_id)
def _stop_instances(self, instances):
# NOTE(gfidente): two loops so we do not wait for the status twice
diff --git a/tempest/services/compute/xml/flavors_client.py b/tempest/services/compute/xml/flavors_client.py
index 3a8986c..6fbb9e3 100644
--- a/tempest/services/compute/xml/flavors_client.py
+++ b/tempest/services/compute/xml/flavors_client.py
@@ -30,8 +30,6 @@
"http://docs.openstack.org/compute/ext/flavor_extra_data/api/v1.1"
XMLNS_OS_FLV_ACCESS = \
"http://docs.openstack.org/compute/ext/flavor_access/api/v1.1"
-XMLNS_OS_FLV_WITH_EXT_SPECS = \
- "http://docs.openstack.org/compute/ext/flavor_with_extra_specs/api/v2.0"
class FlavorsClientXML(RestClientXML):
@@ -51,7 +49,7 @@
if k == '{%s}ephemeral' % XMLNS_OS_FLV_EXT_DATA:
k = 'OS-FLV-EXT-DATA:ephemeral'
- if k == '{%s}extra_specs' % XMLNS_OS_FLV_WITH_EXT_SPECS:
+ if k == 'extra_specs':
k = 'OS-FLV-WITH-EXT-SPECS:extra_specs'
flavor[k] = dict(v)
continue
diff --git a/tempest/services/compute/xml/servers_client.py b/tempest/services/compute/xml/servers_client.py
index 12e7034..5c7a629 100644
--- a/tempest/services/compute/xml/servers_client.py
+++ b/tempest/services/compute/xml/servers_client.py
@@ -350,7 +350,7 @@
addrs = []
for child in node.getchildren():
addrs.append({'version': int(child.get('version')),
- 'addr': child.get('version')})
+ 'addr': child.get('addr')})
return {node.get('id'): addrs}
def list_addresses(self, server_id):
diff --git a/tempest/services/identity/json/identity_client.py b/tempest/services/identity/json/identity_client.py
index 90e64e7..47977df 100644
--- a/tempest/services/identity/json/identity_client.py
+++ b/tempest/services/identity/json/identity_client.py
@@ -12,9 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-import httplib2
import json
+from tempest.common import http
from tempest.common.rest_client import RestClient
from tempest import exceptions
@@ -260,7 +260,8 @@
def request(self, method, url, headers=None, body=None):
"""A simple HTTP request interface."""
dscv = self.config.identity.disable_ssl_certificate_validation
- self.http_obj = httplib2.Http(disable_ssl_certificate_validation=dscv)
+ self.http_obj = http.ClosingHttp(
+ disable_ssl_certificate_validation=dscv)
if headers is None:
headers = {}
diff --git a/tempest/services/identity/v3/xml/endpoints_client.py b/tempest/services/identity/v3/xml/endpoints_client.py
index f81fccf..e211cee 100644
--- a/tempest/services/identity/v3/xml/endpoints_client.py
+++ b/tempest/services/identity/v3/xml/endpoints_client.py
@@ -16,9 +16,9 @@
# under the License.
import urlparse
-import httplib2
from lxml import etree
+from tempest.common import http
from tempest.common.rest_client import RestClientXML
from tempest.services.compute.xml.common import Document
from tempest.services.compute.xml.common import Element
@@ -50,7 +50,8 @@
def request(self, method, url, headers=None, body=None, wait=None):
"""Overriding the existing HTTP request in super class RestClient."""
dscv = self.config.identity.disable_ssl_certificate_validation
- self.http_obj = httplib2.Http(disable_ssl_certificate_validation=dscv)
+ self.http_obj = http.ClosingHttp(
+ disable_ssl_certificate_validation=dscv)
self._set_auth()
self.base_url = self.base_url.replace(
urlparse.urlparse(self.base_url).path, "/v3")
diff --git a/tempest/services/identity/v3/xml/policy_client.py b/tempest/services/identity/v3/xml/policy_client.py
index c3f6d99..0f07728 100644
--- a/tempest/services/identity/v3/xml/policy_client.py
+++ b/tempest/services/identity/v3/xml/policy_client.py
@@ -17,9 +17,9 @@
from urlparse import urlparse
-import httplib2
from lxml import etree
+from tempest.common import http
from tempest.common.rest_client import RestClientXML
from tempest.services.compute.xml.common import Document
from tempest.services.compute.xml.common import Element
@@ -51,7 +51,8 @@
def request(self, method, url, headers=None, body=None, wait=None):
"""Overriding the existing HTTP request in super class RestClient."""
dscv = self.config.identity.disable_ssl_certificate_validation
- self.http_obj = httplib2.Http(disable_ssl_certificate_validation=dscv)
+ self.http_obj = http.ClosingHttp(
+ disable_ssl_certificate_validation=dscv)
self._set_auth()
self.base_url = self.base_url.replace(urlparse(self.base_url).path,
"/v3")
diff --git a/tempest/services/identity/xml/identity_client.py b/tempest/services/identity/xml/identity_client.py
index 99a155a..7a00b84 100644
--- a/tempest/services/identity/xml/identity_client.py
+++ b/tempest/services/identity/xml/identity_client.py
@@ -15,11 +15,11 @@
# License for the specific language governing permissions and limitations
# under the License.
-import httplib2
import json
from lxml import etree
+from tempest.common import http
from tempest.common.rest_client import RestClientXML
from tempest import exceptions
from tempest.services.compute.xml.common import Document
@@ -275,7 +275,8 @@
def request(self, method, url, headers=None, body=None):
"""A simple HTTP request interface."""
dscv = self.config.identity.disable_ssl_certificate_validation
- self.http_obj = httplib2.Http(disable_ssl_certificate_validation=dscv)
+ self.http_obj = http.ClosingHttp(
+ disable_ssl_certificate_validation=dscv)
if headers is None:
headers = {}
self._log_request(method, url, headers, body)
diff --git a/tempest/services/network/json/network_client.py b/tempest/services/network/json/network_client.py
index 2c808a9..588dc8f 100644
--- a/tempest/services/network/json/network_client.py
+++ b/tempest/services/network/json/network_client.py
@@ -17,22 +17,24 @@
from tempest.common.rest_client import RestClient
-class NetworkClient(RestClient):
+class NetworkClientJSON(RestClient):
"""
Tempest REST client for Neutron. Uses v2 of the Neutron API, since the
V1 API has been removed from the code base.
- Implements create, delete, list and show for the basic Neutron
- abstractions (networks, sub-networks and ports):
+ Implements create, delete, update, list and show for the basic Neutron
+    abstractions (networks, sub-networks, routers and ports).
+
+ Implements add/remove interface to router using subnet ID / port ID
It also implements list, show, update and reset for OpenStack Networking
quotas
"""
def __init__(self, config, username, password, auth_url, tenant_name=None):
- super(NetworkClient, self).__init__(config, username, password,
- auth_url, tenant_name)
+ super(NetworkClientJSON, self).__init__(config, username, password,
+ auth_url, tenant_name)
self.service = self.config.network.catalog_type
self.version = '2.0'
self.uri_prefix = "v%s" % (self.version)
@@ -55,6 +57,17 @@
body = json.loads(body)
return resp, body
+ def create_bulk_network(self, count, names):
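+        # Bulk create: a single POST to /networks with one entry per
+        # requested network name.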
+ network_list = list()
+ for i in range(count):
+ network_list.append({'name': names[i]})
+ post_body = {'networks': network_list}
+ body = json.dumps(post_body)
+ uri = '%s/networks' % (self.uri_prefix)
+ resp, body = self.post(uri, headers=self.headers, body=body)
+ body = json.loads(body)
+ return resp, body
+
def show_network(self, uuid):
uri = '%s/networks/%s' % (self.uri_prefix, uuid)
resp, body = self.get(uri, self.headers)
@@ -95,15 +108,14 @@
body = json.loads(body)
return resp, body
- def create_port(self, network_id, state=None):
- if not state:
- state = True
+ def create_port(self, network_id, **kwargs):
post_body = {
'port': {
'network_id': network_id,
- 'admin_state_up': state,
}
}
+ for key, val in kwargs.items():
+ post_body['port'][key] = val
body = json.dumps(post_body)
uri = '%s/ports' % (self.uri_prefix)
resp, body = self.post(uri, headers=self.headers, body=body)
@@ -187,3 +199,89 @@
resp, body = self.put(uri, body=body, headers=self.headers)
body = json.loads(body)
return resp, body
+
+ def list_routers(self):
+ uri = '%s/routers' % (self.uri_prefix)
+ resp, body = self.get(uri, self.headers)
+ body = json.loads(body)
+ return resp, body
+
+ def create_router(self, name, **kwargs):
+ post_body = {
+ 'router': {
+ 'name': name,
+ }
+ }
+ post_body['router']['admin_state_up'] = kwargs.get(
+ 'admin_state_up', True)
+ post_body['router']['external_gateway_info'] = kwargs.get(
+ 'external_gateway_info', None)
+ body = json.dumps(post_body)
+ uri = '%s/routers' % (self.uri_prefix)
+ resp, body = self.post(uri, headers=self.headers, body=body)
+ body = json.loads(body)
+ return resp, body
+
+ def delete_router(self, router_id):
+ uri = '%s/routers/%s' % (self.uri_prefix, router_id)
+ resp, body = self.delete(uri, self.headers)
+ return resp, body
+
+ def show_router(self, router_id):
+ uri = '%s/routers/%s' % (self.uri_prefix, router_id)
+ resp, body = self.get(uri, self.headers)
+ body = json.loads(body)
+ return resp, body
+
+ def update_router(self, router_id, **kwargs):
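+        # Read the current router first so that any field not passed in
+        # kwargs keeps its existing value in the update body.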
+ uri = '%s/routers/%s' % (self.uri_prefix, router_id)
+ resp, body = self.get(uri, self.headers)
+ body = json.loads(body)
+ update_body = {}
+ update_body['name'] = kwargs.get('name', body['router']['name'])
+ update_body['admin_state_up'] = kwargs.get(
+ 'admin_state_up', body['router']['admin_state_up'])
+ # Must uncomment/modify these lines once LP question#233187 is solved
+ #update_body['external_gateway_info'] = kwargs.get(
+ # 'external_gateway_info', body['router']['external_gateway_info'])
+ update_body = dict(router=update_body)
+ update_body = json.dumps(update_body)
+ resp, body = self.put(uri, update_body, self.headers)
+ body = json.loads(body)
+ return resp, body
+
+ def add_router_interface_with_subnet_id(self, router_id, subnet_id):
+ uri = '%s/routers/%s/add_router_interface' % (self.uri_prefix,
+ router_id)
+ update_body = {"subnet_id": subnet_id}
+ update_body = json.dumps(update_body)
+ resp, body = self.put(uri, update_body, self.headers)
+ body = json.loads(body)
+ return resp, body
+
+ def add_router_interface_with_port_id(self, router_id, port_id):
+ uri = '%s/routers/%s/add_router_interface' % (self.uri_prefix,
+ router_id)
+ update_body = {"port_id": port_id}
+ update_body = json.dumps(update_body)
+ resp, body = self.put(uri, update_body, self.headers)
+ body = json.loads(body)
+ return resp, body
+
+ def remove_router_interface_with_subnet_id(self, router_id, subnet_id):
+ uri = '%s/routers/%s/remove_router_interface' % (self.uri_prefix,
+ router_id)
+ update_body = {"subnet_id": subnet_id}
+ update_body = json.dumps(update_body)
+ resp, body = self.put(uri, update_body, self.headers)
+ body = json.loads(body)
+ return resp, body
+
+ def remove_router_interface_with_port_id(self, router_id, port_id):
+ uri = '%s/routers/%s/remove_router_interface' % (self.uri_prefix,
+ router_id)
+ update_body = {"port_id": port_id}
+ update_body = json.dumps(update_body)
+ resp, body = self.put(uri, update_body, self.headers)
+ body = json.loads(body)
+ return resp, body
diff --git a/tempest/services/network/xml/__init__.py b/tempest/services/network/xml/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/services/network/xml/__init__.py
diff --git a/tempest/services/network/xml/network_client.py b/tempest/services/network/xml/network_client.py
new file mode 100755
index 0000000..d4fb656
--- /dev/null
+++ b/tempest/services/network/xml/network_client.py
@@ -0,0 +1,172 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+import xml.etree.ElementTree as ET
+
+from tempest.common.rest_client import RestClientXML
+from tempest.services.compute.xml.common import Document
+from tempest.services.compute.xml.common import Element
+from tempest.services.compute.xml.common import xml_to_json
+
+
+class NetworkClientXML(RestClientXML):
+
+ def __init__(self, config, username, password, auth_url, tenant_name=None):
+ super(NetworkClientXML, self).__init__(config, username, password,
+ auth_url, tenant_name)
+ self.service = self.config.network.catalog_type
+ self.version = '2.0'
+ self.uri_prefix = "v%s" % (self.version)
+
+ def list_networks(self):
+ uri = '%s/networks' % (self.uri_prefix)
+ resp, body = self.get(uri, self.headers)
+ networks = self._parse_array(etree.fromstring(body))
+ networks = {"networks": networks}
+ return resp, networks
+
+ def create_network(self, name):
+ uri = '%s/networks' % (self.uri_prefix)
+ post_body = Element("network")
+ p2 = Element("name", name)
+ post_body.append(p2)
+ resp, body = self.post(uri, str(Document(post_body)), self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def create_bulk_network(self, count, names):
+ uri = '%s/networks' % (self.uri_prefix)
+ post_body = Element("networks")
+ for i in range(count):
+ p1 = Element("network")
+ p2 = Element("name", names[i])
+ p1.append(p2)
+ post_body.append(p1)
+ resp, body = self.post(uri, str(Document(post_body)), self.headers)
+ networks = self._parse_array(etree.fromstring(body))
+ networks = {"networks": networks}
+ return resp, networks
+
+ def delete_network(self, uuid):
+ uri = '%s/networks/%s' % (self.uri_prefix, str(uuid))
+ return self.delete(uri, self.headers)
+
+ def show_network(self, uuid):
+ uri = '%s/networks/%s' % (self.uri_prefix, str(uuid))
+ resp, body = self.get(uri, self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def create_subnet(self, net_uuid, cidr):
+ uri = '%s/subnets' % (self.uri_prefix)
+ subnet = Element("subnet")
+ p2 = Element("network_id", net_uuid)
+ p3 = Element("cidr", cidr)
+ p4 = Element("ip_version", 4)
+ subnet.append(p2)
+ subnet.append(p3)
+ subnet.append(p4)
+ resp, body = self.post(uri, str(Document(subnet)), self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def delete_subnet(self, subnet_id):
+ uri = '%s/subnets/%s' % (self.uri_prefix, str(subnet_id))
+ return self.delete(uri, self.headers)
+
+ def list_subnets(self):
+ uri = '%s/subnets' % (self.uri_prefix)
+ resp, body = self.get(uri, self.headers)
+ subnets = self._parse_array(etree.fromstring(body))
+ subnets = {"subnets": subnets}
+ return resp, subnets
+
+ def show_subnet(self, uuid):
+ uri = '%s/subnets/%s' % (self.uri_prefix, str(uuid))
+ resp, body = self.get(uri, self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def create_port(self, net_uuid, **kwargs):
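+        # Any extra kwargs are appended as child elements of the <port>
+        # document before it is POSTed.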
+ uri = '%s/ports' % (self.uri_prefix)
+ port = Element("port")
+ p1 = Element('network_id', net_uuid)
+ port.append(p1)
+ for key, val in kwargs.items():
+ key = Element(key, val)
+ port.append(key)
+ resp, body = self.post(uri, str(Document(port)), self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def delete_port(self, port_id):
+ uri = '%s/ports/%s' % (self.uri_prefix, str(port_id))
+ return self.delete(uri, self.headers)
+
+ def _parse_array(self, node):
+ array = []
+ for child in node.getchildren():
+ array.append(xml_to_json(child))
+ return array
+
+ def list_ports(self):
+ url = '%s/ports' % (self.uri_prefix)
+ resp, body = self.get(url, self.headers)
+ ports = self._parse_array(etree.fromstring(body))
+ ports = {"ports": ports}
+ return resp, ports
+
+ def show_port(self, port_id):
+ uri = '%s/ports/%s' % (self.uri_prefix, str(port_id))
+ resp, body = self.get(uri, self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def update_port(self, port_id, name):
+ uri = '%s/ports/%s' % (self.uri_prefix, str(port_id))
+ port = Element("port")
+ p2 = Element("name", name)
+ port.append(p2)
+ resp, body = self.put(uri, str(Document(port)), self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def update_subnet(self, subnet_id, name):
+ uri = '%s/subnets/%s' % (self.uri_prefix, str(subnet_id))
+ subnet = Element("subnet")
+ p2 = Element("name", name)
+ subnet.append(p2)
+ resp, body = self.put(uri, str(Document(subnet)), self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def update_network(self, net_id, name):
+ uri = '%s/networks/%s' % (self.uri_prefix, str(net_id))
+ network = Element("network")
+ p2 = Element("name", name)
+ network.append(p2)
+ resp, body = self.put(uri, str(Document(network)), self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+
+def _root_tag_fetcher_and_xml_to_json_parse(xml_returned_body):
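+    # Strip the XML namespace (if any) from the root tag and return the
+    # body converted to a dict keyed by that root tag.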
+ body = ET.fromstring(xml_returned_body)
+ root_tag = body.tag
+ if root_tag.startswith("{"):
+ ns, root_tag = root_tag.split("}", 1)
+ body = xml_to_json(etree.fromstring(xml_returned_body))
+ body = {root_tag: body}
+ return body
diff --git a/tempest/services/object_storage/account_client.py b/tempest/services/object_storage/account_client.py
index 8defbbb..eb9910f 100644
--- a/tempest/services/object_storage/account_client.py
+++ b/tempest/services/object_storage/account_client.py
@@ -15,10 +15,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-import httplib2
import json
import urllib
+from tempest.common import http
from tempest.common.rest_client import RestClient
from tempest import exceptions
@@ -108,7 +108,7 @@
def request(self, method, url, headers=None, body=None):
"""A simple HTTP request interface."""
- self.http_obj = httplib2.Http()
+ self.http_obj = http.ClosingHttp()
if headers is None:
headers = {}
if self.base_url is None:
diff --git a/tempest/services/object_storage/object_client.py b/tempest/services/object_storage/object_client.py
index 181838e..1c97869 100644
--- a/tempest/services/object_storage/object_client.py
+++ b/tempest/services/object_storage/object_client.py
@@ -17,9 +17,9 @@
import hashlib
import hmac
-import httplib2
import urlparse
+from tempest.common import http
from tempest.common.rest_client import RestClient
from tempest import exceptions
@@ -162,7 +162,8 @@
def request(self, method, url, headers=None, body=None):
"""A simple HTTP request interface."""
dscv = self.config.identity.disable_ssl_certificate_validation
- self.http_obj = httplib2.Http(disable_ssl_certificate_validation=dscv)
+ self.http_obj = http.ClosingHttp(
+ disable_ssl_certificate_validation=dscv)
if headers is None:
headers = {}
if self.base_url is None:
diff --git a/tempest/services/orchestration/json/orchestration_client.py b/tempest/services/orchestration/json/orchestration_client.py
index 22f3f26..e896e0d 100644
--- a/tempest/services/orchestration/json/orchestration_client.py
+++ b/tempest/services/orchestration/json/orchestration_client.py
@@ -42,7 +42,7 @@
resp, body = self.get(uri)
body = json.loads(body)
- return resp, body
+ return resp, body['stacks']
def create_stack(self, name, disable_rollback=True, parameters={},
timeout_mins=60, template=None, template_url=None):
@@ -135,7 +135,7 @@
# been created yet
pass
else:
- resource_name = body['logical_resource_id']
+ resource_name = body['resource_name']
resource_status = body['resource_status']
if resource_status == status:
return
@@ -176,3 +176,64 @@
(stack_name, status, self.build_timeout))
raise exceptions.TimeoutException(message)
time.sleep(self.build_interval)
+
+ def show_resource_metadata(self, stack_identifier, resource_name):
+ """Returns the resource's metadata."""
+ url = ('stacks/{stack_identifier}/resources/{resource_name}'
+ '/metadata'.format(**locals()))
+ resp, body = self.get(url)
+ body = json.loads(body)
+ return resp, body['metadata']
+
+ def list_events(self, stack_identifier):
+ """Returns list of all events for a stack."""
+ url = 'stacks/{stack_identifier}/events'.format(**locals())
+ resp, body = self.get(url)
+ body = json.loads(body)
+ return resp, body['events']
+
+ def list_resource_events(self, stack_identifier, resource_name):
+ """Returns list of all events for a resource from stack."""
+ url = ('stacks/{stack_identifier}/resources/{resource_name}'
+ '/events'.format(**locals()))
+ resp, body = self.get(url)
+ body = json.loads(body)
+ return resp, body['events']
+
+ def show_event(self, stack_identifier, resource_name, event_id):
+ """Returns the details of a single stack's event."""
+ url = ('stacks/{stack_identifier}/resources/{resource_name}/events'
+ '/{event_id}'.format(**locals()))
+ resp, body = self.get(url)
+ body = json.loads(body)
+ return resp, body['event']
+
+ def show_template(self, stack_identifier):
+ """Returns the template for the stack."""
+ url = ('stacks/{stack_identifier}/template'.format(**locals()))
+ resp, body = self.get(url)
+ body = json.loads(body)
+ return resp, body
+
+ def _validate_template(self, post_body):
+ """Returns the validation request result."""
+ post_body = json.dumps(post_body)
+ resp, body = self.post('validate', post_body, self.headers)
+ body = json.loads(body)
+ return resp, body
+
+ def validate_template(self, template, parameters={}):
+ """Returns the validation result for a template with parameters."""
+ post_body = {
+ 'template': template,
+ 'parameters': parameters,
+ }
+ return self._validate_template(post_body)
+
+ def validate_template_url(self, template_url, parameters={}):
+ """Returns the validation result for a template with parameters."""
+ post_body = {
+ 'template_url': template_url,
+ 'parameters': parameters,
+ }
+ return self._validate_template(post_body)
diff --git a/tempest/stress/actions/unit_test.py b/tempest/stress/actions/unit_test.py
new file mode 100644
index 0000000..5ab5573
--- /dev/null
+++ b/tempest/stress/actions/unit_test.py
@@ -0,0 +1,87 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.openstack.common import importutils
+from tempest.openstack.common import log as logging
+import tempest.stress.stressaction as stressaction
+
+
+class SetUpClassRunTime(object):
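+    """Allowed values for how often setUpClass runs during a stress job."""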
+
+ process = 'process'
+ action = 'action'
+ application = 'application'
+
+ allowed = set((process, action, application))
+
+ @classmethod
+ def validate(cls, name):
+ if name not in cls.allowed:
+ raise KeyError("\'%s\' not a valid option" % name)
+
+
+class UnitTest(stressaction.StressAction):
+    """This is a special action for running existing unit tests as a stress test.
+ You need to pass ``test_method`` and ``class_setup_per``
+ using ``kwargs`` in the JSON descriptor;
+ ``test_method`` should be the fully qualified name of a unittest,
+    ``class_setup_per`` should be one of:
+ ``application``: once in the stress job lifetime
+ ``process``: once in the worker process lifetime
+ ``action``: on each action
+    Not all combinations work in every case.
+ """
+
+ def setUp(self, **kwargs):
+ method = kwargs['test_method'].split('.')
+ self.test_method = method.pop()
+ self.klass = importutils.import_class('.'.join(method))
+ self.logger = logging.getLogger('.'.join(method))
+        # valid options are 'process', 'application', 'action'
+ self.class_setup_per = kwargs.get('class_setup_per',
+ SetUpClassRunTime.process)
+ SetUpClassRunTime.validate(self.class_setup_per)
+
+ if self.class_setup_per == SetUpClassRunTime.application:
+ self.klass.setUpClass()
+ self.setupclass_called = False
+
+ @property
+ def action(self):
+ if self.test_method:
+ return self.test_method
+ return super(UnitTest, self).action
+
+ def run_core(self):
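+        # Run the wrapped unit test once; surface any errors it collects.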
+ res = self.klass(self.test_method).run()
+ if res.errors:
+ raise RuntimeError(res.errors)
+
+ def run(self):
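+        # Invoke setUpClass/tearDownClass around run_core() according to
+        # the configured class_setup_per granularity.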
+ if self.class_setup_per != SetUpClassRunTime.application:
+ if (self.class_setup_per == SetUpClassRunTime.action
+ or self.setupclass_called is False):
+ self.klass.setUpClass()
+ self.setupclass_called = True
+
+ self.run_core()
+
+ if (self.class_setup_per == SetUpClassRunTime.action):
+ self.klass.tearDownClass()
+ else:
+ self.run_core()
+
+ def tearDown(self):
+ if self.class_setup_per != SetUpClassRunTime.action:
+ self.klass.tearDownClass()
diff --git a/tempest/stress/driver.py b/tempest/stress/driver.py
index efc57a9..e518d28 100644
--- a/tempest/stress/driver.py
+++ b/tempest/stress/driver.py
@@ -102,6 +102,8 @@
"""
logfiles = admin_manager.config.stress.target_logfiles
log_check_interval = int(admin_manager.config.stress.log_check_interval)
+ default_thread_num = int(admin_manager.config.stress.
+ default_thread_number_per_action)
if logfiles:
controller = admin_manager.config.stress.target_controller
computes = _get_compute_nodes(controller)
@@ -112,7 +114,7 @@
manager = admin_manager
else:
manager = clients.Manager()
- for p_number in xrange(test.get('threads', 1)):
+ for p_number in xrange(test.get('threads', default_thread_num)):
if test.get('use_isolated_tenants', False):
username = rand_name("stress_user")
tenant_name = rand_name("stress_tenant")
@@ -146,7 +148,7 @@
process = {'process': p,
'p_number': p_number,
- 'action': test['action'],
+ 'action': test_run.action,
'statistic': shared_statistic}
processes.append(process)
diff --git a/tempest/stress/etc/sample-unit-test.json b/tempest/stress/etc/sample-unit-test.json
new file mode 100644
index 0000000..b388bfe
--- /dev/null
+++ b/tempest/stress/etc/sample-unit-test.json
@@ -0,0 +1,8 @@
+[{"action": "tempest.stress.actions.unit_test.UnitTest",
+ "threads": 8,
+ "use_admin": false,
+ "use_isolated_tenants": false,
+ "kwargs": {"test_method": "tempest.cli.simple_read_only.test_glance.SimpleReadOnlyGlanceClientTest.test_glance_fake_action",
+ "class_setup_per": "process"}
+ }
+]
diff --git a/tempest/stress/run_stress.py b/tempest/stress/run_stress.py
index 32e3ae0..aab2afd 100755
--- a/tempest/stress/run_stress.py
+++ b/tempest/stress/run_stress.py
@@ -19,13 +19,52 @@
import argparse
import json
import sys
+from testtools.testsuite import iterate_tests
+from unittest import loader
+
+
+def discover_stress_tests(path="./", filter_attr=None):
+    """Discovers all tempest tests and creates actions out of them.
+ """
+
+ tests = []
+ testloader = loader.TestLoader()
+ list = testloader.discover(path)
+ for func in (iterate_tests(list)):
+ try:
+ method_name = getattr(func, '_testMethodName')
+ full_name = "%s.%s.%s" % (func.__module__,
+ func.__class__.__name__,
+ method_name)
+ test_func = getattr(func, method_name)
+ # NOTE(mkoderer): this contains a list of all type attributes
+ attrs = getattr(test_func, "__testtools_attrs")
+ except Exception:
+            continue
+ if 'stress' in attrs:
+ if filter_attr is not None and not filter_attr in attrs:
+ continue
+ class_setup_per = getattr(test_func, "st_class_setup_per")
+
+ action = {'action':
+ "tempest.stress.actions.unit_test.UnitTest",
+ 'kwargs': {"test_method": full_name,
+ "class_setup_per": class_setup_per
+ }
+ }
+ tests.append(action)
+ return tests
def main(ns):
# NOTE(mkoderer): moved import to make "-h" possible without OpenStack
from tempest.stress import driver
result = 0
- tests = json.load(open(ns.tests, 'r'))
+ if not ns.all:
+ tests = json.load(open(ns.tests, 'r'))
+ else:
+ tests = discover_stress_tests(filter_attr=ns.type)
+
if ns.serial:
for test in tests:
step_result = driver.stress_openstack([test],
@@ -49,7 +88,13 @@
default=False, help="Stop on first error.")
parser.add_argument('-n', '--number', type=int,
help="How often an action is executed for each process.")
-parser.add_argument('tests', help="Name of the file with test description.")
+group = parser.add_mutually_exclusive_group(required=True)
+group.add_argument('-a', '--all', action='store_true',
+ help="Execute all stress tests")
+parser.add_argument('-T', '--type',
+ help="Filters tests of a certain type (e.g. gate)")
+group.add_argument('-t', "--tests", nargs='?',
+ help="Name of the file with test description.")
if __name__ == "__main__":
sys.exit(main(parser.parse_args()))
diff --git a/tempest/stress/stressaction.py b/tempest/stress/stressaction.py
index 3719841..28251af 100644
--- a/tempest/stress/stressaction.py
+++ b/tempest/stress/stressaction.py
@@ -33,6 +33,13 @@
self.tearDown()
sys.exit(0)
+ @property
+ def action(self):
+        """This method returns the action. Overload this if you
+ create a stress test wrapper.
+ """
+ return self.__class__.__name__
+
def setUp(self, **kwargs):
"""This method is called before the run method
        to help the test initialize any structures.
@@ -60,6 +67,8 @@
while self.max_runs is None or (shared_statistic['runs'] <
self.max_runs):
+ self.logger.debug("Trigger new run (run %d)" %
+ shared_statistic['runs'])
try:
self.run()
except Exception:
diff --git a/tempest/test.py b/tempest/test.py
index 41f3090..ccb985a 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -57,6 +57,27 @@
return decorator
+def stresstest(*args, **kwargs):
+ """Add stress test decorator
+
+    For all functions with this decorator an attr ``stress`` will be
+    set automatically.
+
+ @param class_setup_per: allowed values are application, process, action
+ ``application``: once in the stress job lifetime
+ ``process``: once in the worker process lifetime
+ ``action``: on each action
+    ``action``: on each action
+    """
+ def decorator(f):
+ if 'class_setup_per' in kwargs:
+ setattr(f, "st_class_setup_per", kwargs['class_setup_per'])
+ else:
+ setattr(f, "st_class_setup_per", 'process')
+ attr(type='stress')(f)
+ return f
+ return decorator
+
+
# there is a mis-match between nose and testtools for older pythons.
# testtools will set skipException to be either
# unittest.case.SkipTest, unittest2.case.SkipTest or an internal skip
@@ -97,7 +118,7 @@
def validate_tearDownClass():
if at_exit_set:
- raise RuntimeError("tearDownClass does not calls the super's"
+        raise RuntimeError("tearDownClass does not call the super's "
"tearDownClass in these classes: "
+ str(at_exit_set))
diff --git a/tempest/tests/README.rst b/tempest/tests/README.rst
new file mode 100644
index 0000000..4098686
--- /dev/null
+++ b/tempest/tests/README.rst
@@ -0,0 +1,25 @@
+Tempest Guide to Unit tests
+===========================
+
+What are these tests?
+---------------------
+
+Unit tests are the self-checks for Tempest. They provide functional
+verification and regression checking for the internal components of tempest.
+They should be used just to verify that the individual pieces of tempest are
+working as expected. They should not require an external service to be running
+and should be able to run solely from the tempest tree.
+
+Why are these tests in tempest?
+-------------------------------
+These tests exist to make sure that the mechanisms that we use inside of
+tempest are valid and remain functional. They are only here for
+self-validation of tempest.
+
+
+Scope of these tests
+--------------------
+Unit tests should not require an external service to be running or any extra
+configuration to run. Any state that is required for a test should either be
+mocked out or created in a temporary test directory (see test_wrappers.py for
+an example of using a temporary test directory).
diff --git a/tempest/tests/__init__.py b/tempest/tests/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/tests/__init__.py
diff --git a/tempest/tests/files/__init__.py b/tempest/tests/files/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/tests/files/__init__.py
diff --git a/tempest/tests/files/failing-tests b/tempest/tests/files/failing-tests
new file mode 100644
index 0000000..0ec5421
--- /dev/null
+++ b/tempest/tests/files/failing-tests
@@ -0,0 +1,25 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import testtools
+
+class FakeTestClass(testtools.TestCase):
+ def test_pass(self):
+ self.assertTrue(False)
+
+ def test_pass_list(self):
+ test_list = ['test', 'a', 'b']
+ self.assertIn('fail', test_list)
diff --git a/tempest/tests/files/passing-tests b/tempest/tests/files/passing-tests
new file mode 100644
index 0000000..2f5b7c9
--- /dev/null
+++ b/tempest/tests/files/passing-tests
@@ -0,0 +1,25 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import testtools
+
+class FakeTestClass(testtools.TestCase):
+ def test_pass(self):
+ self.assertTrue(True)
+
+ def test_pass_list(self):
+ test_list = ['test', 'a', 'b']
+ self.assertIn('test', test_list)
diff --git a/tempest/tests/files/setup.cfg b/tempest/tests/files/setup.cfg
new file mode 100644
index 0000000..8639baa
--- /dev/null
+++ b/tempest/tests/files/setup.cfg
@@ -0,0 +1,20 @@
+[metadata]
+name = tempest_unit_tests
+version = 1
+summary = Fake Project for testing wrapper scripts
+author = OpenStack QA
+author-email = openstack-qa@lists.openstack.org
+home-page = http://www.openstack.org/
+classifier =
+ Intended Audience :: Information Technology
+ Intended Audience :: System Administrators
+ Intended Audience :: Developers
+ License :: OSI Approved :: Apache Software License
+ Operating System :: POSIX :: Linux
+ Programming Language :: Python
+ Programming Language :: Python :: 2
+ Programming Language :: Python :: 2.7
+
+[global]
+setup-hooks =
+ pbr.hooks.setup_hook
diff --git a/tempest/tests/files/testr-conf b/tempest/tests/files/testr-conf
new file mode 100644
index 0000000..d5ad083
--- /dev/null
+++ b/tempest/tests/files/testr-conf
@@ -0,0 +1,5 @@
+[DEFAULT]
+test_command=${PYTHON:-python} -m subunit.run discover -t ./ ./tests $LISTOPT $IDOPTION
+test_id_option=--load-list $IDFILE
+test_list_option=--list
+group_regex=([^\.]*\.)*
diff --git a/tempest/tests/test_wrappers.py b/tempest/tests/test_wrappers.py
new file mode 100644
index 0000000..aeea98d
--- /dev/null
+++ b/tempest/tests/test_wrappers.py
@@ -0,0 +1,103 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import shutil
+import subprocess
+import tempfile
+import testtools
+
+from tempest.test import attr
+
+DEVNULL = open(os.devnull, 'wb')
+
+
+class TestWrappers(testtools.TestCase):
+ def setUp(self):
+ super(TestWrappers, self).setUp()
+ # Setup test dirs
+ self.directory = tempfile.mkdtemp(prefix='tempest-unit')
+ self.test_dir = os.path.join(self.directory, 'tests')
+ os.mkdir(self.test_dir)
+ # Setup Test files
+ self.testr_conf_file = os.path.join(self.directory, '.testr.conf')
+ self.setup_cfg_file = os.path.join(self.directory, 'setup.cfg')
+ self.passing_file = os.path.join(self.test_dir, 'test_passing.py')
+ self.failing_file = os.path.join(self.test_dir, 'test_failing.py')
+ self.init_file = os.path.join(self.test_dir, '__init__.py')
+ self.setup_py = os.path.join(self.directory, 'setup.py')
+ shutil.copy('tempest/tests/files/testr-conf', self.testr_conf_file)
+ shutil.copy('tempest/tests/files/passing-tests', self.passing_file)
+ shutil.copy('tempest/tests/files/failing-tests', self.failing_file)
+ shutil.copy('setup.py', self.setup_py)
+ shutil.copy('tempest/tests/files/setup.cfg', self.setup_cfg_file)
+ shutil.copy('tempest/tests/files/__init__.py', self.init_file)
+
+ @attr(type='smoke')
+ def test_pretty_tox(self):
+ # Copy wrapper script and requirements:
+ pretty_tox = os.path.join(self.directory, 'pretty_tox.sh')
+ shutil.copy('tools/pretty_tox.sh', pretty_tox)
+ # Change directory, run wrapper and check result
+ self.addCleanup(os.chdir, os.path.abspath(os.curdir))
+ os.chdir(self.directory)
+ # Git init is required for the pbr testr command. pbr requires a git
+        # version or an sdist to work, so make the test directory a git repo
+ # too.
+ subprocess.call(['git', 'init'])
+ exit_code = subprocess.call('sh pretty_tox.sh tests.passing',
+ shell=True, stdout=DEVNULL, stderr=DEVNULL)
+ self.assertEquals(exit_code, 0)
+
+ @attr(type='smoke')
+ def test_pretty_tox_fails(self):
+ # Copy wrapper script and requirements:
+ pretty_tox = os.path.join(self.directory, 'pretty_tox.sh')
+ shutil.copy('tools/pretty_tox.sh', pretty_tox)
+ # Change directory, run wrapper and check result
+ self.addCleanup(os.chdir, os.path.abspath(os.curdir))
+ os.chdir(self.directory)
+ # Git init is required for the pbr testr command. pbr requires a git
+        # version or an sdist to work, so make the test directory a git repo
+ # too.
+ subprocess.call(['git', 'init'])
+ exit_code = subprocess.call('sh pretty_tox.sh', shell=True,
+ stdout=DEVNULL, stderr=DEVNULL)
+ self.assertEquals(exit_code, 1)
+
+ @attr(type='smoke')
+ def test_pretty_tox_serial(self):
+ # Copy wrapper script and requirements:
+ pretty_tox = os.path.join(self.directory, 'pretty_tox_serial.sh')
+ shutil.copy('tools/pretty_tox_serial.sh', pretty_tox)
+ # Change directory, run wrapper and check result
+ self.addCleanup(os.chdir, os.path.abspath(os.curdir))
+ os.chdir(self.directory)
+ exit_code = subprocess.call('sh pretty_tox_serial.sh tests.passing',
+ shell=True, stdout=DEVNULL, stderr=DEVNULL)
+ self.assertEquals(exit_code, 0)
+
+ @attr(type='smoke')
+ def test_pretty_tox_serial_fails(self):
+ # Copy wrapper script and requirements:
+ pretty_tox = os.path.join(self.directory, 'pretty_tox_serial.sh')
+ shutil.copy('tools/pretty_tox_serial.sh', pretty_tox)
+ # Change directory, run wrapper and check result
+ self.addCleanup(os.chdir, os.path.abspath(os.curdir))
+ os.chdir(self.directory)
+ exit_code = subprocess.call('sh pretty_tox_serial.sh', shell=True,
+ stdout=DEVNULL, stderr=DEVNULL)
+ self.assertEquals(exit_code, 1)
diff --git a/tempest/whitebox/manager.py b/tempest/whitebox/manager.py
index b2632f1..3b1b107 100644
--- a/tempest/whitebox/manager.py
+++ b/tempest/whitebox/manager.py
@@ -72,7 +72,7 @@
cls.flavor_ref = cls.config.compute.flavor_ref
cls.flavor_ref_alt = cls.config.compute.flavor_ref_alt
- #NOTE(afazekas): Mimics the helper method used in the api tests
+ # NOTE(afazekas): Mimics the helper method used in the api tests
@classmethod
def create_server(cls, **kwargs):
flavor_ref = cls.config.compute.flavor_ref
@@ -127,7 +127,7 @@
cmd = shlex.split(cmd)
result = subprocess.Popen(cmd, stdout=subprocess.PIPE)
- #Todo(rohitk): Need to define host connection parameters in config
+ # TODO(rohitk): Need to define host connection parameters in config
else:
client = self.get_ssh_connection(self.config.whitebox.api_host,
self.config.whitebox.api_user,
diff --git a/tempest/whitebox/test_images_whitebox.py b/tempest/whitebox/test_images_whitebox.py
index 0afb17e..06dcd7f 100644
--- a/tempest/whitebox/test_images_whitebox.py
+++ b/tempest/whitebox/test_images_whitebox.py
@@ -16,10 +16,13 @@
# under the License.
from tempest.common.utils.data_utils import rand_name
+from tempest.openstack.common import log as logging
from tempest.whitebox import manager
from novaclient import exceptions
+LOG = logging.getLogger(__name__)
+
class ImagesWhiteboxTest(manager.ComputeWhiteboxTest):
_interface = 'json'
@@ -65,8 +68,9 @@
self.create_image,
self.shared_server.id, image_name)
except Exception:
- self.fail("Should not allow create image when vm_state=%s and "
+ LOG.error("Should not allow create image when vm_state=%s and "
"task_state=%s" % (vm_state, task_state))
+ raise
finally:
self.update_state(self.shared_server.id, 'active', None)
diff --git a/tempest/whitebox/test_servers_whitebox.py b/tempest/whitebox/test_servers_whitebox.py
index 1c1cdeb..b6c888c 100644
--- a/tempest/whitebox/test_servers_whitebox.py
+++ b/tempest/whitebox/test_servers_whitebox.py
@@ -17,8 +17,11 @@
from tempest.api.identity.base import BaseIdentityAdminTest
from tempest import exceptions
+from tempest.openstack.common import log as logging
from tempest.whitebox import manager
+LOG = logging.getLogger(__name__)
+
class ServersWhiteboxTest(manager.ComputeWhiteboxTest):
_interface = 'json'
@@ -26,7 +29,7 @@
@classmethod
def setUpClass(cls):
super(ServersWhiteboxTest, cls).setUpClass()
- #NOTE(afazekas): Strange relationship
+ # NOTE(afazekas): Strange relationship
BaseIdentityAdminTest.setUpClass()
cls.client = cls.servers_client
cls.img_client = cls.images_client
@@ -66,25 +69,21 @@
Base method for delete server tests based on vm and task states.
Validates for successful server termination.
"""
- try:
- server = self.create_server()
- self.update_state(server['id'], vm_state, task_state)
+ server = self.create_server()
+ self.update_state(server['id'], vm_state, task_state)
- resp, body = self.client.delete_server(server['id'])
- self.assertEqual('204', resp['status'])
- self.client.wait_for_server_termination(server['id'],
- ignore_error=True)
+ resp, body = self.client.delete_server(server['id'])
+ self.assertEqual('204', resp['status'])
+ self.client.wait_for_server_termination(server['id'],
+ ignore_error=True)
- instances = self.meta.tables['instances']
- stmt = instances.select().where(instances.c.uuid == server['id'])
- result = self.connection.execute(stmt).first()
+ instances = self.meta.tables['instances']
+ stmt = instances.select().where(instances.c.uuid == server['id'])
+ result = self.connection.execute(stmt).first()
- self.assertEqual(True, result.deleted > 0)
- self.assertEqual('deleted', result.vm_state)
- self.assertEqual(None, result.task_state)
- except Exception:
- self.fail("Should be able to delete a server when vm_state=%s and "
- "task_state=%s" % (vm_state, task_state))
+ self.assertEqual(True, result.deleted > 0)
+ self.assertEqual('deleted', result.vm_state)
+ self.assertEqual(None, result.task_state)
def _test_delete_server_403_base(self, vm_state, task_state):
"""
@@ -98,8 +97,9 @@
self.client.delete_server,
self.shared_server['id'])
except Exception:
- self.fail("Should not allow delete server when vm_state=%s and "
+ LOG.error("Should not allow delete server when vm_state=%s and "
"task_state=%s" % (vm_state, task_state))
+ raise
finally:
self.update_state(self.shared_server['id'], 'active', None)
diff --git a/test-requirements.txt b/test-requirements.txt
index 236a473..6c313ca 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,8 +1,4 @@
-# Install bounded pep8/pyflakes first, then let flake8 install
-pep8==1.4.5
-pyflakes==0.7.2
-flake8==2.0
-hacking>=0.5.6,<0.7
+hacking>=0.5.6,<0.8
# needed for doc build
docutils==0.9.1
sphinx>=1.1.2
diff --git a/tools/skip_tracker.py b/tools/skip_tracker.py
index 1ed6961..c244808 100755
--- a/tools/skip_tracker.py
+++ b/tools/skip_tracker.py
@@ -61,7 +61,7 @@
"""
Return the skip tuples in a test file
"""
- BUG_RE = re.compile(r'.*skip\(.*bug:*\s*\#*(\d+)', re.IGNORECASE)
+ BUG_RE = re.compile(r'.*skip.*bug:*\s*\#*(\d+)', re.IGNORECASE)
DEF_RE = re.compile(r'.*def (\w+)\(')
bug_found = False
results = []
diff --git a/tox.ini b/tox.ini
index dc8980d..0b57eb2 100644
--- a/tox.ini
+++ b/tox.ini
@@ -19,17 +19,57 @@
# The regex below is used to select which tests to run and exclude the slow tag:
# See the testrepostiory bug: https://bugs.launchpad.net/testrepository/+bug/1208610
commands =
- sh tools/pretty_tox_serial.sh '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario|thirdparty|cli)) {posargs}'
+ sh tools/pretty_tox.sh '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario|thirdparty|cli|tests)) {posargs}'
[testenv:testr-full]
sitepackages = True
setenv = VIRTUAL_ENV={envdir}
commands =
- sh tools/pretty_tox.sh '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario|thirdparty|cli)) {posargs}'
+ sh tools/pretty_tox.sh '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario|thirdparty|cli|tests)) {posargs}'
+
+[testenv:heat-slow]
+sitepackages = True
+setenv = VIRTUAL_ENV={envdir}
+# The regex below is used to select heat api/scenario tests tagged as slow.
+commands =
+ sh tools/pretty_tox_serial.sh '(?=.*\[.*\bslow\b.*\])(^tempest\.(api|scenario)\.orchestration) {posargs}'
+
+[testenv:large-ops]
+sitepackages = True
+setenv = VIRTUAL_ENV={envdir}
+commands =
+ python setup.py testr --slowest --testr-args='tempest.scenario.test_large_ops {posargs}'
+
+
+[testenv:py26-full]
+sitepackages = True
+setenv = VIRTUAL_ENV={envdir}
+ NOSE_WITH_OPENSTACK=1
+ NOSE_OPENSTACK_COLOR=1
+ NOSE_OPENSTACK_RED=15
+ NOSE_OPENSTACK_YELLOW=3
+ NOSE_OPENSTACK_SHOW_ELAPSED=1
+ NOSE_OPENSTACK_STDOUT=1
+commands =
+ nosetests --logging-format '%(asctime)-15s %(message)s' --with-xunit -sv --xunit-file=nosetests-full.xml tempest/api tempest/scenario tempest/thirdparty tempest/cli tempest/tests {posargs}
+
+[testenv:py26-smoke]
+setenv = VIRTUAL_ENV={envdir}
+         NOSE_WITH_OPENSTACK=1
+ NOSE_OPENSTACK_COLOR=1
+ NOSE_OPENSTACK_RED=15
+ NOSE_OPENSTACK_YELLOW=3
+ NOSE_OPENSTACK_SHOW_ELAPSED=1
+ NOSE_OPENSTACK_STDOUT=1
+commands =
+ nosetests --logging-format '%(asctime)-15s %(message)s' --with-xunit -sv --attr=type=smoke --xunit-file=nosetests-smoke.xml tempest {posargs}
[testenv:smoke]
sitepackages = True
setenv = VIRTUAL_ENV={envdir}
+# This is still serial because neutron doesn't work in parallel (see
+# https://bugs.launchpad.net/tempest/+bug/1216076); the neutron smoke
+# job would fail if we moved it to parallel.
commands =
sh tools/pretty_tox_serial.sh 'smoke {posargs}'
@@ -38,14 +78,14 @@
setenv = VIRTUAL_ENV={envdir}
commands =
python -m tools/tempest_coverage -c start --combine
- sh tools/pretty_tox_serial.sh '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario|thirdparty|cli))'
+ sh tools/pretty_tox.sh '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario|thirdparty|cli|tests))'
python -m tools/tempest_coverage -c report --html {posargs}
[testenv:stress]
sitepackages = True
setenv = VIRTUAL_ENV={envdir}
commands =
- python -m tempest/stress/run_stress tempest/stress/etc/stress-tox-job.json -d 3600
+ python -m tempest/stress/run_stress -a -d 3600
[testenv:venv]
commands = {posargs}