Merge "Protected matcher import"
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index f1aaa07..703d92a 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -1,5 +1,5 @@
[DEFAULT]
-# log_config = /opt/stack/tempest/etc/logging.conf.sample
+#log_config = /opt/stack/tempest/etc/logging.conf.sample
# disable logging to the stderr
use_stderr = False
@@ -272,6 +272,9 @@
# Set to True if the Account Quota middleware is enabled
accounts_quotas_available = True
+# Set operator role for tests that require creating a container
+operator_role = Member
+
[boto]
# This section contains configuration options used when executing tests
# with boto.
@@ -285,7 +288,7 @@
aws_access =
aws_secret =
-#Image materials for S3 upload
+# Image materials for S3 upload
# ALL content of the specified directory will be uploaded to S3
s3_materials_path = /opt/stack/devstack/files/images/s3-materials/cirros-0.3.1
@@ -293,22 +296,22 @@
# Subdirectories not allowed!
# The filenames will be used as a Keys in the S3 Buckets
-#ARI Ramdisk manifest. Must be in the above s3_materials_path
+# ARI Ramdisk manifest. Must be in the above s3_materials_path
ari_manifest = cirros-0.3.1-x86_64-initrd.manifest.xml
-#AMI Machine Image manifest. Must be in the above s3_materials_path
+# AMI Machine Image manifest. Must be in the above s3_materials_path
ami_manifest = cirros-0.3.1-x86_64-blank.img.manifest.xml
-#AKI Kernel Image manifest, Must be in the above s3_materials_path
+# AKI Kernel Image manifest, Must be in the above s3_materials_path
aki_manifest = cirros-0.3.1-x86_64-vmlinuz.manifest.xml
-#Instance type
+# Instance type
instance_type = m1.tiny
-#TCP/IP connection timeout
+# TCP/IP connection timeout
http_socket_timeout = 5
-#Number of retries actions on connection or 5xx error
+# Number of retry actions on connection or 5xx error
num_retries = 1
# Status change wait timeout
diff --git a/requirements.txt b/requirements.txt
index cc61b01..06db0e6 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -13,6 +13,7 @@
python-novaclient>=2.10.0
python-neutronclient>=2.2.3,<3.0.0
python-cinderclient>=1.0.4
+python-heatclient>=0.2.3
testresources
keyring
testrepository
diff --git a/run_tests.sh b/run_tests.sh
index f8636c1..f995cde 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -108,6 +108,21 @@
fi
}
+function run_tests_nose {
+ NOSE_WITH_OPENSTACK=1
+ NOSE_OPENSTACK_COLOR=1
+ NOSE_OPENSTACK_RED=15.00
+ NOSE_OPENSTACK_YELLOW=3.00
+ NOSE_OPENSTACK_SHOW_ELAPSED=1
+ NOSE_OPENSTACK_STDOUT=1
+ if [[ "x$noseargs" =~ "tempest" ]]; then
+ noseargs="$testrargs"
+ else
+ noseargs="$noseargs tempest"
+ fi
+ ${wrapper} nosetests $noseargs
+}
+
function run_pep8 {
echo "Running pep8 ..."
${wrapper} flake8
@@ -162,7 +177,13 @@
run_coverage_start
fi
-run_tests
+
+py_version=`${wrapper} python --version 2>&1`
+if [[ $py_version =~ "2.6" ]] ; then
+ run_tests_nose
+else
+ run_tests
+fi
retval=$?
if [ $nova_coverage -eq 1 ]; then
diff --git a/tempest/api/README.rst b/tempest/api/README.rst
index 617fda4..9d8dc10 100644
--- a/tempest/api/README.rst
+++ b/tempest/api/README.rst
@@ -9,15 +9,15 @@
works with the OpenStack API as documented. The current largest
portion of Tempest code is devoted to test cases that do exactly this.
-It's also important to test not only the expected possitive path on
+It's also important to test not only the expected positive path on
APIs, but also to provide them with invalid data to ensure they fail
in expected and documented ways. Over the course of the OpenStack
project Tempest has discovered many fundamental bugs by doing just
this.
-In order for some APIs to return meaniful results, there must be
+In order for some APIs to return meaningful results, there must be
enough data in the system. This means these tests might start by
-spinning up a server, image, etc, then opperating on it.
+spinning up a server, image, etc, then operating on it.
Why are these tests in tempest?
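The negative-path guidance above can be made concrete with a short sketch in the style used elsewhere in this change; the class and test names below are illustrative, and it assumes a deployed service plus a populated tempest.conf:

from tempest.api.network import base
from tempest.common.utils.data_utils import rand_name
from tempest import exceptions
from tempest.test import attr


class NetworksNegativeSketchJSON(base.BaseNetworkTest):
    _interface = 'json'

    @attr(type=['negative', 'gate'])
    def test_show_non_existent_subnet(self):
        # Requesting an id that cannot exist should fail in the documented
        # way (404 mapped to NotFound), never with a 5xx or a false success.
        non_exist_id = rand_name('subnet')
        self.assertRaises(exceptions.NotFound,
                          self.client.show_subnet, non_exist_id)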
diff --git a/tempest/api/compute/admin/test_fixed_ips.py b/tempest/api/compute/admin/test_fixed_ips.py
index 8b96370..895f773 100644
--- a/tempest/api/compute/admin/test_fixed_ips.py
+++ b/tempest/api/compute/admin/test_fixed_ips.py
@@ -15,8 +15,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import testtools
-
from tempest.api.compute import base
from tempest import config
from tempest import exceptions
@@ -30,6 +28,9 @@
@classmethod
def setUpClass(cls):
super(FixedIPsBase, cls).setUpClass()
+ if cls.config.service_available.neutron:
+ msg = ("%s skipped as neutron is available" % cls.__name__)
+ raise cls.skipException(msg)
# NOTE(maurosr): The idea here is: the server creation is just an
# auxiliary element to the ip details or reservation, there was no way
# (at least none in my mind) to get an valid and existing ip except
@@ -56,8 +57,6 @@
CONF = config.TempestConfig()
- @testtools.skipIf(CONF.service_available.neutron, "This feature is not" +
- "implemented by Neutron. See bug: #1194569")
@attr(type='gate')
def test_list_fixed_ip_details(self):
resp, fixed_ip = self.client.get_fixed_ip_details(self.ip)
diff --git a/tempest/api/compute/admin/test_flavors.py b/tempest/api/compute/admin/test_flavors.py
index 6d0a5b5..5f31084 100644
--- a/tempest/api/compute/admin/test_flavors.py
+++ b/tempest/api/compute/admin/test_flavors.py
@@ -15,6 +15,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import testtools
+
from tempest.api import compute
from tempest.api.compute import base
from tempest.common.utils.data_utils import rand_int_id
@@ -193,9 +195,10 @@
flag = True
self.assertTrue(flag)
+ @testtools.skip("Skipped until the Bug #1209101 is resolved")
@attr(type='gate')
- def test_flavor_not_public_verify_entry_not_in_list_details(self):
- # Create a flavor with os-flavor-access:is_public false should not
+ def test_list_non_public_flavor(self):
+ # A flavor created with os-flavor-access:is_public false should
# be present in list_details.
# This operation requires the user to have 'admin' role
flavor_name = rand_name(self.flavor_name_prefix)
@@ -208,13 +211,22 @@
new_flavor_id,
is_public="False")
self.addCleanup(self.flavor_clean_up, flavor['id'])
- flag = False
# Verify flavor is retrieved
+ flag = False
resp, flavors = self.client.list_flavors_with_detail()
self.assertEqual(resp.status, 200)
for flavor in flavors:
if flavor['name'] == flavor_name:
flag = True
+ self.assertTrue(flag)
+
+ # Verify flavor is not retrieved with other user
+ flag = False
+ resp, flavors = self.user_client.list_flavors_with_detail()
+ self.assertEqual(resp.status, 200)
+ for flavor in flavors:
+ if flavor['name'] == flavor_name:
+ flag = True
self.assertFalse(flag)
@attr(type='gate')
@@ -276,7 +288,7 @@
r, flavors = self.client.list_flavors_with_detail(params)
self.assertEqual(r.status, 200)
flavor = _flavor_lookup(flavors, flavor_name)
- self.assertNotEqual(flavor, None)
+ self.assertIsNotNone(flavor)
_test_string_variations(['f', 'false', 'no', '0'],
flavor_name_not_public)
diff --git a/tempest/api/compute/admin/test_hosts.py b/tempest/api/compute/admin/test_hosts.py
index 849cebb..af76ad0 100644
--- a/tempest/api/compute/admin/test_hosts.py
+++ b/tempest/api/compute/admin/test_hosts.py
@@ -33,13 +33,13 @@
cls.client = cls.os_adm.hosts_client
cls.non_admin_client = cls.os.hosts_client
- @attr(type=['positive', 'gate'])
+ @attr(type='gate')
def test_list_hosts(self):
resp, hosts = self.client.list_hosts()
self.assertEqual(200, resp.status)
self.assertTrue(len(hosts) >= 2)
- @attr(type='positive')
+ @attr(type='gate')
def test_list_hosts_with_zone(self):
resp, hosts = self.client.list_hosts()
host = hosts[0]
diff --git a/tempest/api/compute/admin/test_hypervisor.py b/tempest/api/compute/admin/test_hypervisor.py
index 00a5955..5ca16f4 100644
--- a/tempest/api/compute/admin/test_hypervisor.py
+++ b/tempest/api/compute/admin/test_hypervisor.py
@@ -40,20 +40,20 @@
self.assertEqual(200, resp.status)
return hypers
- @attr(type=['positive', 'gate'])
+ @attr(type='gate')
def test_get_hypervisor_list(self):
# List of hypervisor and available hypervisors hostname
hypers = self._list_hypervisors()
self.assertTrue(len(hypers) > 0)
- @attr(type=['positive', 'gate'])
+ @attr(type='gate')
def test_get_hypervisor_list_details(self):
# Display the details of the all hypervisor
resp, hypers = self.client.get_hypervisor_list_details()
self.assertEqual(200, resp.status)
self.assertTrue(len(hypers) > 0)
- @attr(type=['positive', 'gate'])
+ @attr(type='gate')
def test_get_hypervisor_show_details(self):
# Display the details of the specified hypervisor
hypers = self._list_hypervisors()
@@ -66,7 +66,7 @@
self.assertEqual(details['hypervisor_hostname'],
hypers[0]['hypervisor_hostname'])
- @attr(type=['positive', 'gate'])
+ @attr(type='gate')
def test_get_hypervisor_show_servers(self):
# Show instances about the specific hypervisors
hypers = self._list_hypervisors()
@@ -77,14 +77,14 @@
self.assertEqual(200, resp.status)
self.assertTrue(len(hypervisors) > 0)
- @attr(type=['positive', 'gate'])
+ @attr(type='gate')
def test_get_hypervisor_stats(self):
# Verify the stats of the all hypervisor
resp, stats = self.client.get_hypervisor_stats()
self.assertEqual(200, resp.status)
self.assertTrue(len(stats) > 0)
- @attr(type=['positive', 'gate'])
+ @attr(type='gate')
def test_get_hypervisor_uptime(self):
# Verify that GET shows the specified hypervisor uptime
hypers = self._list_hypervisors()
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 15e28fd..acf0275 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -36,6 +36,7 @@
@classmethod
def setUpClass(cls):
+ super(BaseComputeTest, cls).setUpClass()
if not cls.config.service_available.nova:
skip_msg = ("%s skipped as nova is not available" % cls.__name__)
raise cls.skipException(skip_msg)
diff --git a/tempest/api/compute/servers/test_list_server_filters.py b/tempest/api/compute/servers/test_list_server_filters.py
index b8f965c..ade7604 100644
--- a/tempest/api/compute/servers/test_list_server_filters.py
+++ b/tempest/api/compute/servers/test_list_server_filters.py
@@ -20,6 +20,7 @@
from tempest.api.compute import base
from tempest.api import utils
from tempest.common.utils.data_utils import rand_name
+from tempest import config
from tempest import exceptions
from tempest.test import attr
@@ -218,6 +219,8 @@
self.assertNotIn(self.s2_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s3_name, map(lambda x: x['name'], servers))
+ @testtools.skipIf(config.TempestConfig().service_available.neutron,
+ "Skipped until the Bug #1182883 is resolved")
@attr(type='gate')
def test_list_servers_filtered_by_ip_regex(self):
# Filter servers by regex ip
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index 893d9e0..25df6e6 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -225,7 +225,7 @@
resp, output = self.servers_client.get_console_output(
self.server_id, 10)
self.assertEqual(200, resp.status)
- self.assertNotEqual(output, None)
+ self.assertIsNotNone(output)
lines = len(output.split('\n'))
self.assertEqual(lines, 10)
self.wait_for(get_output)
@@ -249,7 +249,7 @@
resp, output = self.servers_client.get_console_output(self.server_id,
10)
self.assertEqual(200, resp.status)
- self.assertNotEqual(output, None)
+ self.assertIsNotNone(output)
lines = len(output.split('\n'))
self.assertEqual(lines, 10)
diff --git a/tempest/api/compute/servers/test_virtual_interfaces.py b/tempest/api/compute/servers/test_virtual_interfaces.py
index 2a5be8c..e5ea30e 100644
--- a/tempest/api/compute/servers/test_virtual_interfaces.py
+++ b/tempest/api/compute/servers/test_virtual_interfaces.py
@@ -45,7 +45,7 @@
# for a given server_id
resp, output = self.client.list_virtual_interfaces(self.server_id)
self.assertEqual(200, resp.status)
- self.assertNotEqual(output, None)
+ self.assertIsNotNone(output)
virt_ifaces = output
self.assertNotEqual(0, len(virt_ifaces['virtual_interfaces']),
'Expected virtual interfaces, got 0 interfaces.')
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index a3b051e..b67a5e0 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -55,6 +55,7 @@
# Start a server and wait for it to become ready
resp, server = self.create_server(wait_until='ACTIVE',
adminPass='password')
+ self.server = server
# Record addresses so that we can ssh later
resp, server['addresses'] = \
diff --git a/tempest/api/identity/admin/test_users.py b/tempest/api/identity/admin/test_users.py
index cbbe05d..4cfeb45 100644
--- a/tempest/api/identity/admin/test_users.py
+++ b/tempest/api/identity/admin/test_users.py
@@ -26,11 +26,14 @@
class UsersTestJSON(base.BaseIdentityAdminTest):
_interface = 'json'
- alt_user = rand_name('test_user_')
- alt_password = rand_name('pass_')
- alt_email = alt_user + '@testmail.tm'
- alt_tenant = rand_name('test_tenant_')
- alt_description = rand_name('desc_')
+ @classmethod
+ def setUpClass(cls):
+ super(UsersTestJSON, cls).setUpClass()
+ cls.alt_user = rand_name('test_user_')
+ cls.alt_password = rand_name('pass_')
+ cls.alt_email = cls.alt_user + '@testmail.tm'
+ cls.alt_tenant = rand_name('test_tenant_')
+ cls.alt_description = rand_name('desc_')
@attr(type='smoke')
def test_create_user(self):
@@ -101,8 +104,9 @@
@attr(type='smoke')
def test_delete_user(self):
# Delete a user
+ alt_user2 = rand_name('alt_user_')
self.data.setup_test_tenant()
- resp, user = self.client.create_user('user_1234', self.alt_password,
+ resp, user = self.client.create_user(alt_user2, self.alt_password,
self.data.tenant['id'],
self.alt_email)
self.assertEquals('200', resp['status'])
@@ -228,13 +232,16 @@
self.data.setup_test_tenant()
user_ids = list()
fetched_user_ids = list()
- resp, user1 = self.client.create_user('tenant_user1', 'password1',
+ alt_tenant_user1 = rand_name('tenant_user1_')
+ resp, user1 = self.client.create_user(alt_tenant_user1, 'password1',
self.data.tenant['id'],
'user1@123')
self.assertEquals('200', resp['status'])
user_ids.append(user1['id'])
self.data.users.append(user1)
- resp, user2 = self.client.create_user('tenant_user2', 'password2',
+
+ alt_tenant_user2 = rand_name('tenant_user2_')
+ resp, user2 = self.client.create_user(alt_tenant_user2, 'password2',
self.data.tenant['id'],
'user2@123')
self.assertEquals('200', resp['status'])
@@ -267,9 +274,11 @@
resp, role = self.client.assign_user_role(tenant['id'], user['id'],
role['id'])
self.assertEquals('200', resp['status'])
- resp, second_user = self.client.create_user('second_user', 'password1',
+
+ alt_user2 = rand_name('second_user_')
+ resp, second_user = self.client.create_user(alt_user2, 'password1',
self.data.tenant['id'],
- 'user1@123')
+ 'user2@123')
self.assertEquals('200', resp['status'])
user_ids.append(second_user['id'])
self.data.users.append(second_user)
diff --git a/tempest/api/identity/admin/v3/test_endpoints.py b/tempest/api/identity/admin/v3/test_endpoints.py
index 9f7b24b..d98fb71 100644
--- a/tempest/api/identity/admin/v3/test_endpoints.py
+++ b/tempest/api/identity/admin/v3/test_endpoints.py
@@ -53,6 +53,7 @@
cls.client.delete_endpoint(e['id'])
for s in cls.service_ids:
cls.identity_client.delete_service(s)
+ super(EndPointsTestJSON, cls).tearDownClass()
@attr(type='gate')
def test_list_endpoints(self):
diff --git a/tempest/api/identity/admin/v3/test_roles.py b/tempest/api/identity/admin/v3/test_roles.py
index cab84c0..980323a 100644
--- a/tempest/api/identity/admin/v3/test_roles.py
+++ b/tempest/api/identity/admin/v3/test_roles.py
@@ -54,7 +54,7 @@
resp[1], _ = cls.v3_client.delete_group(cls.group_body['id'])
resp[2], _ = cls.v3_client.delete_user(cls.user_body['id'])
resp[3], _ = cls.v3_client.delete_project(cls.project['id'])
- #NOTE(harika-vakadi): It is necessary to disable the domian
+ # NOTE(harika-vakadi): It is necessary to disable the domain
# before deleting,or else it would result in unauthorized error
cls.v3_client.update_domain(cls.domain['id'], enabled=False)
resp[4], _ = cls.v3_client.delete_domain(cls.domain['id'])
diff --git a/tempest/api/identity/base.py b/tempest/api/identity/base.py
index 1237ce4..bfb5372 100644
--- a/tempest/api/identity/base.py
+++ b/tempest/api/identity/base.py
@@ -25,6 +25,7 @@
@classmethod
def setUpClass(cls):
+ super(BaseIdentityAdminTest, cls).setUpClass()
os = clients.AdminManager(interface=cls._interface)
cls.client = os.identity_client
cls.token_client = os.token_client
@@ -45,6 +46,7 @@
@classmethod
def tearDownClass(cls):
cls.data.teardown_all()
+ super(BaseIdentityAdminTest, cls).tearDownClass()
def disable_user(self, user_name):
user = self.get_user_by_name(user_name)
diff --git a/tempest/api/image/base.py b/tempest/api/image/base.py
index 4e61495..4f54a15 100644
--- a/tempest/api/image/base.py
+++ b/tempest/api/image/base.py
@@ -29,6 +29,7 @@
@classmethod
def setUpClass(cls):
+ super(BaseImageTest, cls).setUpClass()
cls.created_images = []
cls._interface = 'json'
cls.isolated_creds = isolated_creds.IsolatedCreds(cls.__name__)
diff --git a/tempest/api/network/base.py b/tempest/api/network/base.py
index d3fa763..19c5f84 100644
--- a/tempest/api/network/base.py
+++ b/tempest/api/network/base.py
@@ -47,7 +47,8 @@
@classmethod
def setUpClass(cls):
- os = clients.Manager()
+ super(BaseNetworkTest, cls).setUpClass()
+ os = clients.Manager(interface=cls._interface)
cls.network_cfg = os.config.network
if not cls.config.service_available.neutron:
raise cls.skipException("Neutron support is required")
@@ -64,6 +65,7 @@
cls.client.delete_subnet(subnet['id'])
for network in cls.networks:
cls.client.delete_network(network['id'])
+ super(BaseNetworkTest, cls).tearDownClass()
@classmethod
def create_network(cls, network_name=None):
diff --git a/tempest/api/network/test_networks.py b/tempest/api/network/test_networks.py
index 00a8ef7..7f49452 100644
--- a/tempest/api/network/test_networks.py
+++ b/tempest/api/network/test_networks.py
@@ -23,7 +23,8 @@
from tempest.test import attr
-class NetworksTest(base.BaseNetworkTest):
+class NetworksTestJSON(base.BaseNetworkTest):
+ _interface = 'json'
"""
Tests the following operations in the Neutron API using the REST client for
@@ -55,13 +56,25 @@
@classmethod
def setUpClass(cls):
- super(NetworksTest, cls).setUpClass()
+ super(NetworksTestJSON, cls).setUpClass()
cls.network = cls.create_network()
cls.name = cls.network['name']
cls.subnet = cls.create_subnet(cls.network)
cls.cidr = cls.subnet['cidr']
cls.port = cls.create_port(cls.network)
+ def _delete_networks(self, created_networks):
+ for n in created_networks:
+ resp, body = self.client.delete_network(n['id'])
+ self.assertEqual(204, resp.status)
+ # Asserting that the networks are not found in the list after deletion
+ resp, body = self.client.list_networks()
+ networks_list = list()
+ for network in body['networks']:
+ networks_list.append(network['id'])
+ for n in created_networks:
+ self.assertNotIn(n['id'], networks_list)
+
@attr(type='gate')
def test_create_update_delete_network_subnet(self):
# Creates a network
@@ -97,7 +110,7 @@
self.assertEqual('200', resp['status'])
updated_subnet = body['subnet']
self.assertEqual(updated_subnet['name'], new_subnet)
- # Deletes subnet and network
+ # Delete subnet and network
resp, body = self.client.delete_subnet(subnet_id)
self.assertEqual('204', resp['status'])
resp, body = self.client.delete_network(net_id)
@@ -116,6 +129,7 @@
def test_list_networks(self):
# Verify the network exists in the list of all networks
resp, body = self.client.list_networks()
+ self.assertEqual('200', resp['status'])
networks = body['networks']
found = None
for n in networks:
@@ -137,6 +151,7 @@
def test_list_subnets(self):
# Verify the subnet exists in the list of all subnets
resp, body = self.client.list_subnets()
+ self.assertEqual('200', resp['status'])
subnets = body['subnets']
found = None
for n in subnets:
@@ -147,7 +162,7 @@
@attr(type='gate')
def test_create_update_delete_port(self):
- # Verify that successful port creation & deletion
+ # Verify that successful port creation, update & deletion
resp, body = self.client.create_port(self.network['id'])
self.assertEqual('201', resp['status'])
port = body['port']
@@ -162,7 +177,7 @@
self.assertEqual('204', resp['status'])
@attr(type='gate')
- def test_show_ports(self):
+ def test_show_port(self):
# Verify the details of port
resp, body = self.client.show_port(self.port['id'])
self.assertEqual('200', resp['status'])
@@ -192,3 +207,24 @@
non_exist_id = rand_name('subnet')
self.assertRaises(exceptions.NotFound, self.client.show_subnet,
non_exist_id)
+
+ @attr(type='gate')
+ def test_bulk_create_delete_network(self):
+ # Creates 2 networks in one request
+ network_names = [rand_name('network-'), rand_name('network-')]
+ resp, body = self.client.create_bulk_network(2, network_names)
+ created_networks = body['networks']
+ self.assertEqual('201', resp['status'])
+ self.addCleanup(self._delete_networks, created_networks)
+ # Asserting that the networks are found in the list after creation
+ resp, body = self.client.list_networks()
+ networks_list = list()
+ for network in body['networks']:
+ networks_list.append(network['id'])
+ for n in created_networks:
+ self.assertIsNotNone(n['id'])
+ self.assertIn(n['id'], networks_list)
+
+
+class NetworksTestXML(NetworksTestJSON):
+ _interface = 'xml'
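Because the XML variant adds only the _interface attribute, every test body defined in NetworksTestJSON is re-run through the XML network client (BaseNetworkTest now builds its clients via clients.Manager(interface=cls._interface)). A minimal sketch of the same pattern applied to a hypothetical suite; class and test names are illustrative:

from tempest.api.network import base
from tempest.test import attr


class ExampleSuiteJSON(base.BaseNetworkTest):
    _interface = 'json'

    @attr(type='gate')
    def test_list_networks_returns_200(self):
        # Written once against self.client; which serialization is used
        # depends entirely on cls._interface at setUpClass time.
        resp, body = self.client.list_networks()
        self.assertEqual('200', resp['status'])


class ExampleSuiteXML(ExampleSuiteJSON):
    # Re-runs the same test bodies through the XML network client.
    _interface = 'xml'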
diff --git a/tempest/api/network/test_quotas.py b/tempest/api/network/test_quotas.py
index ba70f34..b49cbe8 100644
--- a/tempest/api/network/test_quotas.py
+++ b/tempest/api/network/test_quotas.py
@@ -23,6 +23,7 @@
class QuotasTest(base.BaseNetworkTest):
+ _interface = 'json'
"""
Tests the following operations in the Neutron API using the REST client for
diff --git a/tempest/api/network/test_routers.py b/tempest/api/network/test_routers.py
new file mode 100644
index 0000000..4f687b0
--- /dev/null
+++ b/tempest/api/network/test_routers.py
@@ -0,0 +1,134 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.network import base
+from tempest.common.utils.data_utils import rand_name
+from tempest.test import attr
+
+
+class RoutersTest(base.BaseNetworkTest):
+ _interface = 'json'
+
+ @classmethod
+ def setUpClass(cls):
+ super(RoutersTest, cls).setUpClass()
+
+ def _delete_router(self, router_id):
+ resp, _ = self.client.delete_router(router_id)
+ self.assertEqual(204, resp.status)
+ # Asserting that the router is not found in the list
+ # after deletion
+ resp, list_body = self.client.list_routers()
+ self.assertEqual('200', resp['status'])
+ routers_list = list()
+ for router in list_body['routers']:
+ routers_list.append(router['id'])
+ self.assertNotIn(router_id, routers_list)
+
+ def _remove_router_interface_with_subnet_id(self, router_id, subnet_id):
+ resp, _ = self.client.remove_router_interface_with_subnet_id(
+ router_id, subnet_id)
+ self.assertEqual('200', resp['status'])
+
+ def _remove_router_interface_with_port_id(self, router_id, port_id):
+ resp, _ = self.client.remove_router_interface_with_port_id(
+ router_id, port_id)
+ self.assertEqual('200', resp['status'])
+
+ @attr(type='gate')
+ def test_create_show_list_update_delete_router(self):
+ # Create a router
+ name = rand_name('router-')
+ resp, create_body = self.client.create_router(
+ name, external_gateway_info={
+ "network_id": self.network_cfg.public_network_id},
+ admin_state_up=False)
+ self.assertEqual('201', resp['status'])
+ self.addCleanup(self._delete_router, create_body['router']['id'])
+ self.assertEqual(create_body['router']['name'], name)
+ self.assertEqual(
+ create_body['router']['external_gateway_info']['network_id'],
+ self.network_cfg.public_network_id)
+ self.assertEqual(create_body['router']['admin_state_up'], False)
+ # Show details of the created router
+ resp, show_body = self.client.show_router(
+ create_body['router']['id'])
+ self.assertEqual('200', resp['status'])
+ self.assertEqual(show_body['router']['name'], name)
+ self.assertEqual(
+ show_body['router']['external_gateway_info']['network_id'],
+ self.network_cfg.public_network_id)
+ self.assertEqual(show_body['router']['admin_state_up'], False)
+ # List routers and verify if created router is there in response
+ resp, list_body = self.client.list_routers()
+ self.assertEqual('200', resp['status'])
+ routers_list = list()
+ for router in list_body['routers']:
+ routers_list.append(router['id'])
+ self.assertIn(create_body['router']['id'], routers_list)
+ # Update the name of router and verify if it is updated
+ updated_name = 'updated ' + name
+ resp, update_body = self.client.update_router(
+ create_body['router']['id'], name=updated_name)
+ self.assertEqual('200', resp['status'])
+ self.assertEqual(update_body['router']['name'], updated_name)
+ resp, show_body = self.client.show_router(
+ create_body['router']['id'])
+ self.assertEqual(show_body['router']['name'], updated_name)
+
+ @attr(type='gate')
+ def test_add_remove_router_interface_with_subnet_id(self):
+ network = self.create_network()
+ subnet = self.create_subnet(network)
+ name = rand_name('router-')
+ resp, create_body = self.client.create_router(name)
+ self.addCleanup(self.client.delete_router, create_body['router']['id'])
+ # Add router interface with subnet id
+ resp, interface = self.client.add_router_interface_with_subnet_id(
+ create_body['router']['id'], subnet['id'])
+ self.assertEqual('200', resp['status'])
+ self.addCleanup(self._remove_router_interface_with_subnet_id,
+ create_body['router']['id'], subnet['id'])
+ self.assertTrue('subnet_id' in interface.keys())
+ self.assertTrue('port_id' in interface.keys())
+ # Verify router id is equal to device id in port details
+ resp, show_port_body = self.client.show_port(
+ interface['port_id'])
+ self.assertEqual(show_port_body['port']['device_id'],
+ create_body['router']['id'])
+
+ @attr(type='gate')
+ def test_add_remove_router_interface_with_port_id(self):
+ network = self.create_network()
+ self.create_subnet(network)
+ name = rand_name('router-')
+ resp, create_body = self.client.create_router(name)
+ self.addCleanup(self.client.delete_router, create_body['router']['id'])
+ resp, port_body = self.client.create_port(network['id'])
+ # add router interface to port created above
+ resp, interface = self.client.add_router_interface_with_port_id(
+ create_body['router']['id'], port_body['port']['id'])
+ self.assertEqual('200', resp['status'])
+ self.addCleanup(self._remove_router_interface_with_port_id,
+ create_body['router']['id'], port_body['port']['id'])
+ self.assertTrue('subnet_id' in interface.keys())
+ self.assertTrue('port_id' in interface.keys())
+ # Verify router id is equal to device id in port details
+ resp, show_port_body = self.client.show_port(
+ interface['port_id'])
+ self.assertEqual(show_port_body['port']['device_id'],
+ create_body['router']['id'])
diff --git a/tempest/api/object_storage/base.py b/tempest/api/object_storage/base.py
index 5a1fb5a..e6e8d17 100644
--- a/tempest/api/object_storage/base.py
+++ b/tempest/api/object_storage/base.py
@@ -18,6 +18,7 @@
from tempest.api.identity.base import DataGenerator
from tempest import clients
+from tempest.common import isolated_creds
from tempest import exceptions
import tempest.test
@@ -26,19 +27,45 @@
@classmethod
def setUpClass(cls):
+ super(BaseObjectTest, cls).setUpClass()
if not cls.config.service_available.swift:
skip_msg = ("%s skipped as swift is not available" % cls.__name__)
raise cls.skipException(skip_msg)
- cls.os = clients.Manager()
+ cls.isolated_creds = isolated_creds.IsolatedCreds(cls.__name__)
+ if cls.config.compute.allow_tenant_isolation:
+ # Get isolated creds for normal user
+ creds = cls.isolated_creds.get_primary_creds()
+ username, tenant_name, password = creds
+ cls.os = clients.Manager(username=username,
+ password=password,
+ tenant_name=tenant_name)
+ # Get isolated creds for admin user
+ admin_creds = cls.isolated_creds.get_admin_creds()
+ admin_username, admin_tenant_name, admin_password = admin_creds
+ cls.os_admin = clients.Manager(username=admin_username,
+ password=admin_password,
+ tenant_name=admin_tenant_name)
+ # Get isolated creds for alt user
+ alt_creds = cls.isolated_creds.get_alt_creds()
+ alt_username, alt_tenant, alt_password = alt_creds
+ cls.os_alt = clients.Manager(username=alt_username,
+ password=alt_password,
+ tenant_name=alt_tenant)
+ # Add isolated users to operator role so that they can create a
+ # container in swift.
+ cls._assign_member_role()
+ else:
+ cls.os = clients.Manager()
+ cls.os_admin = clients.AdminManager()
+ cls.os_alt = clients.AltManager()
+
cls.object_client = cls.os.object_client
cls.container_client = cls.os.container_client
cls.account_client = cls.os.account_client
cls.custom_object_client = cls.os.custom_object_client
- cls.os_admin = clients.AdminManager()
cls.token_client = cls.os_admin.token_client
cls.identity_admin_client = cls.os_admin.identity_client
cls.custom_account_client = cls.os.custom_account_client
- cls.os_alt = clients.AltManager()
cls.object_client_alt = cls.os_alt.object_client
cls.container_client_alt = cls.os_alt.container_client
cls.identity_client_alt = cls.os_alt.identity_client
@@ -46,6 +73,22 @@
cls.data = DataGenerator(cls.identity_admin_client)
@classmethod
+ def _assign_member_role(cls):
+ primary_user = cls.isolated_creds.get_primary_user()
+ alt_user = cls.isolated_creds.get_alt_user()
+ swift_role = cls.config.object_storage.operator_role
+ try:
+ resp, roles = cls.os_admin.identity_client.list_roles()
+ role = next(r for r in roles if r['name'] == swift_role)
+ except StopIteration:
+ msg = "No role named %s found" % swift_role
+ raise exceptions.NotFound(msg)
+ for user in [primary_user, alt_user]:
+ cls.os_admin.identity_client.assign_user_role(user['tenantId'],
+ user['id'],
+ role['id'])
+
+ @classmethod
def delete_containers(cls, containers, container_client=None,
object_client=None):
"""Remove given containers and all objects in them.
diff --git a/tempest/api/object_storage/test_account_quotas.py b/tempest/api/object_storage/test_account_quotas.py
index bc050dc..65fe1ac 100644
--- a/tempest/api/object_storage/test_account_quotas.py
+++ b/tempest/api/object_storage/test_account_quotas.py
@@ -75,22 +75,27 @@
cls.data.test_password,
cls.data.test_tenant)
- headers = {"X-Auth-Token": cls.reselleradmin_token,
+ def setUp(self):
+ super(AccountQuotasTest, self).setUp()
+
+ # Set a quota of 20 bytes on the user's account before each test
+ headers = {"X-Auth-Token": self.reselleradmin_token,
"X-Account-Meta-Quota-Bytes": "20"}
- cls.os.custom_account_client.request("POST", "", headers, "")
+ self.os.custom_account_client.request("POST", "", headers, "")
+
+ def tearDown(self):
+ # Remove the account quota after each test
+ headers = {"X-Auth-Token": self.reselleradmin_token,
+ "X-Remove-Account-Meta-Quota-Bytes": "x"}
+
+ self.os.custom_account_client.request("POST", "", headers, "")
+ super(AccountQuotasTest, self).tearDown()
@classmethod
def tearDownClass(cls):
cls.delete_containers([cls.container_name])
cls.data.teardown_all()
-
- # remove the quota from the container
- headers = {"X-Auth-Token": cls.reselleradmin_token,
- "X-Remove-Account-Meta-Quota-Bytes": "x"}
-
- cls.os.custom_account_client.request("POST", "", headers, "")
-
super(AccountQuotasTest, cls).tearDownClass()
@testtools.skipIf(not accounts_quotas_available,
@@ -113,3 +118,45 @@
self.assertRaises(exceptions.OverLimit,
self.object_client.create_object,
self.container_name, object_name, data)
+
+ @testtools.skipIf(not accounts_quotas_available,
+ "Account Quotas middleware not available")
+ @attr(type=["smoke"])
+ def test_admin_modify_quota(self):
+ """Test that the ResellerAdmin is able to modify and remove the quota
+ on a user's account.
+
+ Using the custom_account client, the test modifies the quota
+ successively to:
+
+ * "25": a random value different from the initial quota value.
+ * "" : an empty value, equivalent to the removal of the quota.
+ * "20": set the quota to its initial value.
+ """
+ for quota in ("25", "", "20"):
+
+ headers = {"X-Auth-Token": self.reselleradmin_token,
+ "X-Account-Meta-Quota-Bytes": quota}
+
+ resp, _ = self.os.custom_account_client.request("POST", "",
+ headers, "")
+
+ self.assertEqual(resp["status"], "204")
+
+ @testtools.skipIf(not accounts_quotas_available,
+ "Account Quotas middleware not available")
+ @attr(type=["negative", "smoke"])
+ def test_user_modify_quota(self):
+ """Test that a user is not able to modify or remove a quota on
+ its account.
+ """
+
+ # Not able to remove quota
+ self.assertRaises(exceptions.Unauthorized,
+ self.account_client.create_account_metadata,
+ {"Quota-Bytes": ""})
+
+ # Not able to modify quota
+ self.assertRaises(exceptions.Unauthorized,
+ self.account_client.create_account_metadata,
+ {"Quota-Bytes": "100"})
diff --git a/tempest/api/object_storage/test_container_services.py b/tempest/api/object_storage/test_container_services.py
index 8b9fc8c..eaaed39 100644
--- a/tempest/api/object_storage/test_container_services.py
+++ b/tempest/api/object_storage/test_container_services.py
@@ -31,6 +31,7 @@
@classmethod
def tearDownClass(cls):
cls.delete_containers(cls.containers)
+ super(ContainerTest, cls).tearDownClass()
@attr(type='smoke')
def test_create_container(self):
diff --git a/tempest/api/object_storage/test_container_sync.py b/tempest/api/object_storage/test_container_sync.py
index 5de4df0..d18c2ad 100644
--- a/tempest/api/object_storage/test_container_sync.py
+++ b/tempest/api/object_storage/test_container_sync.py
@@ -50,6 +50,7 @@
def tearDownClass(cls):
for client in cls.clients.values():
cls.delete_containers(cls.containers, client[0], client[1])
+ super(ContainerSyncTest, cls).tearDownClass()
@testtools.skip('Until Bug #1093743 is resolved.')
@attr(type='gate')
diff --git a/tempest/api/object_storage/test_object_expiry.py b/tempest/api/object_storage/test_object_expiry.py
index b546cec..8703480 100644
--- a/tempest/api/object_storage/test_object_expiry.py
+++ b/tempest/api/object_storage/test_object_expiry.py
@@ -41,6 +41,7 @@
NotFound exception and also non empty container cannot be deleted.
"""
cls.delete_containers([cls.container_name])
+ super(ObjectExpiryTest, cls).tearDownClass()
@testtools.skip('Until Bug #1069849 is resolved.')
@attr(type='gate')
diff --git a/tempest/api/object_storage/test_object_services.py b/tempest/api/object_storage/test_object_services.py
index c8d9965..c599562 100644
--- a/tempest/api/object_storage/test_object_services.py
+++ b/tempest/api/object_storage/test_object_services.py
@@ -47,6 +47,7 @@
cls.delete_containers(cls.containers)
# delete the user setup created
cls.data.teardown_all()
+ super(ObjectTest, cls).tearDownClass()
@attr(type='smoke')
def test_create_object(self):
diff --git a/tempest/api/object_storage/test_object_version.py b/tempest/api/object_storage/test_object_version.py
index cda3e4f..2b93c32 100644
--- a/tempest/api/object_storage/test_object_version.py
+++ b/tempest/api/object_storage/test_object_version.py
@@ -29,6 +29,7 @@
@classmethod
def tearDownClass(cls):
cls.delete_containers(cls.containers)
+ super(ContainerTest, cls).tearDownClass()
def assertContainer(self, container, count, byte, versioned):
resp, _ = self.container_client.list_container_metadata(container)
diff --git a/tempest/api/orchestration/base.py b/tempest/api/orchestration/base.py
index d06d942..745dd87 100644
--- a/tempest/api/orchestration/base.py
+++ b/tempest/api/orchestration/base.py
@@ -28,7 +28,7 @@
@classmethod
def setUpClass(cls):
-
+ super(BaseOrchestrationTest, cls).setUpClass()
os = clients.OrchestrationManager()
cls.orchestration_cfg = os.config.orchestration
if not os.config.service_available.heat:
@@ -107,6 +107,7 @@
def tearDownClass(cls):
cls.clear_stacks()
cls.clear_keypairs()
+ super(BaseOrchestrationTest, cls).tearDownClass()
def wait_for(self, condition):
"""Repeatedly calls condition() until a timeout."""
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index 52ab5b7..7781647 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -31,6 +31,7 @@
@classmethod
def setUpClass(cls):
+ super(BaseVolumeTest, cls).setUpClass()
cls.isolated_creds = isolated_creds.IsolatedCreds(cls.__name__)
if not cls.config.service_available.cinder:
diff --git a/tempest/cli/__init__.py b/tempest/cli/__init__.py
index f04d23f..08f585a 100644
--- a/tempest/cli/__init__.py
+++ b/tempest/cli/__init__.py
@@ -93,7 +93,7 @@
def cmd_with_auth(self, cmd, action, flags='', params='',
admin=True, fail_ok=False):
"""Executes given command with auth attributes appended."""
- #TODO(jogo) make admin=False work
+ # TODO(jogo) make admin=False work
creds = ('--os-username %s --os-tenant-name %s --os-password %s '
'--os-auth-url %s ' % (self.identity.admin_username,
self.identity.admin_tenant_name, self.identity.admin_password,
@@ -134,6 +134,11 @@
for field in field_names:
self.assertIn(field, item)
+ def assertFirstLineStartsWith(self, lines, beginning):
+ self.assertTrue(lines[0].startswith(beginning),
+ msg=('Beginning of first line has invalid content: %s'
+ % lines[:3]))
+
class CommandFailed(subprocess.CalledProcessError):
# adds output attribute for python2.6
diff --git a/tempest/cli/simple_read_only/test_cinder.py b/tempest/cli/simple_read_only/test_cinder.py
index e9ce87b..21acae8 100644
--- a/tempest/cli/simple_read_only/test_cinder.py
+++ b/tempest/cli/simple_read_only/test_cinder.py
@@ -87,7 +87,7 @@
def test_admin_help(self):
help_text = self.cinder('help')
lines = help_text.split('\n')
- self.assertTrue(lines[0].startswith('usage: cinder'))
+ self.assertFirstLineStartsWith(lines, 'usage: cinder')
commands = []
cmds_start = lines.index('Positional arguments:')
diff --git a/tempest/cli/simple_read_only/test_compute.py b/tempest/cli/simple_read_only/test_compute.py
index 4c7f604..9b358e6 100644
--- a/tempest/cli/simple_read_only/test_compute.py
+++ b/tempest/cli/simple_read_only/test_compute.py
@@ -48,7 +48,7 @@
self.nova,
'this-does-nova-exist')
- #NOTE(jogo): Commands in order listed in 'nova help'
+ # NOTE(jogo): Commands in order listed in 'nova help'
# Positional arguments:
diff --git a/tempest/cli/simple_read_only/test_compute_manage.py b/tempest/cli/simple_read_only/test_compute_manage.py
index 1848827..523c65f 100644
--- a/tempest/cli/simple_read_only/test_compute_manage.py
+++ b/tempest/cli/simple_read_only/test_compute_manage.py
@@ -41,7 +41,7 @@
self.nova_manage,
'this-does-nova-exist')
- #NOTE(jogo): Commands in order listed in 'nova-manage -h'
+ # NOTE(jogo): Commands in order listed in 'nova-manage -h'
# test flags
def test_help_flag(self):
diff --git a/tempest/cli/simple_read_only/test_glance.py b/tempest/cli/simple_read_only/test_glance.py
index 3d58451..d02c60b 100644
--- a/tempest/cli/simple_read_only/test_glance.py
+++ b/tempest/cli/simple_read_only/test_glance.py
@@ -48,7 +48,7 @@
def test_glance_help(self):
help_text = self.glance('help')
lines = help_text.split('\n')
- self.assertTrue(lines[0].startswith('usage: glance'))
+ self.assertFirstLineStartsWith(lines, 'usage: glance')
commands = []
cmds_start = lines.index('Positional arguments:')
diff --git a/tempest/cli/simple_read_only/test_keystone.py b/tempest/cli/simple_read_only/test_keystone.py
index 4c7982b..1e8009f 100644
--- a/tempest/cli/simple_read_only/test_keystone.py
+++ b/tempest/cli/simple_read_only/test_keystone.py
@@ -46,7 +46,9 @@
out = self.keystone('catalog')
catalog = self.parser.details_multiple(out, with_label=True)
for svc in catalog:
- self.assertTrue(svc['__label'].startswith('Service:'))
+ self.assertTrue(svc['__label'].startswith('Service:'),
+ msg=('Invalid beginning of service block: %s' %
+ svc['__label']))
def test_admin_endpoint_list(self):
out = self.keystone('endpoint-list')
@@ -94,7 +96,7 @@
def test_admin_help(self):
help_text = self.keystone('help')
lines = help_text.split('\n')
- self.assertTrue(lines[0].startswith('usage: keystone'))
+ self.assertFirstLineStartsWith(lines, 'usage: keystone')
commands = []
cmds_start = lines.index('Positional arguments:')
diff --git a/tempest/cli/simple_read_only/test_neutron.py b/tempest/cli/simple_read_only/test_neutron.py
index 7b8340d..ae3a1a7 100644
--- a/tempest/cli/simple_read_only/test_neutron.py
+++ b/tempest/cli/simple_read_only/test_neutron.py
@@ -92,7 +92,7 @@
def test_neutron_help(self):
help_text = self.neutron('help')
lines = help_text.split('\n')
- self.assertTrue(lines[0].startswith('usage: neutron'))
+ self.assertFirstLineStartsWith(lines, 'usage: neutron')
commands = []
cmds_start = lines.index('Commands for API v2.0:')
diff --git a/tempest/clients.py b/tempest/clients.py
index 195cb89..48e4939 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -90,7 +90,8 @@
from tempest.services.identity.xml.identity_client import TokenClientXML
from tempest.services.image.v1.json.image_client import ImageClientJSON
from tempest.services.image.v2.json.image_client import ImageClientV2JSON
-from tempest.services.network.json.network_client import NetworkClient
+from tempest.services.network.json.network_client import NetworkClientJSON
+from tempest.services.network.xml.network_client import NetworkClientXML
from tempest.services.object_storage.account_client import AccountClient
from tempest.services.object_storage.account_client import \
AccountClientCustomizedHeader
@@ -116,6 +117,11 @@
"xml": ImagesClientXML,
}
+NETWORKS_CLIENTS = {
+ "json": NetworkClientJSON,
+ "xml": NetworkClientXML,
+}
+
KEYPAIRS_CLIENTS = {
"json": KeyPairsClientJSON,
"xml": KeyPairsClientXML,
@@ -295,6 +301,7 @@
try:
self.servers_client = SERVERS_CLIENTS[interface](*client_args)
+ self.network_client = NETWORKS_CLIENTS[interface](*client_args)
self.limits_client = LIMITS_CLIENTS[interface](*client_args)
if self.config.service_available.glance:
self.images_client = IMAGES_CLIENTS[interface](*client_args)
@@ -339,7 +346,6 @@
except KeyError:
msg = "Unsupported interface type `%s'" % interface
raise exceptions.InvalidConfiguration(msg)
- self.network_client = NetworkClient(*client_args)
self.hosts_client = HostsClientJSON(*client_args)
self.account_client = AccountClient(*client_args)
if self.config.service_available.glance:
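With the interface-keyed lookup in place, callers select the network client codec the same way they already do for servers and images. A rough usage sketch, assuming a populated tempest.conf (Manager reads identity settings at construction time); the variable names are illustrative:

from tempest import clients

# Same constructor, different serialization; an unknown interface name
# raises exceptions.InvalidConfiguration via the KeyError handler above.
json_manager = clients.Manager(interface='json')
xml_manager = clients.Manager(interface='xml')
assert type(json_manager.network_client).__name__ == 'NetworkClientJSON'
assert type(xml_manager.network_client).__name__ == 'NetworkClientXML'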
diff --git a/tempest/common/rest_client.py b/tempest/common/rest_client.py
index 759ab81..ea5b4f4 100644
--- a/tempest/common/rest_client.py
+++ b/tempest/common/rest_client.py
@@ -323,7 +323,7 @@
if (resp.status in set((204, 205, 304)) or resp.status < 200 or
method.upper() == 'HEAD') and resp_body:
raise exceptions.ResponseWithNonEmptyBody(status=resp.status)
- #NOTE(afazekas):
+ # NOTE(afazekas):
# If the HTTP Status Code is 205
# 'The response MUST NOT include an entity.'
# A HTTP entity has an entity-body and an 'entity-header'.
@@ -336,7 +336,7 @@
0 != len(set(resp.keys()) - set(('status',)) -
self.response_header_lc - self.general_header_lc)):
raise exceptions.ResponseWithEntity()
- #NOTE(afazekas)
+ # NOTE(afazekas)
# Now the swift sometimes (delete not empty container)
# returns with non json error response, we can create new rest class
# for swift.
@@ -458,8 +458,8 @@
message = resp_body
if parse_resp:
resp_body = self._parse_resp(resp_body)
- #I'm seeing both computeFault and cloudServersFault come back.
- #Will file a bug to fix, but leave as is for now.
+ # I'm seeing both computeFault and cloudServersFault come back.
+ # Will file a bug to fix, but leave as is for now.
if 'cloudServersFault' in resp_body:
message = resp_body['cloudServersFault']['message']
elif 'computeFault' in resp_body:
diff --git a/tempest/common/utils/linux/remote_client.py b/tempest/common/utils/linux/remote_client.py
index de2bf43..2cbb74d 100644
--- a/tempest/common/utils/linux/remote_client.py
+++ b/tempest/common/utils/linux/remote_client.py
@@ -24,7 +24,7 @@
class RemoteClient():
- #Note(afazekas): It should always get an address instead of server
+ # NOTE(afazekas): It should always get an address instead of server
def __init__(self, server, username, password=None, pkey=None):
ssh_timeout = TempestConfig().compute.ssh_timeout
network = TempestConfig().compute.network_for_ssh
diff --git a/tempest/config.py b/tempest/config.py
index 9b1a91e..e0ac843 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -369,6 +369,10 @@
cfg.BoolOpt('accounts_quotas_available',
default=True,
help="Set to True if the Account Quota middleware is enabled"),
+ cfg.StrOpt('operator_role',
+ default='Member',
+ help="Role to add to users created for swift tests to "
+ "enable creating containers"),
]
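Once registered, the option is read like any other object storage setting; a minimal sketch, assuming a populated tempest.conf (the value depends on the deployment, defaulting to 'Member' as in the sample file above):

from tempest import config

CONF = config.TempestConfig()
# Matches the role granted to isolated users in
# tempest/api/object_storage/base.py so they may create containers.
print(CONF.object_storage.operator_role)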
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 4447da0..8290021 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -16,11 +16,13 @@
# License for the specific language governing permissions and limitations
# under the License.
+import os
import subprocess
# Default client libs
import cinderclient.client
import glanceclient
+import heatclient.client
import keystoneclient.v2_0.client
import netaddr
from neutronclient.common import exceptions as exc
@@ -32,6 +34,7 @@
from tempest.common import isolated_creds
from tempest.common import ssh
from tempest.common.utils.data_utils import rand_name
+from tempest.common.utils.linux.remote_client import RemoteClient
import tempest.manager
from tempest.openstack.common import log as logging
import tempest.test
@@ -48,6 +51,7 @@
NOVACLIENT_VERSION = '2'
CINDERCLIENT_VERSION = '1'
+ HEATCLIENT_VERSION = '1'
def __init__(self, username, password, tenant_name):
super(OfficialClientManager, self).__init__()
@@ -62,6 +66,10 @@
self.volume_client = self._get_volume_client(username,
password,
tenant_name)
+ self.orchestration_client = self._get_orchestration_client(
+ username,
+ password,
+ tenant_name)
def _get_compute_client(self, username, password, tenant_name):
# Novaclient will not execute operations for anyone but the
@@ -98,6 +106,32 @@
tenant_name,
auth_url)
+ def _get_orchestration_client(self, username=None, password=None,
+ tenant_name=None):
+ if not username:
+ username = self.config.identity.admin_username
+ if not password:
+ password = self.config.identity.admin_password
+ if not tenant_name:
+ tenant_name = self.config.identity.tenant_name
+
+ self._validate_credentials(username, password, tenant_name)
+
+ keystone = self._get_identity_client(username, password, tenant_name)
+ token = keystone.auth_token
+ try:
+ endpoint = keystone.service_catalog.url_for(
+ service_type='orchestration',
+ endpoint_type='publicURL')
+ except keystoneclient.exceptions.EndpointNotFound:
+ return None
+ else:
+ return heatclient.client.Client(self.HEATCLIENT_VERSION,
+ endpoint,
+ token=token,
+ username=username,
+ password=password)
+
def _get_identity_client(self, username, password, tenant_name):
# This identity client is not intended to check the security
# of the identity service, so use admin credentials by default.
@@ -150,15 +184,11 @@
@classmethod
def setUpClass(cls):
+ super(OfficialClientTest, cls).setUpClass()
cls.isolated_creds = isolated_creds.IsolatedCreds(
__name__, tempest_client=False)
- if cls.config.compute.allow_tenant_isolation:
- creds = cls.isolated_creds.get_primary_creds()
- username, tenant_name, password = creds
- else:
- username = cls.config.identity.username
- password = cls.config.identity.password
- tenant_name = cls.config.identity.tenant_name
+
+ username, tenant_name, password = cls.credentials()
cls.manager = OfficialClientManager(username, password, tenant_name)
cls.compute_client = cls.manager.compute_client
@@ -166,10 +196,21 @@
cls.identity_client = cls.manager.identity_client
cls.network_client = cls.manager.network_client
cls.volume_client = cls.manager.volume_client
+ cls.orchestration_client = cls.manager.orchestration_client
cls.resource_keys = {}
cls.os_resources = []
@classmethod
+ def credentials(cls):
+ if cls.config.compute.allow_tenant_isolation:
+ return cls.isolated_creds.get_primary_creds()
+
+ username = cls.config.identity.username
+ password = cls.config.identity.password
+ tenant_name = cls.config.identity.tenant_name
+ return username, tenant_name, password
+
+ @classmethod
def tearDownClass(cls):
# NOTE(jaypipes): Because scenario tests are typically run in a
# specific order, and because test methods in scenario tests
@@ -257,58 +298,14 @@
self.fail("Timed out waiting for thing %s to become %s"
% (thing_id, expected_status))
-
-class NetworkScenarioTest(OfficialClientTest):
- """
- Base class for network scenario tests
- """
-
- @classmethod
- def check_preconditions(cls):
- if (cls.config.service_available.neutron):
- cls.enabled = True
- #verify that neutron_available is telling the truth
- try:
- cls.network_client.list_networks()
- except exc.EndpointNotFound:
- cls.enabled = False
- raise
- else:
- cls.enabled = False
- msg = 'Neutron not available'
- raise cls.skipException(msg)
-
- @classmethod
- def setUpClass(cls):
- super(NetworkScenarioTest, cls).setUpClass()
- cls.tenant_id = cls.manager._get_identity_client(
- cls.config.identity.username,
- cls.config.identity.password,
- cls.config.identity.tenant_name).tenant_id
-
- def _create_keypair(self, client, namestart='keypair-smoke-'):
- kp_name = rand_name(namestart)
- keypair = client.keypairs.create(kp_name)
- try:
- self.assertEqual(keypair.id, kp_name)
- self.set_resource(kp_name, keypair)
- except AttributeError:
- self.fail("Keypair object not successfully created.")
- return keypair
-
- def _create_security_group(self, client, namestart='secgroup-smoke-'):
- # Create security group
- sg_name = rand_name(namestart)
- sg_desc = sg_name + " description"
- secgroup = client.security_groups.create(sg_name, sg_desc)
- try:
- self.assertEqual(secgroup.name, sg_name)
- self.assertEqual(secgroup.description, sg_desc)
- self.set_resource(sg_name, secgroup)
- except AttributeError:
- self.fail("SecurityGroup object not successfully created.")
-
- # Add rules to the security group
+ def create_loginable_secgroup_rule(self, client=None, secgroup_id=None):
+ if client is None:
+ client = self.compute_client
+ if secgroup_id is None:
+ sgs = client.security_groups.list()
+ for sg in sgs:
+ if sg.name == 'default':
+ secgroup_id = sg.id
# These rules are intended to permit inbound ssh and icmp
# traffic from all sources, so no group_id is provided.
@@ -331,10 +328,116 @@
}
]
for ruleset in rulesets:
+ sg_rule = client.security_group_rules.create(secgroup_id,
+ **ruleset)
+ self.set_resource(sg_rule.id, sg_rule)
+
+ def create_server(self, client, name=None, image=None, flavor=None,
+ create_kwargs={}):
+ if name is None:
+ name = rand_name('scenario-server-')
+ if image is None:
+ image = self.config.compute.image_ref
+ if flavor is None:
+ flavor = self.config.compute.flavor_ref
+ LOG.debug("Creating a server (name: %s, image: %s, flavor: %s)",
+ name, image, flavor)
+ server = client.servers.create(name, image, flavor, **create_kwargs)
+ try:
+ self.assertEqual(server.name, name)
+ self.set_resource(name, server)
+ except AttributeError:
+ self.fail("Server not successfully created.")
+ self.status_timeout(client.servers, server.id, 'ACTIVE')
+ # The instance retrieved on creation is missing network
+ # details, necessitating retrieval after it becomes active to
+ # ensure correct details.
+ server = client.servers.get(server.id)
+ self.set_resource(name, server)
+ LOG.debug("Created server: %s", server)
+ return server
+
+ def create_volume(self, client=None, size=1, name=None,
+ snapshot_id=None, imageRef=None):
+ if client is None:
+ client = self.volume_client
+ if name is None:
+ name = rand_name('scenario-volume-')
+ LOG.debug("Creating a volume (size :%s, name: %s)", size, name)
+ volume = client.volumes.create(size=size, display_name=name,
+ snapshot_id=snapshot_id,
+ imageRef=imageRef)
+ self.set_resource(name, volume)
+ self.assertEqual(name, volume.display_name)
+ self.status_timeout(client.volumes, volume.id, 'available')
+ LOG.debug("Created volume: %s", volume)
+ return volume
+
+ def create_keypair(self, client=None, name=None):
+ if client is None:
+ client = self.compute_client
+ if name is None:
+ name = rand_name('scenario-keypair-')
+ keypair = client.keypairs.create(name)
+ self.assertEqual(keypair.name, name)
+ self.set_resource(name, keypair)
+ return keypair
+
+ def get_remote_client(self, server_or_ip, username=None, private_key=None):
+ if isinstance(server_or_ip, basestring):
+ ip = server_or_ip
+ else:
+ network_name_for_ssh = self.config.compute.network_for_ssh
+ ip = server_or_ip.networks[network_name_for_ssh][0]
+ if username is None:
+ username = self.config.scenario.ssh_user
+ if private_key is None:
+ private_key = self.keypair.private_key
+ return RemoteClient(ip, username, pkey=private_key)
+
+
+class NetworkScenarioTest(OfficialClientTest):
+ """
+ Base class for network scenario tests
+ """
+
+ @classmethod
+ def check_preconditions(cls):
+ if (cls.config.service_available.neutron):
+ cls.enabled = True
+ # verify that neutron_available is telling the truth
try:
- client.security_group_rules.create(secgroup.id, **ruleset)
- except Exception:
- self.fail("Failed to create rule in security group.")
+ cls.network_client.list_networks()
+ except exc.EndpointNotFound:
+ cls.enabled = False
+ raise
+ else:
+ cls.enabled = False
+ msg = 'Neutron not available'
+ raise cls.skipException(msg)
+
+ @classmethod
+ def setUpClass(cls):
+ super(NetworkScenarioTest, cls).setUpClass()
+ cls.tenant_id = cls.manager._get_identity_client(
+ cls.config.identity.username,
+ cls.config.identity.password,
+ cls.config.identity.tenant_name).tenant_id
+
+ def _create_security_group(self, client, namestart='secgroup-smoke-'):
+ # Create security group
+ sg_name = rand_name(namestart)
+ sg_desc = sg_name + " description"
+ secgroup = client.security_groups.create(sg_name, sg_desc)
+ try:
+ self.assertEqual(secgroup.name, sg_name)
+ self.assertEqual(secgroup.description, sg_desc)
+ self.set_resource(sg_name, secgroup)
+ except AttributeError:
+ self.fail("SecurityGroup object not successfully created.")
+
+ # Add rules to the security group
+ self.create_loginable_secgroup_rule(client, secgroup.id)
return secgroup
@@ -411,31 +514,6 @@
self.set_resource(name, port)
return port
- def _create_server(self, client, network, name, key_name, security_groups):
- flavor_id = self.config.compute.flavor_ref
- base_image_id = self.config.compute.image_ref
- create_kwargs = {
- 'nics': [
- {'net-id': network.id},
- ],
- 'key_name': key_name,
- 'security_groups': security_groups,
- }
- server = client.servers.create(name, base_image_id, flavor_id,
- **create_kwargs)
- try:
- self.assertEqual(server.name, name)
- self.set_resource(name, server)
- except AttributeError:
- self.fail("Server not successfully created.")
- self.status_timeout(client.servers, server.id, 'ACTIVE')
- # The instance retrieved on creation is missing network
- # details, necessitating retrieval after it becomes active to
- # ensure correct details.
- server = client.servers.get(server.id)
- self.set_resource(name, server)
- return server
-
def _create_floating_ip(self, server, external_network_id):
result = self.network_client.list_ports(device_id=server.id)
ports = result.get('ports', [])
@@ -488,3 +566,30 @@
timeout=self.config.compute.ssh_timeout),
'Auth failure in connecting to %s@%s via ssh' %
(username, ip_address))
+
+
+class OrchestrationScenarioTest(OfficialClientTest):
+ """
+ Base class for orchestration scenario tests
+ """
+
+ @classmethod
+ def credentials(cls):
+ username = cls.config.identity.admin_username
+ password = cls.config.identity.admin_password
+ tenant_name = cls.config.identity.tenant_name
+ return username, tenant_name, password
+
+ def _load_template(self, base_file, file_name):
+ filepath = os.path.join(os.path.dirname(os.path.realpath(base_file)),
+ file_name)
+ with open(filepath) as f:
+ return f.read()
+
+ @classmethod
+ def _stack_rand_name(cls):
+ return rand_name(cls.__name__ + '-')
+
+ def _create_keypair(self):
+ kp_name = rand_name('keypair-smoke')
+ return self.compute_client.keypairs.create(kp_name)
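A rough sketch of how a scenario test might compose the helpers added to OfficialClientTest above (create_keypair, create_loginable_secgroup_rule, create_server, get_remote_client); the class name and create_kwargs are illustrative, and it assumes a cloud reachable as described by tempest.conf:

from tempest.scenario import manager


class ExampleServerScenario(manager.OfficialClientTest):

    def test_boot_and_reach_server(self):
        # Keypair plus permissive rules on the default security group so
        # the guest is reachable once it goes ACTIVE.
        self.keypair = self.create_keypair()
        self.create_loginable_secgroup_rule()
        create_kwargs = {'key_name': self.keypair.name}
        server = self.create_server(self.compute_client,
                                    create_kwargs=create_kwargs)
        # get_remote_client accepts a server object or a bare IP string and
        # returns a RemoteClient wrapping an ssh session to the guest.
        linux_client = self.get_remote_client(server)
        self.assertIsNotNone(linux_client)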
diff --git a/tempest/scenario/orchestration/__init__.py b/tempest/scenario/orchestration/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/scenario/orchestration/__init__.py
diff --git a/tempest/scenario/orchestration/test_autoscaling.py b/tempest/scenario/orchestration/test_autoscaling.py
new file mode 100644
index 0000000..cd959a8
--- /dev/null
+++ b/tempest/scenario/orchestration/test_autoscaling.py
@@ -0,0 +1,108 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.openstack.common import log as logging
+from tempest.scenario import manager
+from tempest.test import attr
+from tempest.test import call_until_true
+import time
+
+
+LOG = logging.getLogger(__name__)
+
+
+class AutoScalingTest(manager.OrchestrationScenarioTest):
+
+ def setUp(self):
+ super(AutoScalingTest, self).setUp()
+ if not self.config.orchestration.image_ref:
+ raise self.skipException("No image available to test")
+ self.client = self.orchestration_client
+
+ def assign_keypair(self):
+ self.stack_name = self._stack_rand_name()
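+        # Reuse the operator-supplied keypair when one is configured;
+        # otherwise create one and track it as a test resource.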
+ if self.config.orchestration.keypair_name:
+ self.keypair_name = self.config.orchestration.keypair_name
+ else:
+ self.keypair = self._create_keypair()
+ self.keypair_name = self.keypair.id
+ self.set_resource('keypair', self.keypair)
+
+ def launch_stack(self):
+ self.parameters = {
+ 'KeyName': self.keypair_name,
+ 'InstanceType': self.config.orchestration.instance_type,
+ 'ImageId': self.config.orchestration.image_ref,
+ 'StackStart': str(time.time())
+ }
+
+ # create the stack
+ self.template = self._load_template(__file__, 'test_autoscaling.yaml')
+ self.client.stacks.create(
+ stack_name=self.stack_name,
+ template=self.template,
+ parameters=self.parameters)
+
+ self.stack = self.client.stacks.get(self.stack_name)
+ self.stack_identifier = '%s/%s' % (self.stack_name, self.stack.id)
+
+ # if a keypair was set, do not delete the stack on exit to allow
+        # for manual post-mortems
+ if not self.config.orchestration.keypair_name:
+ self.set_resource('stack', self.stack)
+
+ @attr(type='slow')
+ def test_scale_up_then_down(self):
+
+ self.assign_keypair()
+ self.launch_stack()
+
+ sid = self.stack_identifier
+ timeout = self.config.orchestration.build_timeout
+ interval = 10
+
+ self.assertEqual('CREATE', self.stack.action)
+ # wait for create to complete.
+ self.status_timeout(self.client.stacks, sid, 'COMPLETE')
+
+ self.stack.get()
+ self.assertEqual('CREATE_COMPLETE', self.stack.stack_status)
+
+ # the resource SmokeServerGroup is implemented as a nested
+ # stack, so servers can be counted by counting the resources
+ # inside that nested stack
+ resource = self.client.resources.get(sid, 'SmokeServerGroup')
+ nested_stack_id = resource.physical_resource_id
+
+ def server_count():
+ # the number of servers is the number of resources
+            # in the nested stack
+ self.server_count = len(
+ self.client.resources.list(nested_stack_id))
+ return self.server_count
+
+ def assertScale(from_servers, to_servers):
+ call_until_true(lambda: server_count() == to_servers,
+ timeout, interval)
+ self.assertEqual(to_servers, self.server_count,
+ 'Failed scaling from %d to %d servers' % (
+ from_servers, to_servers))
+
+ # he marched them up to the top of the hill
+ assertScale(1, 2)
+ assertScale(2, 3)
+
+ # and he marched them down again
+ assertScale(3, 2)
+ assertScale(2, 1)
diff --git a/tempest/scenario/orchestration/test_autoscaling.yaml b/tempest/scenario/orchestration/test_autoscaling.yaml
new file mode 100644
index 0000000..045b3bc
--- /dev/null
+++ b/tempest/scenario/orchestration/test_autoscaling.yaml
@@ -0,0 +1,182 @@
+HeatTemplateFormatVersion: '2012-12-12'
+Description: |
+ Template which tests autoscaling and load balancing
+Parameters:
+ KeyName:
+ Type: String
+ InstanceType:
+ Type: String
+ ImageId:
+ Type: String
+ StackStart:
+ Description: Epoch seconds when the stack was launched
+ Type: Number
+ ConsumeStartSeconds:
+ Description: Seconds after invocation when memory should be consumed
+ Type: Number
+ Default: '60'
+ ConsumeStopSeconds:
+ Description: Seconds after StackStart when memory should be released
+ Type: Number
+ Default: '420'
+ ScaleUpThreshold:
+ Description: Memory percentage threshold to scale up on
+ Type: Number
+ Default: '70'
+ ScaleDownThreshold:
+ Description: Memory percentage threshold to scale down on
+ Type: Number
+ Default: '60'
+ ConsumeMemoryLimit:
+ Description: Memory percentage threshold to consume
+ Type: Number
+ Default: '71'
+Resources:
+ SmokeServerGroup:
+ Type: AWS::AutoScaling::AutoScalingGroup
+ Properties:
+ AvailabilityZones: {'Fn::GetAZs': ''}
+ LaunchConfigurationName: {Ref: LaunchConfig}
+ MinSize: '1'
+ MaxSize: '3'
+ SmokeServerScaleUpPolicy:
+ Type: AWS::AutoScaling::ScalingPolicy
+ Properties:
+ AdjustmentType: ChangeInCapacity
+ AutoScalingGroupName: {Ref: SmokeServerGroup}
+ Cooldown: '60'
+ ScalingAdjustment: '1'
+ SmokeServerScaleDownPolicy:
+ Type: AWS::AutoScaling::ScalingPolicy
+ Properties:
+ AdjustmentType: ChangeInCapacity
+ AutoScalingGroupName: {Ref: SmokeServerGroup}
+ Cooldown: '60'
+ ScalingAdjustment: '-1'
+ MEMAlarmHigh:
+ Type: AWS::CloudWatch::Alarm
+ Properties:
+ AlarmDescription: Scale-up if MEM > ScaleUpThreshold% for 10 seconds
+ MetricName: MemoryUtilization
+ Namespace: system/linux
+ Statistic: Average
+ Period: '10'
+ EvaluationPeriods: '1'
+ Threshold: {Ref: ScaleUpThreshold}
+ AlarmActions: [{Ref: SmokeServerScaleUpPolicy}]
+ Dimensions:
+ - Name: AutoScalingGroupName
+ Value: {Ref: SmokeServerGroup}
+ ComparisonOperator: GreaterThanThreshold
+ MEMAlarmLow:
+ Type: AWS::CloudWatch::Alarm
+ Properties:
+ AlarmDescription: Scale-down if MEM < ScaleDownThreshold% for 10 seconds
+ MetricName: MemoryUtilization
+ Namespace: system/linux
+ Statistic: Average
+ Period: '10'
+ EvaluationPeriods: '1'
+ Threshold: {Ref: ScaleDownThreshold}
+ AlarmActions: [{Ref: SmokeServerScaleDownPolicy}]
+ Dimensions:
+ - Name: AutoScalingGroupName
+ Value: {Ref: SmokeServerGroup}
+ ComparisonOperator: LessThanThreshold
+ CfnUser:
+ Type: AWS::IAM::User
+ SmokeKeys:
+ Type: AWS::IAM::AccessKey
+ Properties:
+ UserName: {Ref: CfnUser}
+ SmokeSecurityGroup:
+ Type: AWS::EC2::SecurityGroup
+ Properties:
+ GroupDescription: Standard firewall rules
+ SecurityGroupIngress:
+ - {IpProtocol: tcp, FromPort: '22', ToPort: '22', CidrIp: 0.0.0.0/0}
+ - {IpProtocol: tcp, FromPort: '80', ToPort: '80', CidrIp: 0.0.0.0/0}
+ LaunchConfig:
+ Type: AWS::AutoScaling::LaunchConfiguration
+ Metadata:
+ AWS::CloudFormation::Init:
+ config:
+ files:
+ /etc/cfn/cfn-credentials:
+ content:
+ Fn::Replace:
+ - $AWSAccessKeyId: {Ref: SmokeKeys}
+ $AWSSecretKey: {'Fn::GetAtt': [SmokeKeys, SecretAccessKey]}
+ - |
+ AWSAccessKeyId=$AWSAccessKeyId
+ AWSSecretKey=$AWSSecretKey
+ mode: '000400'
+ owner: root
+ group: root
+ /root/watch_loop:
+ content:
+ Fn::Replace:
+ - _hi_: {Ref: MEMAlarmHigh}
+ _lo_: {Ref: MEMAlarmLow}
+ - |
+ #!/bin/bash
+ while :
+ do
+ /opt/aws/bin/cfn-push-stats --watch _hi_ --mem-util
+ /opt/aws/bin/cfn-push-stats --watch _lo_ --mem-util
+ sleep 4
+ done
+ mode: '000700'
+ owner: root
+ group: root
+ /root/consume_memory:
+ content:
+ Fn::Replace:
+ - StackStart: {Ref: StackStart}
+ ConsumeStopSeconds: {Ref: ConsumeStopSeconds}
+ ConsumeStartSeconds: {Ref: ConsumeStartSeconds}
+ ConsumeMemoryLimit: {Ref: ConsumeMemoryLimit}
+ - |
+ #!/usr/bin/env python
+ import psutil
+ import time
+ import datetime
+ import sys
+ a = []
+ sleep_until_consume = ConsumeStartSeconds
+ stack_start = StackStart
+ consume_stop_time = stack_start + ConsumeStopSeconds
+ memory_limit = ConsumeMemoryLimit
+ if sleep_until_consume > 0:
+ sys.stdout.flush()
+ time.sleep(sleep_until_consume)
+ while psutil.virtual_memory().percent < memory_limit:
+ sys.stdout.flush()
+ a.append(' ' * 10**5)
+ time.sleep(0.1)
+ sleep_until_exit = consume_stop_time - time.time()
+ if sleep_until_exit > 0:
+ time.sleep(sleep_until_exit)
+ mode: '000700'
+ owner: root
+ group: root
+ Properties:
+ ImageId: {Ref: ImageId}
+ InstanceType: {Ref: InstanceType}
+ KeyName: {Ref: KeyName}
+ SecurityGroups: [{Ref: SmokeSecurityGroup}]
+ UserData:
+ Fn::Base64:
+ Fn::Replace:
+ - ConsumeStopSeconds: {Ref: ConsumeStopSeconds}
+ ConsumeStartSeconds: {Ref: ConsumeStartSeconds}
+ ConsumeMemoryLimit: {Ref: ConsumeMemoryLimit}
+ - |
+ #!/bin/bash -v
+ /opt/aws/bin/cfn-init
+ # report on memory consumption every 4 seconds
+ /root/watch_loop &
+ # wait ConsumeStartSeconds then ramp up memory consumption
+ # until it is over ConsumeMemoryLimit%
+ # then exits ConsumeStopSeconds seconds after stack launch
+ /root/consume_memory > /root/consume_memory.log &
\ No newline at end of file
diff --git a/tempest/scenario/test_minimum_basic.py b/tempest/scenario/test_minimum_basic.py
index 13b31ec..5cddde2 100644
--- a/tempest/scenario/test_minimum_basic.py
+++ b/tempest/scenario/test_minimum_basic.py
@@ -16,7 +16,6 @@
# under the License.
from tempest.common.utils.data_utils import rand_name
-from tempest.common.utils.linux.remote_client import RemoteClient
from tempest.openstack.common import log as logging
from tempest.scenario import manager
@@ -83,22 +82,13 @@
properties=properties)
def nova_keypair_add(self):
- name = rand_name('scenario-keypair-')
-
- self.keypair = self.compute_client.keypairs.create(name=name)
- self.addCleanup(self.compute_client.keypairs.delete, self.keypair)
- self.assertEqual(name, self.keypair.name)
+ self.keypair = self.create_keypair()
def nova_boot(self):
- name = rand_name('scenario-server-')
- client = self.compute_client
- flavor_id = self.config.compute.flavor_ref
- self.server = client.servers.create(name=name, image=self.image,
- flavor=flavor_id,
- key_name=self.keypair.name)
- self.addCleanup(self.compute_client.servers.delete, self.server)
- self.assertEqual(name, self.server.name)
- self._wait_for_server_status('ACTIVE')
+ create_kwargs = {'key_name': self.keypair.name}
+ self.server = self.create_server(self.compute_client,
+ image=self.image,
+ create_kwargs=create_kwargs)
def nova_list(self):
servers = self.compute_client.servers.list()
@@ -111,15 +101,7 @@
self.assertEqual(self.server, got_server)
def cinder_create(self):
- name = rand_name('scenario-volume-')
- LOG.debug("volume display-name:%s" % name)
- self.volume = self.volume_client.volumes.create(size=1,
- display_name=name)
- LOG.debug("volume created:%s" % self.volume.display_name)
- self._wait_for_volume_status('available')
-
- self.addCleanup(self.volume_client.volumes.delete, self.volume)
- self.assertEqual(name, self.volume.display_name)
+ self.volume = self.create_volume()
def cinder_list(self):
volumes = self.volume_client.volumes.list()
@@ -148,30 +130,8 @@
def nova_floating_ip_add(self):
self.server.add_floating_ip(self.floating_ip)
- def nova_security_group_rule_create(self):
- sgs = self.compute_client.security_groups.list()
- for sg in sgs:
- if sg.name == 'default':
- secgroup = sg
-
- ruleset = {
- # ssh
- 'ip_protocol': 'tcp',
- 'from_port': 22,
- 'to_port': 22,
- 'cidr': '0.0.0.0/0',
- 'group_id': None
- }
- sg_rule = self.compute_client.security_group_rules.create(secgroup.id,
- **ruleset)
- self.addCleanup(self.compute_client.security_group_rules.delete,
- sg_rule.id)
-
def ssh_to_server(self):
- username = self.config.scenario.ssh_user
- self.linux_client = RemoteClient(self.floating_ip.ip,
- username,
- pkey=self.keypair.private_key)
+ self.linux_client = self.get_remote_client(self.floating_ip.ip)
def check_partitions(self):
partitions = self.linux_client.get_partitions()
@@ -200,7 +160,7 @@
self.nova_floating_ip_create()
self.nova_floating_ip_add()
- self.nova_security_group_rule_create()
+ self.create_loginable_secgroup_rule()
self.ssh_to_server()
self.check_partitions()
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index 5311eae..70939f6 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -16,8 +16,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import testtools
-
from tempest.api.network import common as net_common
from tempest.common.utils.data_utils import rand_name
from tempest import config
@@ -43,7 +41,7 @@
ssh server hosted at the IP address. This check guarantees
that the IP address is associated with the target VM.
- #TODO(mnewby) - Need to implement the following:
+ # TODO(mnewby) - Need to implement the following:
- the Tempest host can ssh into the VM via the IP address and
successfully execute the following:
@@ -162,8 +160,8 @@
@attr(type='smoke')
def test_001_create_keypairs(self):
- self.keypairs[self.tenant_id] = self._create_keypair(
- self.compute_client)
+ self.keypairs[self.tenant_id] = self.create_keypair(
+ name=rand_name('keypair-smoke-'))
@attr(type='smoke')
def test_002_create_security_groups(self):
@@ -182,8 +180,8 @@
@attr(type='smoke')
def test_004_check_networks(self):
- #Checks that we see the newly created network/subnet/router via
- #checking the result of list_[networks,routers,subnets]
+ # Checks that we see the newly created network/subnet/router via
+ # checking the result of list_[networks,routers,subnets]
seen_nets = self._list_networks()
seen_names = [n['name'] for n in seen_nets]
seen_ids = [n['id'] for n in seen_nets]
@@ -213,8 +211,15 @@
name = rand_name('server-smoke-%d-' % i)
keypair_name = self.keypairs[tenant_id].name
security_groups = [self.security_groups[tenant_id].name]
- server = self._create_server(self.compute_client, network,
- name, keypair_name, security_groups)
+ create_kwargs = {
+ 'nics': [
+ {'net-id': network.id},
+ ],
+ 'key_name': keypair_name,
+ 'security_groups': security_groups,
+ }
+ server = self.create_server(self.compute_client, name=name,
+ create_kwargs=create_kwargs)
self.servers.append(server)
@attr(type='smoke')
@@ -247,8 +252,6 @@
self.floating_ips[server].append(floating_ip)
@attr(type='smoke')
- @testtools.skipIf(CONF.service_available.neutron,
- "Skipped unti bug #1210664 is resolved")
def test_008_check_public_network_connectivity(self):
if not self.floating_ips:
raise self.skipTest('No floating ips have been allocated.')
diff --git a/tempest/scenario/test_server_basic_ops.py b/tempest/scenario/test_server_basic_ops.py
index 0ec3a1d..2903687 100644
--- a/tempest/scenario/test_server_basic_ops.py
+++ b/tempest/scenario/test_server_basic_ops.py
@@ -36,14 +36,8 @@
* Terminate the instance
"""
- def create_keypair(self):
- kp_name = rand_name('keypair-smoke')
- self.keypair = self.compute_client.keypairs.create(kp_name)
- try:
- self.assertEqual(self.keypair.id, kp_name)
- self.set_resource('keypair', self.keypair)
- except AttributeError:
- self.fail("Keypair object not successfully created.")
+ def add_keypair(self):
+ self.keypair = self.create_keypair()
def create_security_group(self):
sg_name = rand_name('secgroup-smoke')
@@ -58,50 +52,15 @@
self.fail("SecurityGroup object not successfully created.")
# Add rules to the security group
- rulesets = [
- {
- 'ip_protocol': 'tcp',
- 'from_port': 1,
- 'to_port': 65535,
- 'cidr': '0.0.0.0/0',
- 'group_id': self.secgroup.id
- },
- {
- 'ip_protocol': 'icmp',
- 'from_port': -1,
- 'to_port': -1,
- 'cidr': '0.0.0.0/0',
- 'group_id': self.secgroup.id
- }
- ]
- for ruleset in rulesets:
- try:
- self.compute_client.security_group_rules.create(
- self.secgroup.id, **ruleset)
- except Exception:
- self.fail("Failed to create rule in security group.")
+ self.create_loginable_secgroup_rule(secgroup_id=self.secgroup.id)
def boot_instance(self):
- i_name = rand_name('instance')
- flavor_id = self.config.compute.flavor_ref
- base_image_id = self.config.compute.image_ref
create_kwargs = {
- 'key_name': self.get_resource('keypair').id
+ 'key_name': self.keypair.id
}
- self.instance = self.compute_client.servers.create(
- i_name, base_image_id, flavor_id, **create_kwargs)
- try:
- self.assertEqual(self.instance.name, i_name)
- self.set_resource('instance', self.instance)
- except AttributeError:
- self.fail("Instance not successfully created.")
-
- self.assertEqual(self.instance.status, 'BUILD')
-
- def wait_on_active(self):
- instance_id = self.get_resource('instance').id
- self.status_timeout(
- self.compute_client.servers, instance_id, 'ACTIVE')
+ instance = self.create_server(self.compute_client,
+ create_kwargs=create_kwargs)
+ self.set_resource('instance', instance)
def pause_server(self):
instance = self.get_resource('instance')
@@ -145,10 +104,9 @@
self.remove_resource('instance')
def test_server_basicops(self):
- self.create_keypair()
+ self.add_keypair()
self.create_security_group()
self.boot_instance()
- self.wait_on_active()
self.pause_server()
self.unpause_server()
self.suspend_server()
diff --git a/tempest/scenario/test_snapshot_pattern.py b/tempest/scenario/test_snapshot_pattern.py
index 6e305c1..c55e2a3 100644
--- a/tempest/scenario/test_snapshot_pattern.py
+++ b/tempest/scenario/test_snapshot_pattern.py
@@ -16,7 +16,6 @@
# under the License.
from tempest.common.utils.data_utils import rand_name
-from tempest.common.utils.linux.remote_client import RemoteClient
from tempest.openstack.common import log as logging
from tempest.scenario import manager
@@ -44,57 +43,17 @@
self.status_timeout(self.image_client.images, image_id, status)
def _boot_image(self, image_id):
- name = rand_name('scenario-server-')
- client = self.compute_client
- flavor_id = self.config.compute.flavor_ref
- LOG.debug("name:%s, image:%s" % (name, image_id))
- server = client.servers.create(name=name,
- image=image_id,
- flavor=flavor_id,
- key_name=self.keypair.name)
- self.addCleanup(self.compute_client.servers.delete, server)
- self.assertEqual(name, server.name)
- self._wait_for_server_status(server, 'ACTIVE')
- server = client.servers.get(server) # getting network information
- LOG.debug("server:%s" % server)
- return server
+ create_kwargs = {
+ 'key_name': self.keypair.name
+ }
+ return self.create_server(self.compute_client, image=image_id,
+ create_kwargs=create_kwargs)
def _add_keypair(self):
- name = rand_name('scenario-keypair-')
- self.keypair = self.compute_client.keypairs.create(name=name)
- self.addCleanup(self.compute_client.keypairs.delete, self.keypair)
- self.assertEqual(name, self.keypair.name)
-
- def _create_security_group_rule(self):
- sgs = self.compute_client.security_groups.list()
- for sg in sgs:
- if sg.name == 'default':
- secgroup = sg
-
- ruleset = {
- # ssh
- 'ip_protocol': 'tcp',
- 'from_port': 22,
- 'to_port': 22,
- 'cidr': '0.0.0.0/0',
- 'group_id': None
- }
- sg_rule = self.compute_client.security_group_rules.create(secgroup.id,
- **ruleset)
- self.addCleanup(self.compute_client.security_group_rules.delete,
- sg_rule.id)
+ self.keypair = self.create_keypair()
def _ssh_to_server(self, server_or_ip):
- if isinstance(server_or_ip, basestring):
- ip = server_or_ip
- else:
- network_name_for_ssh = self.config.compute.network_for_ssh
- ip = server_or_ip.networks[network_name_for_ssh][0]
- username = self.config.scenario.ssh_user
- linux_client = RemoteClient(ip,
- username,
- pkey=self.keypair.private_key)
-
+ linux_client = self.get_remote_client(server_or_ip)
return linux_client.ssh_client
def _write_timestamp(self, server_or_ip):
@@ -129,7 +88,7 @@
def test_snapshot_pattern(self):
# prepare for booting a instance
self._add_keypair()
- self._create_security_group_rule()
+ self.create_loginable_secgroup_rule()
# boot a instance and create a timestamp file in it
server = self._boot_image(self.config.compute.image_ref)
diff --git a/tempest/scenario/test_stamp_pattern.py b/tempest/scenario/test_stamp_pattern.py
index 4434604..c74b88d 100644
--- a/tempest/scenario/test_stamp_pattern.py
+++ b/tempest/scenario/test_stamp_pattern.py
@@ -21,7 +21,6 @@
import testtools
from tempest.common.utils.data_utils import rand_name
-from tempest.common.utils.linux.remote_client import RemoteClient
from tempest import exceptions
from tempest.openstack.common import log as logging
from tempest.scenario import manager
@@ -64,26 +63,14 @@
volume_snapshot.id, status)
def _boot_image(self, image_id):
- name = rand_name('scenario-server-')
- client = self.compute_client
- flavor_id = self.config.compute.flavor_ref
- LOG.debug("name:%s, image:%s" % (name, image_id))
- server = client.servers.create(name=name,
- image=image_id,
- flavor=flavor_id,
- key_name=self.keypair.name)
- self.addCleanup(self.compute_client.servers.delete, server)
- self.assertEqual(name, server.name)
- self._wait_for_server_status(server, 'ACTIVE')
- server = client.servers.get(server) # getting network information
- LOG.debug("server:%s" % server)
- return server
+ create_kwargs = {
+ 'key_name': self.keypair.name
+ }
+ return self.create_server(self.compute_client, image=image_id,
+ create_kwargs=create_kwargs)
def _add_keypair(self):
- name = rand_name('scenario-keypair-')
- self.keypair = self.compute_client.keypairs.create(name=name)
- self.addCleanup(self.compute_client.keypairs.delete, self.keypair)
- self.assertEqual(name, self.keypair.name)
+ self.keypair = self.create_keypair()
def _create_floating_ip(self):
floating_ip = self.compute_client.floating_ips.create()
@@ -93,39 +80,8 @@
def _add_floating_ip(self, server, floating_ip):
server.add_floating_ip(floating_ip)
- def _create_security_group_rule(self):
- sgs = self.compute_client.security_groups.list()
- for sg in sgs:
- if sg.name == 'default':
- secgroup = sg
-
- ruleset = {
- # ssh
- 'ip_protocol': 'tcp',
- 'from_port': 22,
- 'to_port': 22,
- 'cidr': '0.0.0.0/0',
- 'group_id': None
- }
- sg_rule = self.compute_client.security_group_rules.create(secgroup.id,
- **ruleset)
- self.addCleanup(self.compute_client.security_group_rules.delete,
- sg_rule.id)
-
- def _remote_client_to_server(self, server_or_ip):
- if isinstance(server_or_ip, basestring):
- ip = server_or_ip
- else:
- network_name_for_ssh = self.config.compute.network_for_ssh
- ip = server_or_ip.networks[network_name_for_ssh][0]
- username = self.config.scenario.ssh_user
- linux_client = RemoteClient(ip,
- username,
- pkey=self.keypair.private_key)
- return linux_client
-
def _ssh_to_server(self, server_or_ip):
- linux_client = self._remote_client_to_server(server_or_ip)
+ linux_client = self.get_remote_client(server_or_ip)
return linux_client.ssh_client
def _create_image(self, server):
@@ -163,20 +119,7 @@
self.volume_client.volumes, volume.id, status)
def _create_volume(self, snapshot_id=None):
- name = rand_name('scenario-volume-')
- LOG.debug("volume display-name:%s" % name)
- volume = self.volume_client.volumes.create(size=1,
- display_name=name,
- snapshot_id=snapshot_id)
- LOG.debug("volume created:%s" % volume.display_name)
-
- def cleaner():
- self._wait_for_volume_status(volume, 'available')
- self.volume_client.volumes.delete(volume)
- self.addCleanup(cleaner)
- self._wait_for_volume_status(volume, 'available')
- self.assertEqual(name, volume.display_name)
- return volume
+ return self.create_volume(snapshot_id=snapshot_id)
def _attach_volume(self, server, volume):
attach_volume_client = self.compute_client.volumes.create_server_volume
@@ -192,7 +135,7 @@
self._wait_for_volume_status(volume, 'available')
def _wait_for_volume_availible_on_the_system(self, server_or_ip):
- ssh = self._remote_client_to_server(server_or_ip)
+ ssh = self.get_remote_client(server_or_ip)
conf = self.config
def _func():
@@ -223,7 +166,7 @@
def test_stamp_pattern(self):
# prepare for booting a instance
self._add_keypair()
- self._create_security_group_rule()
+ self.create_loginable_secgroup_rule()
# boot an instance and create a timestamp file in it
volume = self._create_volume()
diff --git a/tempest/scenario/test_volume_snapshot_pattern.py b/tempest/scenario/test_volume_snapshot_pattern.py
index 4d8a400..8fa177e 100644
--- a/tempest/scenario/test_volume_snapshot_pattern.py
+++ b/tempest/scenario/test_volume_snapshot_pattern.py
@@ -34,21 +34,9 @@
def _create_volume_from_image(self):
img_uuid = self.config.compute.image_ref
vol_name = rand_name('volume-origin')
- vol = self.volume_client.volumes.create(size=1,
- display_name=vol_name,
- imageRef=img_uuid)
- self.set_resource(vol.id, vol)
- self.status_timeout(self.volume_client.volumes,
- vol.id,
- 'available')
- return vol
+ return self.create_volume(name=vol_name, imageRef=img_uuid)
def _boot_instance_from_volume(self, vol_id):
- # NOTE(gfidente): the img_uuid here is only needed because
- # the novaclient requires it to be passed as arg
- img_uuid = self.config.compute.image_ref
- i_name = rand_name('instance')
- flavor_id = self.config.compute.flavor_ref
# NOTE(gfidente): the syntax for block_device_mapping is
# dev_name=id:type:size:delete_on_terminate
# where type needs to be "snap" if the server is booted
@@ -59,15 +47,8 @@
create_kwargs = {
'block_device_mapping': bd_map
}
- i = self.compute_client.servers.create(name=i_name,
- image=img_uuid,
- flavor=flavor_id,
- **create_kwargs)
- self.set_resource(i.id, i)
- self.status_timeout(self.compute_client.servers,
- i.id,
- 'ACTIVE')
- return i
+ return self.create_server(self.compute_client,
+ create_kwargs=create_kwargs)
def _create_snapshot_from_volume(self, vol_id):
volume_snapshots = self.volume_client.volume_snapshots
@@ -83,14 +64,7 @@
def _create_volume_from_snapshot(self, snap_id):
vol_name = rand_name('volume')
- vol = self.volume_client.volumes.create(size=1,
- display_name=vol_name,
- snapshot_id=snap_id)
- self.set_resource(vol.id, vol)
- self.status_timeout(self.volume_client.volumes,
- vol.id,
- 'available')
- return vol
+ return self.create_volume(name=vol_name, snapshot_id=snap_id)
def _stop_instances(self, instances):
# NOTE(gfidente): two loops so we do not wait for the status twice
diff --git a/tempest/services/compute/xml/flavors_client.py b/tempest/services/compute/xml/flavors_client.py
index 6ba31ea..6fbb9e3 100644
--- a/tempest/services/compute/xml/flavors_client.py
+++ b/tempest/services/compute/xml/flavors_client.py
@@ -49,6 +49,11 @@
if k == '{%s}ephemeral' % XMLNS_OS_FLV_EXT_DATA:
k = 'OS-FLV-EXT-DATA:ephemeral'
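+            # extra_specs arrives as a nested mapping, so keep it as a dict
+            # under the renamed key and skip the int() conversion below.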
+ if k == 'extra_specs':
+ k = 'OS-FLV-WITH-EXT-SPECS:extra_specs'
+ flavor[k] = dict(v)
+ continue
+
try:
v = int(v)
except ValueError:
diff --git a/tempest/services/compute/xml/servers_client.py b/tempest/services/compute/xml/servers_client.py
index 12e7034..5c7a629 100644
--- a/tempest/services/compute/xml/servers_client.py
+++ b/tempest/services/compute/xml/servers_client.py
@@ -350,7 +350,7 @@
addrs = []
for child in node.getchildren():
addrs.append({'version': int(child.get('version')),
- 'addr': child.get('version')})
+ 'addr': child.get('addr')})
return {node.get('id'): addrs}
def list_addresses(self, server_id):
diff --git a/tempest/services/network/json/network_client.py b/tempest/services/network/json/network_client.py
index 2c808a9..588dc8f 100644
--- a/tempest/services/network/json/network_client.py
+++ b/tempest/services/network/json/network_client.py
@@ -17,22 +17,24 @@
from tempest.common.rest_client import RestClient
-class NetworkClient(RestClient):
+class NetworkClientJSON(RestClient):
"""
Tempest REST client for Neutron. Uses v2 of the Neutron API, since the
V1 API has been removed from the code base.
- Implements create, delete, list and show for the basic Neutron
- abstractions (networks, sub-networks and ports):
+ Implements create, delete, update, list and show for the basic Neutron
+ abstractions (networks, sub-networks, routers and ports):
+
+ Implements add/remove interface to router using subnet ID / port ID
It also implements list, show, update and reset for OpenStack Networking
quotas
"""
def __init__(self, config, username, password, auth_url, tenant_name=None):
- super(NetworkClient, self).__init__(config, username, password,
- auth_url, tenant_name)
+ super(NetworkClientJSON, self).__init__(config, username, password,
+ auth_url, tenant_name)
self.service = self.config.network.catalog_type
self.version = '2.0'
self.uri_prefix = "v%s" % (self.version)
@@ -55,6 +57,17 @@
body = json.loads(body)
return resp, body
+ def create_bulk_network(self, count, names):
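+        # Build a list of network dicts and create them all with a single
+        # bulk POST to the networks endpoint.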
+ network_list = list()
+ for i in range(count):
+ network_list.append({'name': names[i]})
+ post_body = {'networks': network_list}
+ body = json.dumps(post_body)
+ uri = '%s/networks' % (self.uri_prefix)
+ resp, body = self.post(uri, headers=self.headers, body=body)
+ body = json.loads(body)
+ return resp, body
+
def show_network(self, uuid):
uri = '%s/networks/%s' % (self.uri_prefix, uuid)
resp, body = self.get(uri, self.headers)
@@ -95,15 +108,14 @@
body = json.loads(body)
return resp, body
- def create_port(self, network_id, state=None):
- if not state:
- state = True
+ def create_port(self, network_id, **kwargs):
post_body = {
'port': {
'network_id': network_id,
- 'admin_state_up': state,
}
}
+ for key, val in kwargs.items():
+ post_body['port'][key] = val
body = json.dumps(post_body)
uri = '%s/ports' % (self.uri_prefix)
resp, body = self.post(uri, headers=self.headers, body=body)
@@ -187,3 +199,89 @@
resp, body = self.put(uri, body=body, headers=self.headers)
body = json.loads(body)
return resp, body
+
+ def list_routers(self):
+ uri = '%s/routers' % (self.uri_prefix)
+ resp, body = self.get(uri, self.headers)
+ body = json.loads(body)
+ return resp, body
+
+ def create_router(self, name, **kwargs):
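+        # Only admin_state_up and external_gateway_info are read from kwargs;
+        # they default to True and None respectively.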
+ post_body = {
+ 'router': {
+ 'name': name,
+ }
+ }
+ post_body['router']['admin_state_up'] = kwargs.get(
+ 'admin_state_up', True)
+ post_body['router']['external_gateway_info'] = kwargs.get(
+ 'external_gateway_info', None)
+ body = json.dumps(post_body)
+ uri = '%s/routers' % (self.uri_prefix)
+ resp, body = self.post(uri, headers=self.headers, body=body)
+ body = json.loads(body)
+ return resp, body
+
+ def delete_router(self, router_id):
+ uri = '%s/routers/%s' % (self.uri_prefix, router_id)
+ resp, body = self.delete(uri, self.headers)
+ return resp, body
+
+ def show_router(self, router_id):
+ uri = '%s/routers/%s' % (self.uri_prefix, router_id)
+ resp, body = self.get(uri, self.headers)
+ body = json.loads(body)
+ return resp, body
+
+ def update_router(self, router_id, **kwargs):
+ uri = '%s/routers/%s' % (self.uri_prefix, router_id)
+ resp, body = self.get(uri, self.headers)
+ body = json.loads(body)
+ update_body = {}
+ update_body['name'] = kwargs.get('name', body['router']['name'])
+ update_body['admin_state_up'] = kwargs.get(
+ 'admin_state_up', body['router']['admin_state_up'])
+ # Must uncomment/modify these lines once LP question#233187 is solved
+ #update_body['external_gateway_info'] = kwargs.get(
+ # 'external_gateway_info', body['router']['external_gateway_info'])
+ update_body = dict(router=update_body)
+ update_body = json.dumps(update_body)
+ resp, body = self.put(uri, update_body, self.headers)
+ body = json.loads(body)
+ return resp, body
+
+ def add_router_interface_with_subnet_id(self, router_id, subnet_id):
+ uri = '%s/routers/%s/add_router_interface' % (self.uri_prefix,
+ router_id)
+ update_body = {"subnet_id": subnet_id}
+ update_body = json.dumps(update_body)
+ resp, body = self.put(uri, update_body, self.headers)
+ body = json.loads(body)
+ return resp, body
+
+ def add_router_interface_with_port_id(self, router_id, port_id):
+ uri = '%s/routers/%s/add_router_interface' % (self.uri_prefix,
+ router_id)
+ update_body = {"port_id": port_id}
+ update_body = json.dumps(update_body)
+ resp, body = self.put(uri, update_body, self.headers)
+ body = json.loads(body)
+ return resp, body
+
+ def remove_router_interface_with_subnet_id(self, router_id, subnet_id):
+ uri = '%s/routers/%s/remove_router_interface' % (self.uri_prefix,
+ router_id)
+ update_body = {"subnet_id": subnet_id}
+ update_body = json.dumps(update_body)
+ resp, body = self.put(uri, update_body, self.headers)
+ body = json.loads(body)
+ return resp, body
+
+ def remove_router_interface_with_port_id(self, router_id, port_id):
+ uri = '%s/routers/%s/remove_router_interface' % (self.uri_prefix,
+ router_id)
+ update_body = {"port_id": port_id}
+ update_body = json.dumps(update_body)
+ resp, body = self.put(uri, update_body, self.headers)
+ body = json.loads(body)
+ return resp, body
diff --git a/tempest/services/network/xml/__init__.py b/tempest/services/network/xml/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/services/network/xml/__init__.py
diff --git a/tempest/services/network/xml/network_client.py b/tempest/services/network/xml/network_client.py
new file mode 100755
index 0000000..d4fb656
--- /dev/null
+++ b/tempest/services/network/xml/network_client.py
@@ -0,0 +1,172 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+import xml.etree.ElementTree as ET
+
+from tempest.common.rest_client import RestClientXML
+from tempest.services.compute.xml.common import Document
+from tempest.services.compute.xml.common import Element
+from tempest.services.compute.xml.common import xml_to_json
+
+
+class NetworkClientXML(RestClientXML):
+
+ def __init__(self, config, username, password, auth_url, tenant_name=None):
+ super(NetworkClientXML, self).__init__(config, username, password,
+ auth_url, tenant_name)
+ self.service = self.config.network.catalog_type
+ self.version = '2.0'
+ self.uri_prefix = "v%s" % (self.version)
+
+ def list_networks(self):
+ uri = '%s/networks' % (self.uri_prefix)
+ resp, body = self.get(uri, self.headers)
+ networks = self._parse_array(etree.fromstring(body))
+ networks = {"networks": networks}
+ return resp, networks
+
+ def create_network(self, name):
+ uri = '%s/networks' % (self.uri_prefix)
+ post_body = Element("network")
+ p2 = Element("name", name)
+ post_body.append(p2)
+ resp, body = self.post(uri, str(Document(post_body)), self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def create_bulk_network(self, count, names):
+ uri = '%s/networks' % (self.uri_prefix)
+ post_body = Element("networks")
+ for i in range(count):
+ p1 = Element("network")
+ p2 = Element("name", names[i])
+ p1.append(p2)
+ post_body.append(p1)
+ resp, body = self.post(uri, str(Document(post_body)), self.headers)
+ networks = self._parse_array(etree.fromstring(body))
+ networks = {"networks": networks}
+ return resp, networks
+
+ def delete_network(self, uuid):
+ uri = '%s/networks/%s' % (self.uri_prefix, str(uuid))
+ return self.delete(uri, self.headers)
+
+ def show_network(self, uuid):
+ uri = '%s/networks/%s' % (self.uri_prefix, str(uuid))
+ resp, body = self.get(uri, self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def create_subnet(self, net_uuid, cidr):
+ uri = '%s/subnets' % (self.uri_prefix)
+ subnet = Element("subnet")
+ p2 = Element("network_id", net_uuid)
+ p3 = Element("cidr", cidr)
+ p4 = Element("ip_version", 4)
+ subnet.append(p2)
+ subnet.append(p3)
+ subnet.append(p4)
+ resp, body = self.post(uri, str(Document(subnet)), self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def delete_subnet(self, subnet_id):
+ uri = '%s/subnets/%s' % (self.uri_prefix, str(subnet_id))
+ return self.delete(uri, self.headers)
+
+ def list_subnets(self):
+ uri = '%s/subnets' % (self.uri_prefix)
+ resp, body = self.get(uri, self.headers)
+ subnets = self._parse_array(etree.fromstring(body))
+ subnets = {"subnets": subnets}
+ return resp, subnets
+
+ def show_subnet(self, uuid):
+ uri = '%s/subnets/%s' % (self.uri_prefix, str(uuid))
+ resp, body = self.get(uri, self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def create_port(self, net_uuid, **kwargs):
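+        # Any extra kwargs are rendered as child elements of the port element
+        # (for example admin_state_up or name).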
+ uri = '%s/ports' % (self.uri_prefix)
+ port = Element("port")
+ p1 = Element('network_id', net_uuid)
+ port.append(p1)
+ for key, val in kwargs.items():
+ key = Element(key, val)
+ port.append(key)
+ resp, body = self.post(uri, str(Document(port)), self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def delete_port(self, port_id):
+ uri = '%s/ports/%s' % (self.uri_prefix, str(port_id))
+ return self.delete(uri, self.headers)
+
+ def _parse_array(self, node):
+ array = []
+ for child in node.getchildren():
+ array.append(xml_to_json(child))
+ return array
+
+ def list_ports(self):
+ url = '%s/ports' % (self.uri_prefix)
+ resp, body = self.get(url, self.headers)
+ ports = self._parse_array(etree.fromstring(body))
+ ports = {"ports": ports}
+ return resp, ports
+
+ def show_port(self, port_id):
+ uri = '%s/ports/%s' % (self.uri_prefix, str(port_id))
+ resp, body = self.get(uri, self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def update_port(self, port_id, name):
+ uri = '%s/ports/%s' % (self.uri_prefix, str(port_id))
+ port = Element("port")
+ p2 = Element("name", name)
+ port.append(p2)
+ resp, body = self.put(uri, str(Document(port)), self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def update_subnet(self, subnet_id, name):
+ uri = '%s/subnets/%s' % (self.uri_prefix, str(subnet_id))
+ subnet = Element("subnet")
+ p2 = Element("name", name)
+ subnet.append(p2)
+ resp, body = self.put(uri, str(Document(subnet)), self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def update_network(self, net_id, name):
+ uri = '%s/networks/%s' % (self.uri_prefix, str(net_id))
+ network = Element("network")
+ p2 = Element("name", name)
+ network.append(p2)
+ resp, body = self.put(uri, str(Document(network)), self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+
+def _root_tag_fetcher_and_xml_to_json_parse(xml_returned_body):
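+    # Strip any XML namespace from the root tag, convert the body to a
+    # JSON-style dict and wrap it under the bare root tag name.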
+ body = ET.fromstring(xml_returned_body)
+ root_tag = body.tag
+ if root_tag.startswith("{"):
+ ns, root_tag = root_tag.split("}", 1)
+ body = xml_to_json(etree.fromstring(xml_returned_body))
+ body = {root_tag: body}
+ return body
diff --git a/tempest/stress/actions/unit_test.py b/tempest/stress/actions/unit_test.py
new file mode 100644
index 0000000..95cc1bc
--- /dev/null
+++ b/tempest/stress/actions/unit_test.py
@@ -0,0 +1,79 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.openstack.common import importutils
+import tempest.stress.stressaction as stressaction
+
+
+class SetUpClassRunTime(object):
+
+ process = 'process'
+ action = 'action'
+ application = 'application'
+
+ allowed = set((process, action, application))
+
+ @classmethod
+ def validate(cls, name):
+ if name not in cls.allowed:
+            raise KeyError("'%s' is not a valid option" % name)
+
+
+class UnitTest(stressaction.StressAction):
+ """This is a special action for running existing unittests as stress test.
+ You need to pass ``test_method`` and ``class_setup_per``
+ using ``kwargs`` in the JSON descriptor;
+ ``test_method`` should be the fully qualified name of a unittest,
+ ``class_setup_per`` should be one from:
+ ``application``: once in the stress job lifetime
+ ``process``: once in the worker process lifetime
+ ``action``: on each action
+ Not all combination working in every case.
+ """
+
+ def setUp(self, **kwargs):
+ method = kwargs['test_method'].split('.')
+ self.test_method = method.pop()
+ self.klass = importutils.import_class('.'.join(method))
+        # valid options are 'process', 'application', 'action'
+ self.class_setup_per = kwargs.get('class_setup_per',
+ SetUpClassRunTime.process)
+ SetUpClassRunTime.validate(self.class_setup_per)
+
+ if self.class_setup_per == SetUpClassRunTime.application:
+ self.klass.setUpClass()
+ self.setupclass_called = False
+
+ def run_core(self):
+ res = self.klass(self.test_method).run()
+ if res.errors:
+ raise RuntimeError(res.errors)
+
+ def run(self):
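+        # 'process': setUpClass runs once per worker, on the first action.
+        # 'action': setUpClass/tearDownClass wrap every action.
+        # 'application': setUpClass already ran in setUp().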
+ if self.class_setup_per != SetUpClassRunTime.application:
+ if (self.class_setup_per == SetUpClassRunTime.action
+ or self.setupclass_called is False):
+ self.klass.setUpClass()
+ self.setupclass_called = True
+
+ self.run_core()
+
+ if (self.class_setup_per == SetUpClassRunTime.action):
+ self.klass.tearDownClass()
+ else:
+ self.run_core()
+
+ def tearDown(self):
+ if self.class_setup_per != SetUpClassRunTime.action:
+ self.klass.tearDownClass()
diff --git a/tempest/stress/etc/sample-unit-test.json b/tempest/stress/etc/sample-unit-test.json
new file mode 100644
index 0000000..b388bfe
--- /dev/null
+++ b/tempest/stress/etc/sample-unit-test.json
@@ -0,0 +1,8 @@
+[{"action": "tempest.stress.actions.unit_test.UnitTest",
+ "threads": 8,
+ "use_admin": false,
+ "use_isolated_tenants": false,
+ "kwargs": {"test_method": "tempest.cli.simple_read_only.test_glance.SimpleReadOnlyGlanceClientTest.test_glance_fake_action",
+ "class_setup_per": "process"}
+ }
+]
diff --git a/tempest/test.py b/tempest/test.py
index 0cd0b08..68cedf0 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -15,6 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import atexit
import os
import time
@@ -91,6 +92,17 @@
LOG.info("Overriding skipException to nose SkipTest")
testtools.TestCase.skipException = nose.plugins.skip.SkipTest
+at_exit_set = set()
+
+
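+# BaseTestCase.setUp() adds each test class to at_exit_set and
+# BaseTestCase.tearDownClass() removes it; anything still present at
+# interpreter exit points to a tearDownClass that skipped the super() call.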
+def validate_tearDownClass():
+ if at_exit_set:
+ raise RuntimeError("tearDownClass does not calls the super's "
+ "tearDownClass in these classes: "
+ + str(at_exit_set))
+
+atexit.register(validate_tearDownClass)
+
class BaseTestCase(testtools.TestCase,
testtools.testcase.WithAttributes,
@@ -98,29 +110,43 @@
config = config.TempestConfig()
+ setUpClassCalled = False
+
@classmethod
def setUpClass(cls):
if hasattr(super(BaseTestCase, cls), 'setUpClass'):
super(BaseTestCase, cls).setUpClass()
+ cls.setUpClassCalled = True
- def setUp(cls):
- super(BaseTestCase, cls).setUp()
+ @classmethod
+ def tearDownClass(cls):
+ at_exit_set.remove(cls)
+ if hasattr(super(BaseTestCase, cls), 'tearDownClass'):
+ super(BaseTestCase, cls).tearDownClass()
+
+ def setUp(self):
+ super(BaseTestCase, self).setUp()
+ if not self.setUpClassCalled:
+ raise RuntimeError("setUpClass does not calls the super's"
+ "setUpClass in the "
+ + self.__class__.__name__)
+ at_exit_set.add(self.__class__)
test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
try:
test_timeout = int(test_timeout)
except ValueError:
test_timeout = 0
if test_timeout > 0:
- cls.useFixture(fixtures.Timeout(test_timeout, gentle=True))
+ self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
if (os.environ.get('OS_STDOUT_CAPTURE') == 'True' or
os.environ.get('OS_STDOUT_CAPTURE') == '1'):
- stdout = cls.useFixture(fixtures.StringStream('stdout')).stream
- cls.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
+ stdout = self.useFixture(fixtures.StringStream('stdout')).stream
+ self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
if (os.environ.get('OS_STDERR_CAPTURE') == 'True' or
os.environ.get('OS_STDERR_CAPTURE') == '1'):
- stderr = cls.useFixture(fixtures.StringStream('stderr')).stream
- cls.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
+ stderr = self.useFixture(fixtures.StringStream('stderr')).stream
+ self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
@classmethod
def _get_identity_admin_client(cls):
diff --git a/tempest/thirdparty/boto/test.py b/tempest/thirdparty/boto/test.py
index 8812a10..e0c9f06 100644
--- a/tempest/thirdparty/boto/test.py
+++ b/tempest/thirdparty/boto/test.py
@@ -197,6 +197,7 @@
@classmethod
def setUpClass(cls):
+ super(BotoTestCase, cls).setUpClass()
# The trash contains cleanup functions and paramaters in tuples
# (function, *args, **kwargs)
cls._resource_trash_bin = {}
@@ -261,6 +262,10 @@
LOG.exception(exc)
finally:
del cls._resource_trash_bin[key]
+ super(BotoTestCase, cls).tearDownClass()
+        # NOTE(afazekas): let the super be called even on exceptions.
+        # The real exceptions are already logged; if the super throws another,
+        # it does not cause hidden issues.
if fail_count:
raise exceptions.TearDownException(num=fail_count)
diff --git a/tempest/whitebox/manager.py b/tempest/whitebox/manager.py
index b2632f1..3b1b107 100644
--- a/tempest/whitebox/manager.py
+++ b/tempest/whitebox/manager.py
@@ -72,7 +72,7 @@
cls.flavor_ref = cls.config.compute.flavor_ref
cls.flavor_ref_alt = cls.config.compute.flavor_ref_alt
- #NOTE(afazekas): Mimics the helper method used in the api tests
+ # NOTE(afazekas): Mimics the helper method used in the api tests
@classmethod
def create_server(cls, **kwargs):
flavor_ref = cls.config.compute.flavor_ref
@@ -127,7 +127,7 @@
cmd = shlex.split(cmd)
result = subprocess.Popen(cmd, stdout=subprocess.PIPE)
- #Todo(rohitk): Need to define host connection parameters in config
+ # TODO(rohitk): Need to define host connection parameters in config
else:
client = self.get_ssh_connection(self.config.whitebox.api_host,
self.config.whitebox.api_user,
diff --git a/tempest/whitebox/test_servers_whitebox.py b/tempest/whitebox/test_servers_whitebox.py
index 1c1cdeb..abe903c 100644
--- a/tempest/whitebox/test_servers_whitebox.py
+++ b/tempest/whitebox/test_servers_whitebox.py
@@ -26,7 +26,7 @@
@classmethod
def setUpClass(cls):
super(ServersWhiteboxTest, cls).setUpClass()
- #NOTE(afazekas): Strange relationship
+ # NOTE(afazekas): Strange relationship
BaseIdentityAdminTest.setUpClass()
cls.client = cls.servers_client
cls.img_client = cls.images_client
diff --git a/tools/skip_tracker.py b/tools/skip_tracker.py
index 1ed6961..c244808 100755
--- a/tools/skip_tracker.py
+++ b/tools/skip_tracker.py
@@ -61,7 +61,7 @@
"""
Return the skip tuples in a test file
"""
- BUG_RE = re.compile(r'.*skip\(.*bug:*\s*\#*(\d+)', re.IGNORECASE)
+ BUG_RE = re.compile(r'.*skip.*bug:*\s*\#*(\d+)', re.IGNORECASE)
DEF_RE = re.compile(r'.*def (\w+)\(')
bug_found = False
results = []
diff --git a/tox.ini b/tox.ini
index dc8980d..ea27b92 100644
--- a/tox.ini
+++ b/tox.ini
@@ -27,6 +27,36 @@
commands =
sh tools/pretty_tox.sh '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario|thirdparty|cli)) {posargs}'
+[testenv:heat-slow]
+sitepackages = True
+setenv = VIRTUAL_ENV={envdir}
+# The regex below is used to select heat api/scenario tests tagged as slow.
+commands =
+ sh tools/pretty_tox_serial.sh '(?=.*\[.*\bslow\b.*\])(^tempest\.(api|scenario)\.orchestration) {posargs}'
+
+[testenv:py26-full]
+sitepackages = True
+setenv = VIRTUAL_ENV={envdir}
+ NOSE_WITH_OPENSTACK=1
+ NOSE_OPENSTACK_COLOR=1
+ NOSE_OPENSTACK_RED=15
+ NOSE_OPENSTACK_YELLOW=3
+ NOSE_OPENSTACK_SHOW_ELAPSED=1
+ NOSE_OPENSTACK_STDOUT=1
+commands =
+ nosetests --logging-format '%(asctime)-15s %(message)s' --with-xunit -sv --xunit-file=nosetests-full.xml tempest/api tempest/scenario tempest/thirdparty tempest/cli {posargs}
+
+[testenv:py26-smoke]
+setenv = VIRTUAL_ENV={envdir}
+         NOSE_WITH_OPENSTACK=1
+ NOSE_OPENSTACK_COLOR=1
+ NOSE_OPENSTACK_RED=15
+ NOSE_OPENSTACK_YELLOW=3
+ NOSE_OPENSTACK_SHOW_ELAPSED=1
+ NOSE_OPENSTACK_STDOUT=1
+commands =
+ nosetests --logging-format '%(asctime)-15s %(message)s' --with-xunit -sv --attr=type=smoke --xunit-file=nosetests-smoke.xml tempest {posargs}
+
[testenv:smoke]
sitepackages = True
setenv = VIRTUAL_ENV={envdir}