Merge "Skip os-fixed-ips test since neutron has not implemented it"
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index f1aaa07..703d92a 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -1,5 +1,5 @@
[DEFAULT]
-# log_config = /opt/stack/tempest/etc/logging.conf.sample
+#log_config = /opt/stack/tempest/etc/logging.conf.sample
# disable logging to the stderr
use_stderr = False
@@ -272,6 +272,9 @@
# Set to True if the Account Quota middleware is enabled
accounts_quotas_available = True
+# Set operator role for tests that require creating a container
+operator_role = Member
+
[boto]
# This section contains configuration options used when executing tests
# with boto.
@@ -285,7 +288,7 @@
aws_access =
aws_secret =
-#Image materials for S3 upload
+# Image materials for S3 upload
# ALL content of the specified directory will be uploaded to S3
s3_materials_path = /opt/stack/devstack/files/images/s3-materials/cirros-0.3.1
@@ -293,22 +296,22 @@
# Subdirectories not allowed!
# The filenames will be used as a Keys in the S3 Buckets
-#ARI Ramdisk manifest. Must be in the above s3_materials_path
+# ARI Ramdisk manifest. Must be in the above s3_materials_path
ari_manifest = cirros-0.3.1-x86_64-initrd.manifest.xml
-#AMI Machine Image manifest. Must be in the above s3_materials_path
+# AMI Machine Image manifest. Must be in the above s3_materials_path
ami_manifest = cirros-0.3.1-x86_64-blank.img.manifest.xml
-#AKI Kernel Image manifest, Must be in the above s3_materials_path
+# AKI Kernel Image manifest, Must be in the above s3_materials_path
aki_manifest = cirros-0.3.1-x86_64-vmlinuz.manifest.xml
-#Instance type
+# Instance type
instance_type = m1.tiny
-#TCP/IP connection timeout
+# TCP/IP connection timeout
http_socket_timeout = 5
-#Number of retries actions on connection or 5xx error
+# Number of retry actions on connection or 5xx error
num_retries = 1
# Status change wait timout
diff --git a/requirements.txt b/requirements.txt
index cc61b01..06db0e6 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -13,6 +13,7 @@
python-novaclient>=2.10.0
python-neutronclient>=2.2.3,<3.0.0
python-cinderclient>=1.0.4
+python-heatclient>=0.2.3
testresources
keyring
testrepository
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index a3b051e..b67a5e0 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -55,6 +55,7 @@
# Start a server and wait for it to become ready
resp, server = self.create_server(wait_until='ACTIVE',
adminPass='password')
+ self.server = server
# Record addresses so that we can ssh later
resp, server['addresses'] = \
diff --git a/tempest/api/identity/admin/test_users.py b/tempest/api/identity/admin/test_users.py
index 6f90b04..8cdcee1 100644
--- a/tempest/api/identity/admin/test_users.py
+++ b/tempest/api/identity/admin/test_users.py
@@ -26,11 +26,14 @@
class UsersTestJSON(base.BaseIdentityAdminTest):
_interface = 'json'
- alt_user = rand_name('test_user_')
- alt_password = rand_name('pass_')
- alt_email = alt_user + '@testmail.tm'
- alt_tenant = rand_name('test_tenant_')
- alt_description = rand_name('desc_')
+ @classmethod
+ def setUpClass(cls):
+ super(UsersTestJSON, cls).setUpClass()
+ cls.alt_user = rand_name('test_user_')
+ cls.alt_password = rand_name('pass_')
+ cls.alt_email = cls.alt_user + '@testmail.tm'
+ cls.alt_tenant = rand_name('test_tenant_')
+ cls.alt_description = rand_name('desc_')
@attr(type='smoke')
def test_create_user(self):
@@ -101,8 +104,9 @@
@attr(type='smoke')
def test_delete_user(self):
# Delete a user
+ alt_user2 = rand_name('alt_user_')
self.data.setup_test_tenant()
- resp, user = self.client.create_user('user_1234', self.alt_password,
+ resp, user = self.client.create_user(alt_user2, self.alt_password,
self.data.tenant['id'],
self.alt_email)
self.assertEquals('200', resp['status'])
@@ -228,13 +232,16 @@
self.data.setup_test_tenant()
user_ids = list()
fetched_user_ids = list()
- resp, user1 = self.client.create_user('tenant_user1', 'password1',
+ alt_tenant_user1 = rand_name('tenant_user1_')
+ resp, user1 = self.client.create_user(alt_tenant_user1, 'password1',
self.data.tenant['id'],
'user1@123')
self.assertEquals('200', resp['status'])
user_ids.append(user1['id'])
self.data.users.append(user1)
- resp, user2 = self.client.create_user('tenant_user2', 'password2',
+
+ alt_tenant_user2 = rand_name('tenant_user2_')
+ resp, user2 = self.client.create_user(alt_tenant_user2, 'password2',
self.data.tenant['id'],
'user2@123')
self.assertEquals('200', resp['status'])
@@ -267,9 +274,11 @@
resp, role = self.client.assign_user_role(tenant['id'], user['id'],
role['id'])
self.assertEquals('200', resp['status'])
- resp, second_user = self.client.create_user('second_user', 'password1',
+
+ alt_user2 = rand_name('second_user_')
+ resp, second_user = self.client.create_user(alt_user2, 'password1',
self.data.tenant['id'],
- 'user1@123')
+ 'user2@123')
self.assertEquals('200', resp['status'])
user_ids.append(second_user['id'])
self.data.users.append(second_user)
diff --git a/tempest/api/identity/admin/v3/test_roles.py b/tempest/api/identity/admin/v3/test_roles.py
index cab84c0..980323a 100644
--- a/tempest/api/identity/admin/v3/test_roles.py
+++ b/tempest/api/identity/admin/v3/test_roles.py
@@ -54,7 +54,7 @@
resp[1], _ = cls.v3_client.delete_group(cls.group_body['id'])
resp[2], _ = cls.v3_client.delete_user(cls.user_body['id'])
resp[3], _ = cls.v3_client.delete_project(cls.project['id'])
- #NOTE(harika-vakadi): It is necessary to disable the domian
+ # NOTE(harika-vakadi): It is necessary to disable the domain
# before deleting,or else it would result in unauthorized error
cls.v3_client.update_domain(cls.domain['id'], enabled=False)
resp[4], _ = cls.v3_client.delete_domain(cls.domain['id'])
diff --git a/tempest/api/network/base.py b/tempest/api/network/base.py
index 2a3b3f7..19c5f84 100644
--- a/tempest/api/network/base.py
+++ b/tempest/api/network/base.py
@@ -48,7 +48,7 @@
@classmethod
def setUpClass(cls):
super(BaseNetworkTest, cls).setUpClass()
- os = clients.Manager()
+ os = clients.Manager(interface=cls._interface)
cls.network_cfg = os.config.network
if not cls.config.service_available.neutron:
raise cls.skipException("Neutron support is required")
diff --git a/tempest/api/network/test_networks.py b/tempest/api/network/test_networks.py
index 7dcbbd8..7f49452 100644
--- a/tempest/api/network/test_networks.py
+++ b/tempest/api/network/test_networks.py
@@ -23,7 +23,8 @@
from tempest.test import attr
-class NetworksTest(base.BaseNetworkTest):
+class NetworksTestJSON(base.BaseNetworkTest):
+ _interface = 'json'
"""
Tests the following operations in the Neutron API using the REST client for
@@ -55,7 +56,7 @@
@classmethod
def setUpClass(cls):
- super(NetworksTest, cls).setUpClass()
+ super(NetworksTestJSON, cls).setUpClass()
cls.network = cls.create_network()
cls.name = cls.network['name']
cls.subnet = cls.create_subnet(cls.network)
@@ -109,7 +110,7 @@
self.assertEqual('200', resp['status'])
updated_subnet = body['subnet']
self.assertEqual(updated_subnet['name'], new_subnet)
- # Deletes subnet and network
+ # Delete subnet and network
resp, body = self.client.delete_subnet(subnet_id)
self.assertEqual('204', resp['status'])
resp, body = self.client.delete_network(net_id)
@@ -128,6 +129,7 @@
def test_list_networks(self):
# Verify the network exists in the list of all networks
resp, body = self.client.list_networks()
+ self.assertEqual('200', resp['status'])
networks = body['networks']
found = None
for n in networks:
@@ -149,6 +151,7 @@
def test_list_subnets(self):
# Verify the subnet exists in the list of all subnets
resp, body = self.client.list_subnets()
+ self.assertEqual('200', resp['status'])
subnets = body['subnets']
found = None
for n in subnets:
@@ -159,7 +162,7 @@
@attr(type='gate')
def test_create_update_delete_port(self):
- # Verify that successful port creation & deletion
+ # Verify that successful port creation, update & deletion
resp, body = self.client.create_port(self.network['id'])
self.assertEqual('201', resp['status'])
port = body['port']
@@ -174,7 +177,7 @@
self.assertEqual('204', resp['status'])
@attr(type='gate')
- def test_show_ports(self):
+ def test_show_port(self):
# Verify the details of port
resp, body = self.client.show_port(self.port['id'])
self.assertEqual('200', resp['status'])
@@ -221,3 +224,7 @@
for n in created_networks:
self.assertIsNotNone(n['id'])
self.assertIn(n['id'], networks_list)
+
+
+class NetworksTestXML(NetworksTestJSON):
+ _interface = 'xml'
diff --git a/tempest/api/network/test_quotas.py b/tempest/api/network/test_quotas.py
index ba70f34..b49cbe8 100644
--- a/tempest/api/network/test_quotas.py
+++ b/tempest/api/network/test_quotas.py
@@ -23,6 +23,7 @@
class QuotasTest(base.BaseNetworkTest):
+ _interface = 'json'
"""
Tests the following operations in the Neutron API using the REST client for
diff --git a/tempest/api/network/test_routers.py b/tempest/api/network/test_routers.py
index df85682..4f687b0 100644
--- a/tempest/api/network/test_routers.py
+++ b/tempest/api/network/test_routers.py
@@ -21,6 +21,7 @@
class RoutersTest(base.BaseNetworkTest):
+ _interface = 'json'
@classmethod
def setUpClass(cls):
diff --git a/tempest/api/object_storage/base.py b/tempest/api/object_storage/base.py
index 820328c..e6e8d17 100644
--- a/tempest/api/object_storage/base.py
+++ b/tempest/api/object_storage/base.py
@@ -18,6 +18,7 @@
from tempest.api.identity.base import DataGenerator
from tempest import clients
+from tempest.common import isolated_creds
from tempest import exceptions
import tempest.test
@@ -30,16 +31,41 @@
if not cls.config.service_available.swift:
skip_msg = ("%s skipped as swift is not available" % cls.__name__)
raise cls.skipException(skip_msg)
- cls.os = clients.Manager()
+ cls.isolated_creds = isolated_creds.IsolatedCreds(cls.__name__)
+ if cls.config.compute.allow_tenant_isolation:
+ # Get isolated creds for normal user
+ creds = cls.isolated_creds.get_primary_creds()
+ username, tenant_name, password = creds
+ cls.os = clients.Manager(username=username,
+ password=password,
+ tenant_name=tenant_name)
+ # Get isolated creds for admin user
+ admin_creds = cls.isolated_creds.get_admin_creds()
+ admin_username, admin_tenant_name, admin_password = admin_creds
+ cls.os_admin = clients.Manager(username=admin_username,
+ password=admin_password,
+ tenant_name=admin_tenant_name)
+ # Get isolated creds for alt user
+ alt_creds = cls.isolated_creds.get_alt_creds()
+ alt_username, alt_tenant, alt_password = alt_creds
+ cls.os_alt = clients.Manager(username=alt_username,
+ password=alt_password,
+ tenant_name=alt_tenant)
+ # Add isolated users to operator role so that they can create a
+ # container in swift.
+ cls._assign_member_role()
+ else:
+ cls.os = clients.Manager()
+ cls.os_admin = clients.AdminManager()
+ cls.os_alt = clients.AltManager()
+
cls.object_client = cls.os.object_client
cls.container_client = cls.os.container_client
cls.account_client = cls.os.account_client
cls.custom_object_client = cls.os.custom_object_client
- cls.os_admin = clients.AdminManager()
cls.token_client = cls.os_admin.token_client
cls.identity_admin_client = cls.os_admin.identity_client
cls.custom_account_client = cls.os.custom_account_client
- cls.os_alt = clients.AltManager()
cls.object_client_alt = cls.os_alt.object_client
cls.container_client_alt = cls.os_alt.container_client
cls.identity_client_alt = cls.os_alt.identity_client
@@ -47,6 +73,22 @@
cls.data = DataGenerator(cls.identity_admin_client)
@classmethod
+ def _assign_member_role(cls):
+ primary_user = cls.isolated_creds.get_primary_user()
+ alt_user = cls.isolated_creds.get_alt_user()
+ swift_role = cls.config.object_storage.operator_role
+ try:
+ resp, roles = cls.os_admin.identity_client.list_roles()
+ role = next(r for r in roles if r['name'] == swift_role)
+ except StopIteration:
+ msg = "No role named %s found" % swift_role
+ raise exceptions.NotFound(msg)
+ for user in [primary_user, alt_user]:
+ cls.os_admin.identity_client.assign_user_role(user['tenantId'],
+ user['id'],
+ role['id'])
+
+ @classmethod
def delete_containers(cls, containers, container_client=None,
object_client=None):
"""Remove given containers and all objects in them.
diff --git a/tempest/cli/__init__.py b/tempest/cli/__init__.py
index f04d23f..08f585a 100644
--- a/tempest/cli/__init__.py
+++ b/tempest/cli/__init__.py
@@ -93,7 +93,7 @@
def cmd_with_auth(self, cmd, action, flags='', params='',
admin=True, fail_ok=False):
"""Executes given command with auth attributes appended."""
- #TODO(jogo) make admin=False work
+ # TODO(jogo) make admin=False work
creds = ('--os-username %s --os-tenant-name %s --os-password %s '
'--os-auth-url %s ' % (self.identity.admin_username,
self.identity.admin_tenant_name, self.identity.admin_password,
@@ -134,6 +134,11 @@
for field in field_names:
self.assertIn(field, item)
+ def assertFirstLineStartsWith(self, lines, beginning):
+ self.assertTrue(lines[0].startswith(beginning),
+ msg=('Beginning of first line has invalid content: %s'
+ % lines[:3]))
+
class CommandFailed(subprocess.CalledProcessError):
# adds output attribute for python2.6
diff --git a/tempest/cli/simple_read_only/test_cinder.py b/tempest/cli/simple_read_only/test_cinder.py
index e9ce87b..21acae8 100644
--- a/tempest/cli/simple_read_only/test_cinder.py
+++ b/tempest/cli/simple_read_only/test_cinder.py
@@ -87,7 +87,7 @@
def test_admin_help(self):
help_text = self.cinder('help')
lines = help_text.split('\n')
- self.assertTrue(lines[0].startswith('usage: cinder'))
+ self.assertFirstLineStartsWith(lines, 'usage: cinder')
commands = []
cmds_start = lines.index('Positional arguments:')
diff --git a/tempest/cli/simple_read_only/test_compute.py b/tempest/cli/simple_read_only/test_compute.py
index 4c7f604..9b358e6 100644
--- a/tempest/cli/simple_read_only/test_compute.py
+++ b/tempest/cli/simple_read_only/test_compute.py
@@ -48,7 +48,7 @@
self.nova,
'this-does-nova-exist')
- #NOTE(jogo): Commands in order listed in 'nova help'
+ # NOTE(jogo): Commands in order listed in 'nova help'
# Positional arguments:
diff --git a/tempest/cli/simple_read_only/test_compute_manage.py b/tempest/cli/simple_read_only/test_compute_manage.py
index 1848827..523c65f 100644
--- a/tempest/cli/simple_read_only/test_compute_manage.py
+++ b/tempest/cli/simple_read_only/test_compute_manage.py
@@ -41,7 +41,7 @@
self.nova_manage,
'this-does-nova-exist')
- #NOTE(jogo): Commands in order listed in 'nova-manage -h'
+ # NOTE(jogo): Commands in order listed in 'nova-manage -h'
# test flags
def test_help_flag(self):
diff --git a/tempest/cli/simple_read_only/test_glance.py b/tempest/cli/simple_read_only/test_glance.py
index 3d58451..d02c60b 100644
--- a/tempest/cli/simple_read_only/test_glance.py
+++ b/tempest/cli/simple_read_only/test_glance.py
@@ -48,7 +48,7 @@
def test_glance_help(self):
help_text = self.glance('help')
lines = help_text.split('\n')
- self.assertTrue(lines[0].startswith('usage: glance'))
+ self.assertFirstLineStartsWith(lines, 'usage: glance')
commands = []
cmds_start = lines.index('Positional arguments:')
diff --git a/tempest/cli/simple_read_only/test_keystone.py b/tempest/cli/simple_read_only/test_keystone.py
index 4c7982b..1e8009f 100644
--- a/tempest/cli/simple_read_only/test_keystone.py
+++ b/tempest/cli/simple_read_only/test_keystone.py
@@ -46,7 +46,9 @@
out = self.keystone('catalog')
catalog = self.parser.details_multiple(out, with_label=True)
for svc in catalog:
- self.assertTrue(svc['__label'].startswith('Service:'))
+ self.assertTrue(svc['__label'].startswith('Service:'),
+ msg=('Invalid beginning of service block: %s' %
+ svc['__label']))
def test_admin_endpoint_list(self):
out = self.keystone('endpoint-list')
@@ -94,7 +96,7 @@
def test_admin_help(self):
help_text = self.keystone('help')
lines = help_text.split('\n')
- self.assertTrue(lines[0].startswith('usage: keystone'))
+ self.assertFirstLineStartsWith(lines, 'usage: keystone')
commands = []
cmds_start = lines.index('Positional arguments:')
diff --git a/tempest/cli/simple_read_only/test_neutron.py b/tempest/cli/simple_read_only/test_neutron.py
index 7b8340d..ae3a1a7 100644
--- a/tempest/cli/simple_read_only/test_neutron.py
+++ b/tempest/cli/simple_read_only/test_neutron.py
@@ -92,7 +92,7 @@
def test_neutron_help(self):
help_text = self.neutron('help')
lines = help_text.split('\n')
- self.assertTrue(lines[0].startswith('usage: neutron'))
+ self.assertFirstLineStartsWith(lines, 'usage: neutron')
commands = []
cmds_start = lines.index('Commands for API v2.0:')
diff --git a/tempest/clients.py b/tempest/clients.py
index 195cb89..48e4939 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -90,7 +90,8 @@
from tempest.services.identity.xml.identity_client import TokenClientXML
from tempest.services.image.v1.json.image_client import ImageClientJSON
from tempest.services.image.v2.json.image_client import ImageClientV2JSON
-from tempest.services.network.json.network_client import NetworkClient
+from tempest.services.network.json.network_client import NetworkClientJSON
+from tempest.services.network.xml.network_client import NetworkClientXML
from tempest.services.object_storage.account_client import AccountClient
from tempest.services.object_storage.account_client import \
AccountClientCustomizedHeader
@@ -116,6 +117,11 @@
"xml": ImagesClientXML,
}
+NETWORKS_CLIENTS = {
+ "json": NetworkClientJSON,
+ "xml": NetworkClientXML,
+}
+
KEYPAIRS_CLIENTS = {
"json": KeyPairsClientJSON,
"xml": KeyPairsClientXML,
@@ -295,6 +301,7 @@
try:
self.servers_client = SERVERS_CLIENTS[interface](*client_args)
+ self.network_client = NETWORKS_CLIENTS[interface](*client_args)
self.limits_client = LIMITS_CLIENTS[interface](*client_args)
if self.config.service_available.glance:
self.images_client = IMAGES_CLIENTS[interface](*client_args)
@@ -339,7 +346,6 @@
except KeyError:
msg = "Unsupported interface type `%s'" % interface
raise exceptions.InvalidConfiguration(msg)
- self.network_client = NetworkClient(*client_args)
self.hosts_client = HostsClientJSON(*client_args)
self.account_client = AccountClient(*client_args)
if self.config.service_available.glance:
diff --git a/tempest/common/rest_client.py b/tempest/common/rest_client.py
index 759ab81..ea5b4f4 100644
--- a/tempest/common/rest_client.py
+++ b/tempest/common/rest_client.py
@@ -323,7 +323,7 @@
if (resp.status in set((204, 205, 304)) or resp.status < 200 or
method.upper() == 'HEAD') and resp_body:
raise exceptions.ResponseWithNonEmptyBody(status=resp.status)
- #NOTE(afazekas):
+ # NOTE(afazekas):
# If the HTTP Status Code is 205
# 'The response MUST NOT include an entity.'
# A HTTP entity has an entity-body and an 'entity-header'.
@@ -336,7 +336,7 @@
0 != len(set(resp.keys()) - set(('status',)) -
self.response_header_lc - self.general_header_lc)):
raise exceptions.ResponseWithEntity()
- #NOTE(afazekas)
+ # NOTE(afazekas)
# Now the swift sometimes (delete not empty container)
# returns with non json error response, we can create new rest class
# for swift.
@@ -458,8 +458,8 @@
message = resp_body
if parse_resp:
resp_body = self._parse_resp(resp_body)
- #I'm seeing both computeFault and cloudServersFault come back.
- #Will file a bug to fix, but leave as is for now.
+ # I'm seeing both computeFault and cloudServersFault come back.
+ # Will file a bug to fix, but leave as is for now.
if 'cloudServersFault' in resp_body:
message = resp_body['cloudServersFault']['message']
elif 'computeFault' in resp_body:
diff --git a/tempest/common/utils/linux/remote_client.py b/tempest/common/utils/linux/remote_client.py
index de2bf43..2cbb74d 100644
--- a/tempest/common/utils/linux/remote_client.py
+++ b/tempest/common/utils/linux/remote_client.py
@@ -24,7 +24,7 @@
class RemoteClient():
- #Note(afazekas): It should always get an address instead of server
+ # NOTE(afazekas): It should always get an address instead of server
def __init__(self, server, username, password=None, pkey=None):
ssh_timeout = TempestConfig().compute.ssh_timeout
network = TempestConfig().compute.network_for_ssh
diff --git a/tempest/config.py b/tempest/config.py
index 9b1a91e..e0ac843 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -369,6 +369,10 @@
cfg.BoolOpt('accounts_quotas_available',
default=True,
help="Set to True if the Account Quota middleware is enabled"),
+ cfg.StrOpt('operator_role',
+ default='Member',
+ help="Role to add to users created for swift tests to "
+ "enable creating containers"),
]
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index d512ae8..dad7eca 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -16,11 +16,13 @@
# License for the specific language governing permissions and limitations
# under the License.
+import os
import subprocess
# Default client libs
import cinderclient.client
import glanceclient
+import heatclient.client
import keystoneclient.v2_0.client
import netaddr
from neutronclient.common import exceptions as exc
@@ -48,6 +50,7 @@
NOVACLIENT_VERSION = '2'
CINDERCLIENT_VERSION = '1'
+ HEATCLIENT_VERSION = '1'
def __init__(self, username, password, tenant_name):
super(OfficialClientManager, self).__init__()
@@ -62,6 +65,10 @@
self.volume_client = self._get_volume_client(username,
password,
tenant_name)
+ self.orchestration_client = self._get_orchestration_client(
+ username,
+ password,
+ tenant_name)
def _get_compute_client(self, username, password, tenant_name):
# Novaclient will not execute operations for anyone but the
@@ -98,6 +105,32 @@
tenant_name,
auth_url)
+ def _get_orchestration_client(self, username=None, password=None,
+ tenant_name=None):
+ if not username:
+ username = self.config.identity.admin_username
+ if not password:
+ password = self.config.identity.admin_password
+ if not tenant_name:
+ tenant_name = self.config.identity.tenant_name
+
+ self._validate_credentials(username, password, tenant_name)
+
+ keystone = self._get_identity_client(username, password, tenant_name)
+ token = keystone.auth_token
+ try:
+ endpoint = keystone.service_catalog.url_for(
+ service_type='orchestration',
+ endpoint_type='publicURL')
+ except keystoneclient.exceptions.EndpointNotFound:
+ return None
+ else:
+ return heatclient.client.Client(self.HEATCLIENT_VERSION,
+ endpoint,
+ token=token,
+ username=username,
+ password=password)
+
def _get_identity_client(self, username, password, tenant_name):
# This identity client is not intended to check the security
# of the identity service, so use admin credentials by default.
@@ -153,13 +186,8 @@
super(OfficialClientTest, cls).setUpClass()
cls.isolated_creds = isolated_creds.IsolatedCreds(
__name__, tempest_client=False)
- if cls.config.compute.allow_tenant_isolation:
- creds = cls.isolated_creds.get_primary_creds()
- username, tenant_name, password = creds
- else:
- username = cls.config.identity.username
- password = cls.config.identity.password
- tenant_name = cls.config.identity.tenant_name
+
+ username, tenant_name, password = cls.credentials()
cls.manager = OfficialClientManager(username, password, tenant_name)
cls.compute_client = cls.manager.compute_client
@@ -167,10 +195,21 @@
cls.identity_client = cls.manager.identity_client
cls.network_client = cls.manager.network_client
cls.volume_client = cls.manager.volume_client
+ cls.orchestration_client = cls.manager.orchestration_client
cls.resource_keys = {}
cls.os_resources = []
@classmethod
+ def credentials(cls):
+ if cls.config.compute.allow_tenant_isolation:
+ return cls.isolated_creds.get_primary_creds()
+
+ username = cls.config.identity.username
+ password = cls.config.identity.password
+ tenant_name = cls.config.identity.tenant_name
+ return username, tenant_name, password
+
+ @classmethod
def tearDownClass(cls):
# NOTE(jaypipes): Because scenario tests are typically run in a
# specific order, and because test methods in scenario tests
@@ -258,6 +297,40 @@
self.fail("Timed out waiting for thing %s to become %s"
% (thing_id, expected_status))
+ def create_loginable_secgroup_rule(self, client=None, secgroup_id=None):
+ if client is None:
+ client = self.compute_client
+ if secgroup_id is None:
+ sgs = client.security_groups.list()
+ for sg in sgs:
+ if sg.name == 'default':
+ secgroup_id = sg.id
+
+ # These rules are intended to permit inbound ssh and icmp
+ # traffic from all sources, so no group_id is provided.
+ # Setting a group_id would only permit traffic from ports
+ # belonging to the same security group.
+ rulesets = [
+ {
+ # ssh
+ 'ip_protocol': 'tcp',
+ 'from_port': 22,
+ 'to_port': 22,
+ 'cidr': '0.0.0.0/0',
+ },
+ {
+ # ping
+ 'ip_protocol': 'icmp',
+ 'from_port': -1,
+ 'to_port': -1,
+ 'cidr': '0.0.0.0/0',
+ }
+ ]
+ for ruleset in rulesets:
+ sg_rule = client.security_group_rules.create(secgroup_id,
+ **ruleset)
+ self.set_resource(sg_rule.id, sg_rule)
+
def create_server(self, client, name=None, image=None, flavor=None,
create_kwargs={}):
if name is None:
@@ -283,6 +356,32 @@
LOG.debug("Created server: %s", server)
return server
+ def create_volume(self, client=None, size=1, name=None,
+ snapshot_id=None, imageRef=None):
+ if client is None:
+ client = self.volume_client
+ if name is None:
+ name = rand_name('scenario-volume-')
+ LOG.debug("Creating a volume (size :%s, name: %s)", size, name)
+ volume = client.volumes.create(size=size, display_name=name,
+ snapshot_id=snapshot_id,
+ imageRef=imageRef)
+ self.set_resource(name, volume)
+ self.assertEqual(name, volume.display_name)
+ self.status_timeout(client.volumes, volume.id, 'available')
+ LOG.debug("Created volume: %s", volume)
+ return volume
+
+ def create_keypair(self, client=None, name=None):
+ if client is None:
+ client = self.compute_client
+ if name is None:
+ name = rand_name('scenario-keypair-')
+ keypair = client.keypairs.create(name)
+ self.assertEqual(keypair.name, name)
+ self.set_resource(name, keypair)
+ return keypair
+
class NetworkScenarioTest(OfficialClientTest):
"""
@@ -293,7 +392,7 @@
def check_preconditions(cls):
if (cls.config.service_available.neutron):
cls.enabled = True
- #verify that neutron_available is telling the truth
+ # verify that neutron_available is telling the truth
try:
cls.network_client.list_networks()
except exc.EndpointNotFound:
@@ -312,16 +411,6 @@
cls.config.identity.password,
cls.config.identity.tenant_name).tenant_id
- def _create_keypair(self, client, namestart='keypair-smoke-'):
- kp_name = rand_name(namestart)
- keypair = client.keypairs.create(kp_name)
- try:
- self.assertEqual(keypair.id, kp_name)
- self.set_resource(kp_name, keypair)
- except AttributeError:
- self.fail("Keypair object not successfully created.")
- return keypair
-
def _create_security_group(self, client, namestart='secgroup-smoke-'):
# Create security group
sg_name = rand_name(namestart)
@@ -335,32 +424,7 @@
self.fail("SecurityGroup object not successfully created.")
# Add rules to the security group
-
- # These rules are intended to permit inbound ssh and icmp
- # traffic from all sources, so no group_id is provided.
- # Setting a group_id would only permit traffic from ports
- # belonging to the same security group.
- rulesets = [
- {
- # ssh
- 'ip_protocol': 'tcp',
- 'from_port': 22,
- 'to_port': 22,
- 'cidr': '0.0.0.0/0',
- },
- {
- # ping
- 'ip_protocol': 'icmp',
- 'from_port': -1,
- 'to_port': -1,
- 'cidr': '0.0.0.0/0',
- }
- ]
- for ruleset in rulesets:
- try:
- client.security_group_rules.create(secgroup.id, **ruleset)
- except Exception:
- self.fail("Failed to create rule in security group.")
+ self.create_loginable_secgroup_rule(client, secgroup.id)
return secgroup
@@ -489,3 +553,30 @@
timeout=self.config.compute.ssh_timeout),
'Auth failure in connecting to %s@%s via ssh' %
(username, ip_address))
+
+
+class OrchestrationScenarioTest(OfficialClientTest):
+ """
+ Base class for orchestration scenario tests
+ """
+
+ @classmethod
+ def credentials(cls):
+ username = cls.config.identity.admin_username
+ password = cls.config.identity.admin_password
+ tenant_name = cls.config.identity.tenant_name
+ return username, tenant_name, password
+
+ def _load_template(self, base_file, file_name):
+ filepath = os.path.join(os.path.dirname(os.path.realpath(base_file)),
+ file_name)
+ with open(filepath) as f:
+ return f.read()
+
+ @classmethod
+ def _stack_rand_name(cls):
+ return rand_name(cls.__name__ + '-')
+
+ def _create_keypair(self):
+ kp_name = rand_name('keypair-smoke')
+ return self.compute_client.keypairs.create(kp_name)
diff --git a/tempest/scenario/orchestration/__init__.py b/tempest/scenario/orchestration/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/scenario/orchestration/__init__.py
diff --git a/tempest/scenario/orchestration/test_autoscaling.py b/tempest/scenario/orchestration/test_autoscaling.py
new file mode 100644
index 0000000..cd959a8
--- /dev/null
+++ b/tempest/scenario/orchestration/test_autoscaling.py
@@ -0,0 +1,108 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.openstack.common import log as logging
+from tempest.scenario import manager
+from tempest.test import attr
+from tempest.test import call_until_true
+import time
+
+
+LOG = logging.getLogger(__name__)
+
+
+class AutoScalingTest(manager.OrchestrationScenarioTest):
+
+ def setUp(self):
+ super(AutoScalingTest, self).setUp()
+ if not self.config.orchestration.image_ref:
+ raise self.skipException("No image available to test")
+ self.client = self.orchestration_client
+
+ def assign_keypair(self):
+ self.stack_name = self._stack_rand_name()
+ if self.config.orchestration.keypair_name:
+ self.keypair_name = self.config.orchestration.keypair_name
+ else:
+ self.keypair = self._create_keypair()
+ self.keypair_name = self.keypair.id
+ self.set_resource('keypair', self.keypair)
+
+ def launch_stack(self):
+ self.parameters = {
+ 'KeyName': self.keypair_name,
+ 'InstanceType': self.config.orchestration.instance_type,
+ 'ImageId': self.config.orchestration.image_ref,
+ 'StackStart': str(time.time())
+ }
+
+ # create the stack
+ self.template = self._load_template(__file__, 'test_autoscaling.yaml')
+ self.client.stacks.create(
+ stack_name=self.stack_name,
+ template=self.template,
+ parameters=self.parameters)
+
+ self.stack = self.client.stacks.get(self.stack_name)
+ self.stack_identifier = '%s/%s' % (self.stack_name, self.stack.id)
+
+ # if a keypair was set, do not delete the stack on exit to allow
+ # for manual post-mortems
+ if not self.config.orchestration.keypair_name:
+ self.set_resource('stack', self.stack)
+
+ @attr(type='slow')
+ def test_scale_up_then_down(self):
+
+ self.assign_keypair()
+ self.launch_stack()
+
+ sid = self.stack_identifier
+ timeout = self.config.orchestration.build_timeout
+ interval = 10
+
+ self.assertEqual('CREATE', self.stack.action)
+ # wait for create to complete.
+ self.status_timeout(self.client.stacks, sid, 'COMPLETE')
+
+ self.stack.get()
+ self.assertEqual('CREATE_COMPLETE', self.stack.stack_status)
+
+ # the resource SmokeServerGroup is implemented as a nested
+ # stack, so servers can be counted by counting the resources
+ # inside that nested stack
+ resource = self.client.resources.get(sid, 'SmokeServerGroup')
+ nested_stack_id = resource.physical_resource_id
+
+ def server_count():
+ # the number of servers is the number of resources
+ # in the nested stack
+ self.server_count = len(
+ self.client.resources.list(nested_stack_id))
+ return self.server_count
+
+ def assertScale(from_servers, to_servers):
+ call_until_true(lambda: server_count() == to_servers,
+ timeout, interval)
+ self.assertEqual(to_servers, self.server_count,
+ 'Failed scaling from %d to %d servers' % (
+ from_servers, to_servers))
+
+ # he marched them up to the top of the hill
+ assertScale(1, 2)
+ assertScale(2, 3)
+
+ # and he marched them down again
+ assertScale(3, 2)
+ assertScale(2, 1)
diff --git a/tempest/scenario/orchestration/test_autoscaling.yaml b/tempest/scenario/orchestration/test_autoscaling.yaml
new file mode 100644
index 0000000..045b3bc
--- /dev/null
+++ b/tempest/scenario/orchestration/test_autoscaling.yaml
@@ -0,0 +1,182 @@
+HeatTemplateFormatVersion: '2012-12-12'
+Description: |
+ Template which tests autoscaling and load balancing
+Parameters:
+ KeyName:
+ Type: String
+ InstanceType:
+ Type: String
+ ImageId:
+ Type: String
+ StackStart:
+ Description: Epoch seconds when the stack was launched
+ Type: Number
+ ConsumeStartSeconds:
+ Description: Seconds after invocation when memory should be consumed
+ Type: Number
+ Default: '60'
+ ConsumeStopSeconds:
+ Description: Seconds after StackStart when memory should be released
+ Type: Number
+ Default: '420'
+ ScaleUpThreshold:
+ Description: Memory percentage threshold to scale up on
+ Type: Number
+ Default: '70'
+ ScaleDownThreshold:
+ Description: Memory percentage threshold to scale down on
+ Type: Number
+ Default: '60'
+ ConsumeMemoryLimit:
+ Description: Memory percentage threshold to consume
+ Type: Number
+ Default: '71'
+Resources:
+ SmokeServerGroup:
+ Type: AWS::AutoScaling::AutoScalingGroup
+ Properties:
+ AvailabilityZones: {'Fn::GetAZs': ''}
+ LaunchConfigurationName: {Ref: LaunchConfig}
+ MinSize: '1'
+ MaxSize: '3'
+ SmokeServerScaleUpPolicy:
+ Type: AWS::AutoScaling::ScalingPolicy
+ Properties:
+ AdjustmentType: ChangeInCapacity
+ AutoScalingGroupName: {Ref: SmokeServerGroup}
+ Cooldown: '60'
+ ScalingAdjustment: '1'
+ SmokeServerScaleDownPolicy:
+ Type: AWS::AutoScaling::ScalingPolicy
+ Properties:
+ AdjustmentType: ChangeInCapacity
+ AutoScalingGroupName: {Ref: SmokeServerGroup}
+ Cooldown: '60'
+ ScalingAdjustment: '-1'
+ MEMAlarmHigh:
+ Type: AWS::CloudWatch::Alarm
+ Properties:
+ AlarmDescription: Scale-up if MEM > ScaleUpThreshold% for 10 seconds
+ MetricName: MemoryUtilization
+ Namespace: system/linux
+ Statistic: Average
+ Period: '10'
+ EvaluationPeriods: '1'
+ Threshold: {Ref: ScaleUpThreshold}
+ AlarmActions: [{Ref: SmokeServerScaleUpPolicy}]
+ Dimensions:
+ - Name: AutoScalingGroupName
+ Value: {Ref: SmokeServerGroup}
+ ComparisonOperator: GreaterThanThreshold
+ MEMAlarmLow:
+ Type: AWS::CloudWatch::Alarm
+ Properties:
+ AlarmDescription: Scale-down if MEM < ScaleDownThreshold% for 10 seconds
+ MetricName: MemoryUtilization
+ Namespace: system/linux
+ Statistic: Average
+ Period: '10'
+ EvaluationPeriods: '1'
+ Threshold: {Ref: ScaleDownThreshold}
+ AlarmActions: [{Ref: SmokeServerScaleDownPolicy}]
+ Dimensions:
+ - Name: AutoScalingGroupName
+ Value: {Ref: SmokeServerGroup}
+ ComparisonOperator: LessThanThreshold
+ CfnUser:
+ Type: AWS::IAM::User
+ SmokeKeys:
+ Type: AWS::IAM::AccessKey
+ Properties:
+ UserName: {Ref: CfnUser}
+ SmokeSecurityGroup:
+ Type: AWS::EC2::SecurityGroup
+ Properties:
+ GroupDescription: Standard firewall rules
+ SecurityGroupIngress:
+ - {IpProtocol: tcp, FromPort: '22', ToPort: '22', CidrIp: 0.0.0.0/0}
+ - {IpProtocol: tcp, FromPort: '80', ToPort: '80', CidrIp: 0.0.0.0/0}
+ LaunchConfig:
+ Type: AWS::AutoScaling::LaunchConfiguration
+ Metadata:
+ AWS::CloudFormation::Init:
+ config:
+ files:
+ /etc/cfn/cfn-credentials:
+ content:
+ Fn::Replace:
+ - $AWSAccessKeyId: {Ref: SmokeKeys}
+ $AWSSecretKey: {'Fn::GetAtt': [SmokeKeys, SecretAccessKey]}
+ - |
+ AWSAccessKeyId=$AWSAccessKeyId
+ AWSSecretKey=$AWSSecretKey
+ mode: '000400'
+ owner: root
+ group: root
+ /root/watch_loop:
+ content:
+ Fn::Replace:
+ - _hi_: {Ref: MEMAlarmHigh}
+ _lo_: {Ref: MEMAlarmLow}
+ - |
+ #!/bin/bash
+ while :
+ do
+ /opt/aws/bin/cfn-push-stats --watch _hi_ --mem-util
+ /opt/aws/bin/cfn-push-stats --watch _lo_ --mem-util
+ sleep 4
+ done
+ mode: '000700'
+ owner: root
+ group: root
+ /root/consume_memory:
+ content:
+ Fn::Replace:
+ - StackStart: {Ref: StackStart}
+ ConsumeStopSeconds: {Ref: ConsumeStopSeconds}
+ ConsumeStartSeconds: {Ref: ConsumeStartSeconds}
+ ConsumeMemoryLimit: {Ref: ConsumeMemoryLimit}
+ - |
+ #!/usr/bin/env python
+ import psutil
+ import time
+ import datetime
+ import sys
+ a = []
+ sleep_until_consume = ConsumeStartSeconds
+ stack_start = StackStart
+ consume_stop_time = stack_start + ConsumeStopSeconds
+ memory_limit = ConsumeMemoryLimit
+ if sleep_until_consume > 0:
+ sys.stdout.flush()
+ time.sleep(sleep_until_consume)
+ while psutil.virtual_memory().percent < memory_limit:
+ sys.stdout.flush()
+ a.append(' ' * 10**5)
+ time.sleep(0.1)
+ sleep_until_exit = consume_stop_time - time.time()
+ if sleep_until_exit > 0:
+ time.sleep(sleep_until_exit)
+ mode: '000700'
+ owner: root
+ group: root
+ Properties:
+ ImageId: {Ref: ImageId}
+ InstanceType: {Ref: InstanceType}
+ KeyName: {Ref: KeyName}
+ SecurityGroups: [{Ref: SmokeSecurityGroup}]
+ UserData:
+ Fn::Base64:
+ Fn::Replace:
+ - ConsumeStopSeconds: {Ref: ConsumeStopSeconds}
+ ConsumeStartSeconds: {Ref: ConsumeStartSeconds}
+ ConsumeMemoryLimit: {Ref: ConsumeMemoryLimit}
+ - |
+ #!/bin/bash -v
+ /opt/aws/bin/cfn-init
+ # report on memory consumption every 4 seconds
+ /root/watch_loop &
+ # wait ConsumeStartSeconds then ramp up memory consumption
+ # until it is over ConsumeMemoryLimit%
+ # then exits ConsumeStopSeconds seconds after stack launch
+ /root/consume_memory > /root/consume_memory.log &
\ No newline at end of file
diff --git a/tempest/scenario/test_minimum_basic.py b/tempest/scenario/test_minimum_basic.py
index b789fa2..25735e9 100644
--- a/tempest/scenario/test_minimum_basic.py
+++ b/tempest/scenario/test_minimum_basic.py
@@ -83,11 +83,7 @@
properties=properties)
def nova_keypair_add(self):
- name = rand_name('scenario-keypair-')
-
- self.keypair = self.compute_client.keypairs.create(name=name)
- self.addCleanup(self.compute_client.keypairs.delete, self.keypair)
- self.assertEqual(name, self.keypair.name)
+ self.keypair = self.create_keypair()
def nova_boot(self):
create_kwargs = {'key_name': self.keypair.name}
@@ -106,15 +102,7 @@
self.assertEqual(self.server, got_server)
def cinder_create(self):
- name = rand_name('scenario-volume-')
- LOG.debug("volume display-name:%s" % name)
- self.volume = self.volume_client.volumes.create(size=1,
- display_name=name)
- LOG.debug("volume created:%s" % self.volume.display_name)
- self._wait_for_volume_status('available')
-
- self.addCleanup(self.volume_client.volumes.delete, self.volume)
- self.assertEqual(name, self.volume.display_name)
+ self.volume = self.create_volume()
def cinder_list(self):
volumes = self.volume_client.volumes.list()
@@ -143,25 +131,6 @@
def nova_floating_ip_add(self):
self.server.add_floating_ip(self.floating_ip)
- def nova_security_group_rule_create(self):
- sgs = self.compute_client.security_groups.list()
- for sg in sgs:
- if sg.name == 'default':
- secgroup = sg
-
- ruleset = {
- # ssh
- 'ip_protocol': 'tcp',
- 'from_port': 22,
- 'to_port': 22,
- 'cidr': '0.0.0.0/0',
- 'group_id': None
- }
- sg_rule = self.compute_client.security_group_rules.create(secgroup.id,
- **ruleset)
- self.addCleanup(self.compute_client.security_group_rules.delete,
- sg_rule.id)
-
def ssh_to_server(self):
username = self.config.scenario.ssh_user
self.linux_client = RemoteClient(self.floating_ip.ip,
@@ -195,7 +164,7 @@
self.nova_floating_ip_create()
self.nova_floating_ip_add()
- self.nova_security_group_rule_create()
+ self.create_loginable_secgroup_rule()
self.ssh_to_server()
self.check_partitions()
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index 99b0071..70939f6 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -16,8 +16,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import testtools
-
from tempest.api.network import common as net_common
from tempest.common.utils.data_utils import rand_name
from tempest import config
@@ -43,7 +41,7 @@
ssh server hosted at the IP address. This check guarantees
that the IP address is associated with the target VM.
- #TODO(mnewby) - Need to implement the following:
+ # TODO(mnewby) - Need to implement the following:
- the Tempest host can ssh into the VM via the IP address and
successfully execute the following:
@@ -162,8 +160,8 @@
@attr(type='smoke')
def test_001_create_keypairs(self):
- self.keypairs[self.tenant_id] = self._create_keypair(
- self.compute_client)
+ self.keypairs[self.tenant_id] = self.create_keypair(
+ name=rand_name('keypair-smoke-'))
@attr(type='smoke')
def test_002_create_security_groups(self):
@@ -182,8 +180,8 @@
@attr(type='smoke')
def test_004_check_networks(self):
- #Checks that we see the newly created network/subnet/router via
- #checking the result of list_[networks,routers,subnets]
+ # Checks that we see the newly created network/subnet/router via
+ # checking the result of list_[networks,routers,subnets]
seen_nets = self._list_networks()
seen_names = [n['name'] for n in seen_nets]
seen_ids = [n['id'] for n in seen_nets]
@@ -254,8 +252,6 @@
self.floating_ips[server].append(floating_ip)
@attr(type='smoke')
- @testtools.skipIf(CONF.service_available.neutron,
- "Skipped unti bug #1210664 is resolved")
def test_008_check_public_network_connectivity(self):
if not self.floating_ips:
raise self.skipTest('No floating ips have been allocated.')
diff --git a/tempest/scenario/test_server_basic_ops.py b/tempest/scenario/test_server_basic_ops.py
index 43ac2d9..2903687 100644
--- a/tempest/scenario/test_server_basic_ops.py
+++ b/tempest/scenario/test_server_basic_ops.py
@@ -36,14 +36,8 @@
* Terminate the instance
"""
- def create_keypair(self):
- kp_name = rand_name('keypair-smoke')
- self.keypair = self.compute_client.keypairs.create(kp_name)
- try:
- self.assertEqual(self.keypair.id, kp_name)
- self.set_resource('keypair', self.keypair)
- except AttributeError:
- self.fail("Keypair object not successfully created.")
+ def add_keypair(self):
+ self.keypair = self.create_keypair()
def create_security_group(self):
sg_name = rand_name('secgroup-smoke')
@@ -58,32 +52,11 @@
self.fail("SecurityGroup object not successfully created.")
# Add rules to the security group
- rulesets = [
- {
- 'ip_protocol': 'tcp',
- 'from_port': 1,
- 'to_port': 65535,
- 'cidr': '0.0.0.0/0',
- 'group_id': self.secgroup.id
- },
- {
- 'ip_protocol': 'icmp',
- 'from_port': -1,
- 'to_port': -1,
- 'cidr': '0.0.0.0/0',
- 'group_id': self.secgroup.id
- }
- ]
- for ruleset in rulesets:
- try:
- self.compute_client.security_group_rules.create(
- self.secgroup.id, **ruleset)
- except Exception:
- self.fail("Failed to create rule in security group.")
+ self.create_loginable_secgroup_rule(secgroup_id=self.secgroup.id)
def boot_instance(self):
create_kwargs = {
- 'key_name': self.get_resource('keypair').id
+ 'key_name': self.keypair.id
}
instance = self.create_server(self.compute_client,
create_kwargs=create_kwargs)
@@ -131,7 +104,7 @@
self.remove_resource('instance')
def test_server_basicops(self):
- self.create_keypair()
+ self.add_keypair()
self.create_security_group()
self.boot_instance()
self.pause_server()
diff --git a/tempest/scenario/test_snapshot_pattern.py b/tempest/scenario/test_snapshot_pattern.py
index e8ce1bd..1e090af 100644
--- a/tempest/scenario/test_snapshot_pattern.py
+++ b/tempest/scenario/test_snapshot_pattern.py
@@ -51,29 +51,7 @@
create_kwargs=create_kwargs)
def _add_keypair(self):
- name = rand_name('scenario-keypair-')
- self.keypair = self.compute_client.keypairs.create(name=name)
- self.addCleanup(self.compute_client.keypairs.delete, self.keypair)
- self.assertEqual(name, self.keypair.name)
-
- def _create_security_group_rule(self):
- sgs = self.compute_client.security_groups.list()
- for sg in sgs:
- if sg.name == 'default':
- secgroup = sg
-
- ruleset = {
- # ssh
- 'ip_protocol': 'tcp',
- 'from_port': 22,
- 'to_port': 22,
- 'cidr': '0.0.0.0/0',
- 'group_id': None
- }
- sg_rule = self.compute_client.security_group_rules.create(secgroup.id,
- **ruleset)
- self.addCleanup(self.compute_client.security_group_rules.delete,
- sg_rule.id)
+ self.keypair = self.create_keypair()
def _ssh_to_server(self, server_or_ip):
if isinstance(server_or_ip, basestring):
@@ -120,7 +98,7 @@
def test_snapshot_pattern(self):
# prepare for booting a instance
self._add_keypair()
- self._create_security_group_rule()
+ self.create_loginable_secgroup_rule()
# boot a instance and create a timestamp file in it
server = self._boot_image(self.config.compute.image_ref)
diff --git a/tempest/scenario/test_stamp_pattern.py b/tempest/scenario/test_stamp_pattern.py
index 038d251..8864b2f 100644
--- a/tempest/scenario/test_stamp_pattern.py
+++ b/tempest/scenario/test_stamp_pattern.py
@@ -71,10 +71,7 @@
create_kwargs=create_kwargs)
def _add_keypair(self):
- name = rand_name('scenario-keypair-')
- self.keypair = self.compute_client.keypairs.create(name=name)
- self.addCleanup(self.compute_client.keypairs.delete, self.keypair)
- self.assertEqual(name, self.keypair.name)
+ self.keypair = self.create_keypair()
def _create_floating_ip(self):
floating_ip = self.compute_client.floating_ips.create()
@@ -84,25 +81,6 @@
def _add_floating_ip(self, server, floating_ip):
server.add_floating_ip(floating_ip)
- def _create_security_group_rule(self):
- sgs = self.compute_client.security_groups.list()
- for sg in sgs:
- if sg.name == 'default':
- secgroup = sg
-
- ruleset = {
- # ssh
- 'ip_protocol': 'tcp',
- 'from_port': 22,
- 'to_port': 22,
- 'cidr': '0.0.0.0/0',
- 'group_id': None
- }
- sg_rule = self.compute_client.security_group_rules.create(secgroup.id,
- **ruleset)
- self.addCleanup(self.compute_client.security_group_rules.delete,
- sg_rule.id)
-
def _remote_client_to_server(self, server_or_ip):
if isinstance(server_or_ip, basestring):
ip = server_or_ip
@@ -154,20 +132,7 @@
self.volume_client.volumes, volume.id, status)
def _create_volume(self, snapshot_id=None):
- name = rand_name('scenario-volume-')
- LOG.debug("volume display-name:%s" % name)
- volume = self.volume_client.volumes.create(size=1,
- display_name=name,
- snapshot_id=snapshot_id)
- LOG.debug("volume created:%s" % volume.display_name)
-
- def cleaner():
- self._wait_for_volume_status(volume, 'available')
- self.volume_client.volumes.delete(volume)
- self.addCleanup(cleaner)
- self._wait_for_volume_status(volume, 'available')
- self.assertEqual(name, volume.display_name)
- return volume
+ return self.create_volume(snapshot_id=snapshot_id)
def _attach_volume(self, server, volume):
attach_volume_client = self.compute_client.volumes.create_server_volume
@@ -214,7 +179,7 @@
def test_stamp_pattern(self):
# prepare for booting a instance
self._add_keypair()
- self._create_security_group_rule()
+ self.create_loginable_secgroup_rule()
# boot an instance and create a timestamp file in it
volume = self._create_volume()
diff --git a/tempest/scenario/test_volume_snapshot_pattern.py b/tempest/scenario/test_volume_snapshot_pattern.py
index 95a30ed..8fa177e 100644
--- a/tempest/scenario/test_volume_snapshot_pattern.py
+++ b/tempest/scenario/test_volume_snapshot_pattern.py
@@ -34,14 +34,7 @@
def _create_volume_from_image(self):
img_uuid = self.config.compute.image_ref
vol_name = rand_name('volume-origin')
- vol = self.volume_client.volumes.create(size=1,
- display_name=vol_name,
- imageRef=img_uuid)
- self.set_resource(vol.id, vol)
- self.status_timeout(self.volume_client.volumes,
- vol.id,
- 'available')
- return vol
+ return self.create_volume(name=vol_name, imageRef=img_uuid)
def _boot_instance_from_volume(self, vol_id):
# NOTE(gfidente): the syntax for block_device_mapping is
@@ -71,14 +64,7 @@
def _create_volume_from_snapshot(self, snap_id):
vol_name = rand_name('volume')
- vol = self.volume_client.volumes.create(size=1,
- display_name=vol_name,
- snapshot_id=snap_id)
- self.set_resource(vol.id, vol)
- self.status_timeout(self.volume_client.volumes,
- vol.id,
- 'available')
- return vol
+ return self.create_volume(name=vol_name, snapshot_id=snap_id)
def _stop_instances(self, instances):
# NOTE(gfidente): two loops so we do not wait for the status twice
diff --git a/tempest/services/compute/xml/flavors_client.py b/tempest/services/compute/xml/flavors_client.py
index 3a8986c..6fbb9e3 100644
--- a/tempest/services/compute/xml/flavors_client.py
+++ b/tempest/services/compute/xml/flavors_client.py
@@ -30,8 +30,6 @@
"http://docs.openstack.org/compute/ext/flavor_extra_data/api/v1.1"
XMLNS_OS_FLV_ACCESS = \
"http://docs.openstack.org/compute/ext/flavor_access/api/v1.1"
-XMLNS_OS_FLV_WITH_EXT_SPECS = \
- "http://docs.openstack.org/compute/ext/flavor_with_extra_specs/api/v2.0"
class FlavorsClientXML(RestClientXML):
@@ -51,7 +49,7 @@
if k == '{%s}ephemeral' % XMLNS_OS_FLV_EXT_DATA:
k = 'OS-FLV-EXT-DATA:ephemeral'
- if k == '{%s}extra_specs' % XMLNS_OS_FLV_WITH_EXT_SPECS:
+ if k == 'extra_specs':
k = 'OS-FLV-WITH-EXT-SPECS:extra_specs'
flavor[k] = dict(v)
continue
diff --git a/tempest/services/compute/xml/servers_client.py b/tempest/services/compute/xml/servers_client.py
index 12e7034..5c7a629 100644
--- a/tempest/services/compute/xml/servers_client.py
+++ b/tempest/services/compute/xml/servers_client.py
@@ -350,7 +350,7 @@
addrs = []
for child in node.getchildren():
addrs.append({'version': int(child.get('version')),
- 'addr': child.get('version')})
+ 'addr': child.get('addr')})
return {node.get('id'): addrs}
def list_addresses(self, server_id):
diff --git a/tempest/services/network/json/network_client.py b/tempest/services/network/json/network_client.py
index b19ed8d..588dc8f 100644
--- a/tempest/services/network/json/network_client.py
+++ b/tempest/services/network/json/network_client.py
@@ -17,7 +17,7 @@
from tempest.common.rest_client import RestClient
-class NetworkClient(RestClient):
+class NetworkClientJSON(RestClient):
"""
Tempest REST client for Neutron. Uses v2 of the Neutron API, since the
@@ -33,8 +33,8 @@
"""
def __init__(self, config, username, password, auth_url, tenant_name=None):
- super(NetworkClient, self).__init__(config, username, password,
- auth_url, tenant_name)
+ super(NetworkClientJSON, self).__init__(config, username, password,
+ auth_url, tenant_name)
self.service = self.config.network.catalog_type
self.version = '2.0'
self.uri_prefix = "v%s" % (self.version)
@@ -108,15 +108,14 @@
body = json.loads(body)
return resp, body
- def create_port(self, network_id, state=None):
- if not state:
- state = True
+ def create_port(self, network_id, **kwargs):
post_body = {
'port': {
'network_id': network_id,
- 'admin_state_up': state,
}
}
+ for key, val in kwargs.items():
+ post_body['port'][key] = val
body = json.dumps(post_body)
uri = '%s/ports' % (self.uri_prefix)
resp, body = self.post(uri, headers=self.headers, body=body)
@@ -244,7 +243,7 @@
'admin_state_up', body['router']['admin_state_up'])
# Must uncomment/modify these lines once LP question#233187 is solved
#update_body['external_gateway_info'] = kwargs.get(
- # 'external_gateway_info', body['router']['external_gateway_info'])
+ # 'external_gateway_info', body['router']['external_gateway_info'])
update_body = dict(router=update_body)
update_body = json.dumps(update_body)
resp, body = self.put(uri, update_body, self.headers)
diff --git a/tempest/services/network/xml/__init__.py b/tempest/services/network/xml/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/services/network/xml/__init__.py
diff --git a/tempest/services/network/xml/network_client.py b/tempest/services/network/xml/network_client.py
new file mode 100755
index 0000000..d4fb656
--- /dev/null
+++ b/tempest/services/network/xml/network_client.py
@@ -0,0 +1,172 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+import xml.etree.ElementTree as ET
+
+from tempest.common.rest_client import RestClientXML
+from tempest.services.compute.xml.common import Document
+from tempest.services.compute.xml.common import Element
+from tempest.services.compute.xml.common import xml_to_json
+
+
+class NetworkClientXML(RestClientXML):
+
+ def __init__(self, config, username, password, auth_url, tenant_name=None):
+ super(NetworkClientXML, self).__init__(config, username, password,
+ auth_url, tenant_name)
+ self.service = self.config.network.catalog_type
+ self.version = '2.0'
+ self.uri_prefix = "v%s" % (self.version)
+
+ def list_networks(self):
+ uri = '%s/networks' % (self.uri_prefix)
+ resp, body = self.get(uri, self.headers)
+ networks = self._parse_array(etree.fromstring(body))
+ networks = {"networks": networks}
+ return resp, networks
+
+ def create_network(self, name):
+ uri = '%s/networks' % (self.uri_prefix)
+ post_body = Element("network")
+ p2 = Element("name", name)
+ post_body.append(p2)
+ resp, body = self.post(uri, str(Document(post_body)), self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def create_bulk_network(self, count, names):
+ uri = '%s/networks' % (self.uri_prefix)
+ post_body = Element("networks")
+ for i in range(count):
+ p1 = Element("network")
+ p2 = Element("name", names[i])
+ p1.append(p2)
+ post_body.append(p1)
+ resp, body = self.post(uri, str(Document(post_body)), self.headers)
+ networks = self._parse_array(etree.fromstring(body))
+ networks = {"networks": networks}
+ return resp, networks
+
+ def delete_network(self, uuid):
+ uri = '%s/networks/%s' % (self.uri_prefix, str(uuid))
+ return self.delete(uri, self.headers)
+
+ def show_network(self, uuid):
+ uri = '%s/networks/%s' % (self.uri_prefix, str(uuid))
+ resp, body = self.get(uri, self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def create_subnet(self, net_uuid, cidr):
+ uri = '%s/subnets' % (self.uri_prefix)
+ subnet = Element("subnet")
+ p2 = Element("network_id", net_uuid)
+ p3 = Element("cidr", cidr)
+ p4 = Element("ip_version", 4)
+ subnet.append(p2)
+ subnet.append(p3)
+ subnet.append(p4)
+ resp, body = self.post(uri, str(Document(subnet)), self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def delete_subnet(self, subnet_id):
+ uri = '%s/subnets/%s' % (self.uri_prefix, str(subnet_id))
+ return self.delete(uri, self.headers)
+
+ def list_subnets(self):
+ uri = '%s/subnets' % (self.uri_prefix)
+ resp, body = self.get(uri, self.headers)
+ subnets = self._parse_array(etree.fromstring(body))
+ subnets = {"subnets": subnets}
+ return resp, subnets
+
+ def show_subnet(self, uuid):
+ uri = '%s/subnets/%s' % (self.uri_prefix, str(uuid))
+ resp, body = self.get(uri, self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def create_port(self, net_uuid, **kwargs):
+ uri = '%s/ports' % (self.uri_prefix)
+ port = Element("port")
+ p1 = Element('network_id', net_uuid)
+ port.append(p1)
+ for key, val in kwargs.items():
+ key = Element(key, val)
+ port.append(key)
+ resp, body = self.post(uri, str(Document(port)), self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def delete_port(self, port_id):
+ uri = '%s/ports/%s' % (self.uri_prefix, str(port_id))
+ return self.delete(uri, self.headers)
+
+ def _parse_array(self, node):
+ array = []
+ for child in node.getchildren():
+ array.append(xml_to_json(child))
+ return array
+
+ def list_ports(self):
+ url = '%s/ports' % (self.uri_prefix)
+ resp, body = self.get(url, self.headers)
+ ports = self._parse_array(etree.fromstring(body))
+ ports = {"ports": ports}
+ return resp, ports
+
+ def show_port(self, port_id):
+ uri = '%s/ports/%s' % (self.uri_prefix, str(port_id))
+ resp, body = self.get(uri, self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def update_port(self, port_id, name):
+ uri = '%s/ports/%s' % (self.uri_prefix, str(port_id))
+ port = Element("port")
+ p2 = Element("name", name)
+ port.append(p2)
+ resp, body = self.put(uri, str(Document(port)), self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def update_subnet(self, subnet_id, name):
+ uri = '%s/subnets/%s' % (self.uri_prefix, str(subnet_id))
+ subnet = Element("subnet")
+ p2 = Element("name", name)
+ subnet.append(p2)
+ resp, body = self.put(uri, str(Document(subnet)), self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def update_network(self, net_id, name):
+ uri = '%s/networks/%s' % (self.uri_prefix, str(net_id))
+ network = Element("network")
+ p2 = Element("name", name)
+ network.append(p2)
+ resp, body = self.put(uri, str(Document(network)), self.headers)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+
+def _root_tag_fetcher_and_xml_to_json_parse(xml_returned_body):
+ body = ET.fromstring(xml_returned_body)
+ root_tag = body.tag
+ if root_tag.startswith("{"):
+ ns, root_tag = root_tag.split("}", 1)
+ body = xml_to_json(etree.fromstring(xml_returned_body))
+ body = {root_tag: body}
+ return body
diff --git a/tempest/stress/actions/unit_test.py b/tempest/stress/actions/unit_test.py
new file mode 100644
index 0000000..95cc1bc
--- /dev/null
+++ b/tempest/stress/actions/unit_test.py
@@ -0,0 +1,79 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.openstack.common import importutils
+import tempest.stress.stressaction as stressaction
+
+
+class SetUpClassRunTime(object):
+
+ process = 'process'
+ action = 'action'
+ application = 'application'
+
+ allowed = set((process, action, application))
+
+ @classmethod
+ def validate(cls, name):
+ if name not in cls.allowed:
+ raise KeyError("\'%s\' not a valid option" % name)
+
+
+class UnitTest(stressaction.StressAction):
+ """This is a special action for running existing unittests as stress test.
+ You need to pass ``test_method`` and ``class_setup_per``
+ using ``kwargs`` in the JSON descriptor;
+ ``test_method`` should be the fully qualified name of a unittest,
+ ``class_setup_per`` should be one from:
+ ``application``: once in the stress job lifetime
+ ``process``: once in the worker process lifetime
+ ``action``: on each action
+ Not all combinations work in every case.
+ """
+
+ def setUp(self, **kwargs):
+ method = kwargs['test_method'].split('.')
+ self.test_method = method.pop()
+ self.klass = importutils.import_class('.'.join(method))
+ # valid options are 'process', 'application' , 'action'
+ self.class_setup_per = kwargs.get('class_setup_per',
+ SetUpClassRunTime.process)
+ SetUpClassRunTime.validate(self.class_setup_per)
+
+ if self.class_setup_per == SetUpClassRunTime.application:
+ self.klass.setUpClass()
+ self.setupclass_called = False
+
+ def run_core(self):
+ res = self.klass(self.test_method).run()
+ if res.errors:
+ raise RuntimeError(res.errors)
+
+ def run(self):
+ if self.class_setup_per != SetUpClassRunTime.application:
+ if (self.class_setup_per == SetUpClassRunTime.action
+ or self.setupclass_called is False):
+ self.klass.setUpClass()
+ self.setupclass_called = True
+
+ self.run_core()
+
+ if (self.class_setup_per == SetUpClassRunTime.action):
+ self.klass.tearDownClass()
+ else:
+ self.run_core()
+
+ def tearDown(self):
+ if self.class_setup_per != SetUpClassRunTime.action:
+ self.klass.tearDownClass()
diff --git a/tempest/stress/etc/sample-unit-test.json b/tempest/stress/etc/sample-unit-test.json
new file mode 100644
index 0000000..b388bfe
--- /dev/null
+++ b/tempest/stress/etc/sample-unit-test.json
@@ -0,0 +1,8 @@
+[{"action": "tempest.stress.actions.unit_test.UnitTest",
+ "threads": 8,
+ "use_admin": false,
+ "use_isolated_tenants": false,
+ "kwargs": {"test_method": "tempest.cli.simple_read_only.test_glance.SimpleReadOnlyGlanceClientTest.test_glance_fake_action",
+ "class_setup_per": "process"}
+ }
+]
diff --git a/tempest/whitebox/manager.py b/tempest/whitebox/manager.py
index b2632f1..3b1b107 100644
--- a/tempest/whitebox/manager.py
+++ b/tempest/whitebox/manager.py
@@ -72,7 +72,7 @@
cls.flavor_ref = cls.config.compute.flavor_ref
cls.flavor_ref_alt = cls.config.compute.flavor_ref_alt
- #NOTE(afazekas): Mimics the helper method used in the api tests
+ # NOTE(afazekas): Mimics the helper method used in the api tests
@classmethod
def create_server(cls, **kwargs):
flavor_ref = cls.config.compute.flavor_ref
@@ -127,7 +127,7 @@
cmd = shlex.split(cmd)
result = subprocess.Popen(cmd, stdout=subprocess.PIPE)
- #Todo(rohitk): Need to define host connection parameters in config
+ # TODO(rohitk): Need to define host connection parameters in config
else:
client = self.get_ssh_connection(self.config.whitebox.api_host,
self.config.whitebox.api_user,
diff --git a/tempest/whitebox/test_servers_whitebox.py b/tempest/whitebox/test_servers_whitebox.py
index 1c1cdeb..abe903c 100644
--- a/tempest/whitebox/test_servers_whitebox.py
+++ b/tempest/whitebox/test_servers_whitebox.py
@@ -26,7 +26,7 @@
@classmethod
def setUpClass(cls):
super(ServersWhiteboxTest, cls).setUpClass()
- #NOTE(afazekas): Strange relationship
+ # NOTE(afazekas): Strange relationship
BaseIdentityAdminTest.setUpClass()
cls.client = cls.servers_client
cls.img_client = cls.images_client