Initial copy of API tests from tempest
This change is the result of running
tools/copy_api_tests_from_tempest.sh.
Change-Id: Ica02dbe1ed26f1bc9526ea9682756ebc5877cf4a
diff --git a/neutron/tests/tempest/common/__init__.py b/neutron/tests/tempest/common/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/neutron/tests/tempest/common/__init__.py
diff --git a/neutron/tests/tempest/common/accounts.py b/neutron/tests/tempest/common/accounts.py
new file mode 100644
index 0000000..2cb8cec
--- /dev/null
+++ b/neutron/tests/tempest/common/accounts.py
@@ -0,0 +1,350 @@
+# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import hashlib
+import os
+
+import yaml
+
+from neutron.tests.tempest.common import cred_provider
+from neutron.tests.tempest import config
+from neutron.tests.tempest import exceptions
+from oslo_concurrency import lockutils
+from neutron.openstack.common import log as logging
+
+CONF = config.CONF
+LOG = logging.getLogger(__name__)
+
+
+def read_accounts_yaml(path):
+    with open(path, 'r') as yaml_file:
+        accounts = yaml.load(yaml_file)
+    return accounts
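+
+# A minimal, illustrative sketch of the accounts file consumed above -- a
+# YAML list of credential dicts. The optional 'roles' and 'types' keys are
+# popped by Accounts.get_hash_dict(); the remaining keys are later passed to
+# cred_provider.get_credentials(). All values below are placeholders only:
+#
+#     - username: 'user_1'
+#       tenant_name: 'tenant_1'
+#       password: 'secretpass'
+#       roles:
+#           - 'Member'
+#     - username: 'admin_user_1'
+#       tenant_name: 'admin_tenant_1'
+#       password: 'secretpass'
+#       types:
+#           - 'admin'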
+
+
+class Accounts(cred_provider.CredentialProvider):
+
+ def __init__(self, name):
+ super(Accounts, self).__init__(name)
+ self.name = name
+ if os.path.isfile(CONF.auth.test_accounts_file):
+ accounts = read_accounts_yaml(CONF.auth.test_accounts_file)
+ self.use_default_creds = False
+ else:
+ accounts = {}
+ self.use_default_creds = True
+ self.hash_dict = self.get_hash_dict(accounts)
+        self.accounts_dir = os.path.join(CONF.oslo_concurrency.lock_path,
+                                         'test_accounts')
+ self.isolated_creds = {}
+
+ @classmethod
+ def _append_role(cls, role, account_hash, hash_dict):
+ if role in hash_dict['roles']:
+ hash_dict['roles'][role].append(account_hash)
+ else:
+ hash_dict['roles'][role] = [account_hash]
+ return hash_dict
+
+ @classmethod
+ def get_hash_dict(cls, accounts):
+ hash_dict = {'roles': {}, 'creds': {}}
+ # Loop over the accounts read from the yaml file
+ for account in accounts:
+ roles = []
+ types = []
+ if 'roles' in account:
+ roles = account.pop('roles')
+ if 'types' in account:
+ types = account.pop('types')
+ temp_hash = hashlib.md5()
+ temp_hash.update(str(account))
+ temp_hash_key = temp_hash.hexdigest()
+ hash_dict['creds'][temp_hash_key] = account
+ for role in roles:
+ hash_dict = cls._append_role(role, temp_hash_key,
+ hash_dict)
+            # If types are set for the account, append the hash to the
+            # matching role subdict
+ for type in types:
+ if type == 'admin':
+ hash_dict = cls._append_role(CONF.identity.admin_role,
+ temp_hash_key, hash_dict)
+ elif type == 'operator':
+ hash_dict = cls._append_role(
+ CONF.object_storage.operator_role, temp_hash_key,
+ hash_dict)
+ elif type == 'reseller_admin':
+ hash_dict = cls._append_role(
+ CONF.object_storage.reseller_admin_role,
+ temp_hash_key,
+ hash_dict)
+ return hash_dict
+
+ def is_multi_user(self):
+        # Default credentials are not a valid option with the locking
+        # Accounts provider
+ if self.use_default_creds:
+ raise exceptions.InvalidConfiguration(
+ "Account file %s doesn't exist" % CONF.auth.test_accounts_file)
+ else:
+ return len(self.hash_dict['creds']) > 1
+
+ def is_multi_tenant(self):
+ return self.is_multi_user()
+
+ def _create_hash_file(self, hash_string):
+        path = os.path.join(self.accounts_dir, hash_string)
+ if not os.path.isfile(path):
+ with open(path, 'w') as fd:
+ fd.write(self.name)
+ return True
+ return False
+
+ @lockutils.synchronized('test_accounts_io', external=True)
+ def _get_free_hash(self, hashes):
+ # Cast as a list because in some edge cases a set will be passed in
+ hashes = list(hashes)
+ if not os.path.isdir(self.accounts_dir):
+ os.mkdir(self.accounts_dir)
+ # Create File from first hash (since none are in use)
+ self._create_hash_file(hashes[0])
+ return hashes[0]
+ names = []
+ for _hash in hashes:
+ res = self._create_hash_file(_hash)
+ if res:
+ return _hash
+ else:
+                path = os.path.join(self.accounts_dir, _hash)
+ with open(path, 'r') as fd:
+ names.append(fd.read())
+ msg = ('Insufficient number of users provided. %s have allocated all '
+ 'the credentials for this allocation request' % ','.join(names))
+ raise exceptions.InvalidConfiguration(msg)
+
+ def _get_match_hash_list(self, roles=None):
+ hashes = []
+ if roles:
+ # Loop over all the creds for each role in the subdict and generate
+ # a list of cred lists for each role
+ for role in roles:
+ temp_hashes = self.hash_dict['roles'].get(role, None)
+ if not temp_hashes:
+ raise exceptions.InvalidConfiguration(
+ "No credentials with role: %s specified in the "
+ "accounts ""file" % role)
+ hashes.append(temp_hashes)
+ # Take the list of lists and do a boolean and between each list to
+ # find the creds which fall under all the specified roles
+ temp_list = set(hashes[0])
+ for hash_list in hashes[1:]:
+ temp_list = temp_list & set(hash_list)
+ hashes = temp_list
+ else:
+ hashes = self.hash_dict['creds'].keys()
+        # NOTE(mtreinish): admin is a special case because of the increased
+        # privilege set, which could potentially cause issues on tests where
+        # that is not expected. So unless the admin role is explicitly
+        # requested, do not allocate admin.
+ admin_hashes = self.hash_dict['roles'].get(CONF.identity.admin_role,
+ None)
+ if ((not roles or CONF.identity.admin_role not in roles) and
+ admin_hashes):
+ useable_hashes = [x for x in hashes if x not in admin_hashes]
+ else:
+ useable_hashes = hashes
+ return useable_hashes
+
+ def _get_creds(self, roles=None):
+ if self.use_default_creds:
+ raise exceptions.InvalidConfiguration(
+ "Account file %s doesn't exist" % CONF.auth.test_accounts_file)
+ useable_hashes = self._get_match_hash_list(roles)
+ free_hash = self._get_free_hash(useable_hashes)
+ return self.hash_dict['creds'][free_hash]
+
+ @lockutils.synchronized('test_accounts_io', external=True)
+ def remove_hash(self, hash_string):
+ hash_path = os.path.join(self.accounts_dir, hash_string)
+ if not os.path.isfile(hash_path):
+ LOG.warning('Expected an account lock file %s to remove, but '
+ 'one did not exist' % hash_path)
+ else:
+ os.remove(hash_path)
+ if not os.listdir(self.accounts_dir):
+ os.rmdir(self.accounts_dir)
+
+ def get_hash(self, creds):
+ for _hash in self.hash_dict['creds']:
+ # Comparing on the attributes that are expected in the YAML
+ if all([getattr(creds, k) == self.hash_dict['creds'][_hash][k] for
+ k in creds.get_init_attributes()]):
+ return _hash
+ raise AttributeError('Invalid credentials %s' % creds)
+
+ def remove_credentials(self, creds):
+ _hash = self.get_hash(creds)
+ self.remove_hash(_hash)
+
+ def get_primary_creds(self):
+ if self.isolated_creds.get('primary'):
+ return self.isolated_creds.get('primary')
+ creds = self._get_creds()
+ primary_credential = cred_provider.get_credentials(**creds)
+ self.isolated_creds['primary'] = primary_credential
+ return primary_credential
+
+ def get_alt_creds(self):
+ if self.isolated_creds.get('alt'):
+ return self.isolated_creds.get('alt')
+ creds = self._get_creds()
+ alt_credential = cred_provider.get_credentials(**creds)
+ self.isolated_creds['alt'] = alt_credential
+ return alt_credential
+
+ def get_creds_by_roles(self, roles, force_new=False):
+ roles = list(set(roles))
+ exist_creds = self.isolated_creds.get(str(roles), None)
+        # The force_new kwarg is used to allocate an additional set of creds
+        # with the same role list. The entry for the previous allocation in
+        # the isolated_creds dict is moved to a new index.
+ if exist_creds and not force_new:
+ return exist_creds
+ elif exist_creds and force_new:
+ new_index = str(roles) + '-' + str(len(self.isolated_creds))
+ self.isolated_creds[new_index] = exist_creds
+ creds = self._get_creds(roles=roles)
+ role_credential = cred_provider.get_credentials(**creds)
+ self.isolated_creds[str(roles)] = role_credential
+ return role_credential
+
+ def clear_isolated_creds(self):
+ for creds in self.isolated_creds.values():
+ self.remove_credentials(creds)
+
+ def get_admin_creds(self):
+ return self.get_creds_by_roles([CONF.identity.admin_role])
+
+ def is_role_available(self, role):
+ if self.use_default_creds:
+ return False
+ else:
+ if self.hash_dict['roles'].get(role):
+ return True
+ return False
+
+ def admin_available(self):
+ return self.is_role_available(CONF.identity.admin_role)
+
+
+class NotLockingAccounts(Accounts):
+ """Credentials provider which always returns the first and second
+ configured accounts as primary and alt users.
+ This credential provider can be used in case of serial test execution
+ to preserve the current behaviour of the serial tempest run.
+ """
+
+ def _unique_creds(self, cred_arg=None):
+ """Verify that the configured credentials are valid and distinct """
+ if self.use_default_creds:
+ try:
+ user = self.get_primary_creds()
+ alt_user = self.get_alt_creds()
+ return getattr(user, cred_arg) != getattr(alt_user, cred_arg)
+ except exceptions.InvalidCredentials as ic:
+ msg = "At least one of the configured credentials is " \
+ "not valid: %s" % ic.message
+ raise exceptions.InvalidConfiguration(msg)
+ else:
+ # TODO(andreaf) Add a uniqueness check here
+ return len(self.hash_dict['creds']) > 1
+
+ def is_multi_user(self):
+ return self._unique_creds('username')
+
+ def is_multi_tenant(self):
+ return self._unique_creds('tenant_id')
+
+ def get_creds(self, id, roles=None):
+ try:
+ hashes = self._get_match_hash_list(roles)
+ # No need to sort the dict as within the same python process
+ # the HASH seed won't change, so subsequent calls to keys()
+ # will return the same result
+ _hash = hashes[id]
+ except IndexError:
+ msg = 'Insufficient number of users provided'
+ raise exceptions.InvalidConfiguration(msg)
+ return self.hash_dict['creds'][_hash]
+
+ def get_primary_creds(self):
+ if self.isolated_creds.get('primary'):
+ return self.isolated_creds.get('primary')
+ if not self.use_default_creds:
+ creds = self.get_creds(0)
+ primary_credential = cred_provider.get_credentials(**creds)
+ else:
+ primary_credential = cred_provider.get_configured_credentials(
+ 'user')
+ self.isolated_creds['primary'] = primary_credential
+ return primary_credential
+
+ def get_alt_creds(self):
+ if self.isolated_creds.get('alt'):
+ return self.isolated_creds.get('alt')
+ if not self.use_default_creds:
+ creds = self.get_creds(1)
+ alt_credential = cred_provider.get_credentials(**creds)
+ else:
+ alt_credential = cred_provider.get_configured_credentials(
+ 'alt_user')
+ self.isolated_creds['alt'] = alt_credential
+ return alt_credential
+
+ def clear_isolated_creds(self):
+ self.isolated_creds = {}
+
+ def get_admin_creds(self):
+ if not self.use_default_creds:
+ return self.get_creds_by_roles([CONF.identity.admin_role])
+ else:
+ creds = cred_provider.get_configured_credentials(
+ "identity_admin", fill_in=False)
+ self.isolated_creds['admin'] = creds
+ return creds
+
+ def get_creds_by_roles(self, roles, force_new=False):
+ roles = list(set(roles))
+ exist_creds = self.isolated_creds.get(str(roles), None)
+ index = 0
+ if exist_creds and not force_new:
+ return exist_creds
+ elif exist_creds and force_new:
+ new_index = str(roles) + '-' + str(len(self.isolated_creds))
+ self.isolated_creds[new_index] = exist_creds
+        # Figure out how many existing creds for this role set are present
+        # and use this as the index into the returned hash list, to ensure
+        # separate creds are returned when force_new is True
+ for creds_names in self.isolated_creds:
+ if str(roles) in creds_names:
+ index = index + 1
+ if not self.use_default_creds:
+ creds = self.get_creds(index, roles=roles)
+ role_credential = cred_provider.get_credentials(**creds)
+ self.isolated_creds[str(roles)] = role_credential
+ else:
+ msg = "Default credentials can not be used with specifying "\
+ "credentials by roles"
+ raise exceptions.InvalidConfiguration(msg)
+ return role_credential
diff --git a/neutron/tests/tempest/common/commands.py b/neutron/tests/tempest/common/commands.py
new file mode 100644
index 0000000..f132e86
--- /dev/null
+++ b/neutron/tests/tempest/common/commands.py
@@ -0,0 +1,39 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import shlex
+import subprocess
+
+from neutron.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+def copy_file_to_host(file_from, dest, host, username, pkey):
+ dest = "%s@%s:%s" % (username, host, dest)
+ cmd = "scp -v -o UserKnownHostsFile=/dev/null " \
+ "-o StrictHostKeyChecking=no " \
+ "-i %(pkey)s %(file1)s %(dest)s" % {'pkey': pkey,
+ 'file1': file_from,
+ 'dest': dest}
+ args = shlex.split(cmd.encode('utf-8'))
+ subprocess_args = {'stdout': subprocess.PIPE,
+ 'stderr': subprocess.STDOUT}
+ proc = subprocess.Popen(args, **subprocess_args)
+ stdout, stderr = proc.communicate()
+ if proc.returncode != 0:
+ LOG.error(("Command {0} returned with exit status {1},"
+ "output {2}, error {3}").format(cmd, proc.returncode,
+ stdout, stderr))
+ return stdout
diff --git a/neutron/tests/tempest/common/cred_provider.py b/neutron/tests/tempest/common/cred_provider.py
new file mode 100644
index 0000000..a999cfc
--- /dev/null
+++ b/neutron/tests/tempest/common/cred_provider.py
@@ -0,0 +1,123 @@
+# Copyright (c) 2014 Deutsche Telekom AG
+# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import abc
+
+import six
+
+from neutron.tests.tempest import auth
+from neutron.tests.tempest import config
+from neutron.tests.tempest import exceptions
+from neutron.openstack.common import log as logging
+
+CONF = config.CONF
+LOG = logging.getLogger(__name__)
+
+# Type of credentials available from configuration
+CREDENTIAL_TYPES = {
+ 'identity_admin': ('identity', 'admin'),
+ 'user': ('identity', None),
+ 'alt_user': ('identity', 'alt')
+}
+
+
+# Read credentials from configuration and build a Credentials object
+# based on the specified or configured identity version
+def get_configured_credentials(credential_type, fill_in=True,
+ identity_version=None):
+ identity_version = identity_version or CONF.identity.auth_version
+ if identity_version not in ('v2', 'v3'):
+ raise exceptions.InvalidConfiguration(
+ 'Unsupported auth version: %s' % identity_version)
+ if credential_type not in CREDENTIAL_TYPES:
+ raise exceptions.InvalidCredentials()
+ conf_attributes = ['username', 'password', 'tenant_name']
+ if identity_version == 'v3':
+ conf_attributes.append('domain_name')
+ # Read the parts of credentials from config
+ params = {}
+ section, prefix = CREDENTIAL_TYPES[credential_type]
+ for attr in conf_attributes:
+ _section = getattr(CONF, section)
+ if prefix is None:
+ params[attr] = getattr(_section, attr)
+ else:
+ params[attr] = getattr(_section, prefix + "_" + attr)
+ # Build and validate credentials. We are reading configured credentials,
+ # so validate them even if fill_in is False
+ credentials = get_credentials(fill_in=fill_in, **params)
+ if not fill_in:
+ if not credentials.is_valid():
+ msg = ("The %s credentials are incorrectly set in the config file."
+ " Double check that all required values are assigned" %
+ credential_type)
+ raise exceptions.InvalidConfiguration(msg)
+ return credentials
+
+
+# Wrapper around auth.get_credentials to use the configured identity version
+# if none is specified
+def get_credentials(fill_in=True, identity_version=None, **kwargs):
+ identity_version = identity_version or CONF.identity.auth_version
+ # In case of "v3" add the domain from config if not specified
+ if identity_version == 'v3':
+ domain_fields = set(x for x in auth.KeystoneV3Credentials.ATTRIBUTES
+ if 'domain' in x)
+ if not domain_fields.intersection(kwargs.keys()):
+ kwargs['user_domain_name'] = CONF.identity.admin_domain_name
+ auth_url = CONF.identity.uri_v3
+ else:
+ auth_url = CONF.identity.uri
+ return auth.get_credentials(auth_url,
+ fill_in=fill_in,
+ identity_version=identity_version,
+ **kwargs)
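+
+# Illustrative only (values are placeholders): these helpers are typically
+# used to read and validate credentials from the tempest configuration, e.g.
+#
+#     admin_creds = get_configured_credentials('identity_admin',
+#                                              fill_in=False)
+#     creds = get_credentials(username='user_1', tenant_name='tenant_1',
+#                             password='secretpass', fill_in=False)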
+
+
+@six.add_metaclass(abc.ABCMeta)
+class CredentialProvider(object):
+ def __init__(self, name, password='pass', network_resources=None):
+ self.name = name
+
+ @abc.abstractmethod
+ def get_primary_creds(self):
+ return
+
+ @abc.abstractmethod
+ def get_admin_creds(self):
+ return
+
+ @abc.abstractmethod
+ def get_alt_creds(self):
+ return
+
+ @abc.abstractmethod
+ def clear_isolated_creds(self):
+ return
+
+ @abc.abstractmethod
+ def is_multi_user(self):
+ return
+
+ @abc.abstractmethod
+ def is_multi_tenant(self):
+ return
+
+ @abc.abstractmethod
+ def get_creds_by_roles(self, roles, force_new=False):
+ return
+
+ @abc.abstractmethod
+ def is_role_available(self, role):
+ return
diff --git a/neutron/tests/tempest/common/credentials.py b/neutron/tests/tempest/common/credentials.py
new file mode 100644
index 0000000..a52ec4a
--- /dev/null
+++ b/neutron/tests/tempest/common/credentials.py
@@ -0,0 +1,64 @@
+# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+from neutron.tests.tempest.common import accounts
+from neutron.tests.tempest.common import cred_provider
+from neutron.tests.tempest.common import isolated_creds
+from neutron.tests.tempest import config
+from neutron.tests.tempest import exceptions
+
+CONF = config.CONF
+
+
+# Return the right implementation of CredentialProvider based on config
+# Dropping interface and password, as they are never used anyway
+# TODO(andreaf) Drop them from the CredentialsProvider interface completely
+def get_isolated_credentials(name, network_resources=None,
+ force_tenant_isolation=False):
+    # If a test requires a new account to work, it can get one by forcing
+    # tenant isolation. A new account will be produced only for that test.
+    # In case admin credentials are not available for the account creation,
+    # the test should be skipped, otherwise it would fail.
+ if CONF.auth.allow_tenant_isolation or force_tenant_isolation:
+ return isolated_creds.IsolatedCreds(
+ name=name,
+ network_resources=network_resources)
+ else:
+ if CONF.auth.locking_credentials_provider:
+ # Most params are not relevant for pre-created accounts
+ return accounts.Accounts(name=name)
+ else:
+ return accounts.NotLockingAccounts(name=name)
+
+
+# Helper function to check whether admin credentials are available, so that
+# skip_checks can find out with a single call whether admin creds are
+# available.
+def is_admin_available():
+ is_admin = True
+ # If tenant isolation is enabled admin will be available
+ if CONF.auth.allow_tenant_isolation:
+ return is_admin
+ # Check whether test accounts file has the admin specified or not
+ elif os.path.isfile(CONF.auth.test_accounts_file):
+ check_accounts = accounts.Accounts(name='check_admin')
+ if not check_accounts.admin_available():
+ is_admin = False
+ else:
+ try:
+ cred_provider.get_configured_credentials('identity_admin')
+ except exceptions.InvalidConfiguration:
+ is_admin = False
+ return is_admin
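+
+# Illustrative usage sketch (names are placeholders): a test class would
+# typically obtain its credentials provider and creds like
+#
+#     creds_provider = get_isolated_credentials(name='SomeApiTest')
+#     primary_creds = creds_provider.get_primary_creds()
+#     if not is_admin_available():
+#         pass  # skip admin-only tests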
diff --git a/neutron/tests/tempest/common/custom_matchers.py b/neutron/tests/tempest/common/custom_matchers.py
new file mode 100644
index 0000000..298a94e
--- /dev/null
+++ b/neutron/tests/tempest/common/custom_matchers.py
@@ -0,0 +1,226 @@
+# Copyright 2013 NTT Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import re
+
+from testtools import helpers
+
+
+class ExistsAllResponseHeaders(object):
+ """
+ Specific matcher to check the existence of Swift's response headers
+
+    This matcher checks the existence of common headers for each HTTP method
+    and target (account, container or object).
+ When checking the existence of 'specific' headers such as
+ X-Account-Meta-* or X-Object-Manifest for example, those headers must be
+ checked in each test code.
+ """
+
+ def __init__(self, target, method):
+ """
+ param: target Account/Container/Object
+ param: method PUT/GET/HEAD/DELETE/COPY/POST
+ """
+ self.target = target
+ self.method = method
+
+ def match(self, actual):
+ """
+ param: actual HTTP response headers
+ """
+ # Check common headers for all HTTP methods
+ if 'content-length' not in actual:
+ return NonExistentHeader('content-length')
+ if 'content-type' not in actual:
+ return NonExistentHeader('content-type')
+ if 'x-trans-id' not in actual:
+ return NonExistentHeader('x-trans-id')
+ if 'date' not in actual:
+ return NonExistentHeader('date')
+
+ # Check headers for a specific method or target
+ if self.method == 'GET' or self.method == 'HEAD':
+ if 'x-timestamp' not in actual:
+ return NonExistentHeader('x-timestamp')
+ if 'accept-ranges' not in actual:
+ return NonExistentHeader('accept-ranges')
+ if self.target == 'Account':
+ if 'x-account-bytes-used' not in actual:
+ return NonExistentHeader('x-account-bytes-used')
+ if 'x-account-container-count' not in actual:
+ return NonExistentHeader('x-account-container-count')
+ if 'x-account-object-count' not in actual:
+ return NonExistentHeader('x-account-object-count')
+ elif self.target == 'Container':
+ if 'x-container-bytes-used' not in actual:
+ return NonExistentHeader('x-container-bytes-used')
+ if 'x-container-object-count' not in actual:
+ return NonExistentHeader('x-container-object-count')
+ elif self.target == 'Object':
+ if 'etag' not in actual:
+ return NonExistentHeader('etag')
+ if 'last-modified' not in actual:
+ return NonExistentHeader('last-modified')
+ elif self.method == 'PUT':
+ if self.target == 'Object':
+ if 'etag' not in actual:
+ return NonExistentHeader('etag')
+ if 'last-modified' not in actual:
+ return NonExistentHeader('last-modified')
+ elif self.method == 'COPY':
+ if self.target == 'Object':
+ if 'etag' not in actual:
+ return NonExistentHeader('etag')
+ if 'last-modified' not in actual:
+ return NonExistentHeader('last-modified')
+ if 'x-copied-from' not in actual:
+ return NonExistentHeader('x-copied-from')
+ if 'x-copied-from-last-modified' not in actual:
+ return NonExistentHeader('x-copied-from-last-modified')
+
+ return None
+
+
+class NonExistentHeader(object):
+ """
+    Provides an error message for end users when an expected header is
+    missing from Swift's responses
+ """
+
+ def __init__(self, header):
+ self.header = header
+
+ def describe(self):
+ return "%s header does not exist" % self.header
+
+ def get_details(self):
+ return {}
+
+
+class AreAllWellFormatted(object):
+ """
+ Specific matcher to check the correctness of formats of values of Swift's
+ response headers
+
+ This matcher checks the format of values of response headers.
+ When checking the format of values of 'specific' headers such as
+ X-Account-Meta-* or X-Object-Manifest for example, those values must be
+ checked in each test code.
+ """
+
+ def match(self, actual):
+ for key, value in actual.iteritems():
+ if key in ('content-length', 'x-account-bytes-used',
+ 'x-account-container-count', 'x-account-object-count',
+ 'x-container-bytes-used', 'x-container-object-count')\
+ and not value.isdigit():
+ return InvalidFormat(key, value)
+ elif key in ('content-type', 'date', 'last-modified',
+ 'x-copied-from-last-modified') and not value:
+ return InvalidFormat(key, value)
+ elif key == 'x-timestamp' and not re.match("^\d+\.?\d*\Z", value):
+ return InvalidFormat(key, value)
+ elif key == 'x-copied-from' and not re.match("\S+/\S+", value):
+ return InvalidFormat(key, value)
+ elif key == 'x-trans-id' and \
+ not re.match("^tx[0-9a-f]{21}-[0-9a-f]{10}.*", value):
+ return InvalidFormat(key, value)
+ elif key == 'accept-ranges' and not value == 'bytes':
+ return InvalidFormat(key, value)
+ elif key == 'etag' and not value.isalnum():
+ return InvalidFormat(key, value)
+ elif key == 'transfer-encoding' and not value == 'chunked':
+ return InvalidFormat(key, value)
+
+ return None
+
+
+class InvalidFormat(object):
+ """
+    Provides an error message for end users when the format of a certain
+    header value is invalid
+ """
+
+ def __init__(self, key, value):
+ self.key = key
+ self.value = value
+
+ def describe(self):
+ return "InvalidFormat (%s, %s)" % (self.key, self.value)
+
+ def get_details(self):
+ return {}
+
+
+class MatchesDictExceptForKeys(object):
+ """Matches two dictionaries. Verifies all items are equals except for those
+ identified by a list of keys.
+ """
+
+ def __init__(self, expected, excluded_keys=None):
+ self.expected = expected
+ self.excluded_keys = excluded_keys if excluded_keys is not None else []
+
+ def match(self, actual):
+ filtered_expected = helpers.dict_subtract(self.expected,
+ self.excluded_keys)
+ filtered_actual = helpers.dict_subtract(actual,
+ self.excluded_keys)
+ if filtered_actual != filtered_expected:
+ return DictMismatch(filtered_expected, filtered_actual)
+
+
+class DictMismatch(object):
+ """Mismatch between two dicts describes deltas"""
+
+ def __init__(self, expected, actual):
+ self.expected = expected
+ self.actual = actual
+ self.intersect = set(self.expected) & set(self.actual)
+ self.symmetric_diff = set(self.expected) ^ set(self.actual)
+
+ def _format_dict(self, dict_to_format):
+ # Ensure the error string dict is printed in a set order
+ # NOTE(mtreinish): needed to ensure a deterministic error msg for
+ # testing. Otherwise the error message will be dependent on the
+ # dict ordering.
+ dict_string = "{"
+ for key in sorted(dict_to_format):
+ dict_string += "'%s': %s, " % (key, dict_to_format[key])
+ dict_string = dict_string[:-2] + '}'
+ return dict_string
+
+ def describe(self):
+ msg = ""
+ if self.symmetric_diff:
+ only_expected = helpers.dict_subtract(self.expected, self.actual)
+ only_actual = helpers.dict_subtract(self.actual, self.expected)
+ if only_expected:
+ msg += "Only in expected:\n %s\n" % self._format_dict(
+ only_expected)
+ if only_actual:
+ msg += "Only in actual:\n %s\n" % self._format_dict(
+ only_actual)
+ diff_set = set(o for o in self.intersect if
+ self.expected[o] != self.actual[o])
+ if diff_set:
+ msg += "Differences:\n"
+ for o in diff_set:
+ msg += " %s: expected %s, actual %s\n" % (
+ o, self.expected[o], self.actual[o])
+ return msg
+
+ def get_details(self):
+ return {}
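+
+# Illustrative usage sketch with testtools-style assertions (the header and
+# dict values are placeholders):
+#
+#     self.assertThat(resp_headers,
+#                     custom_matchers.ExistsAllResponseHeaders('Object',
+#                                                              'GET'))
+#     self.assertThat(resp_headers, custom_matchers.AreAllWellFormatted())
+#     self.assertThat(actual, custom_matchers.MatchesDictExceptForKeys(
+#         expected, excluded_keys=['updated_at']))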
diff --git a/neutron/tests/tempest/common/generator/__init__.py b/neutron/tests/tempest/common/generator/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/neutron/tests/tempest/common/generator/__init__.py
diff --git a/neutron/tests/tempest/common/generator/base_generator.py b/neutron/tests/tempest/common/generator/base_generator.py
new file mode 100644
index 0000000..2771823
--- /dev/null
+++ b/neutron/tests/tempest/common/generator/base_generator.py
@@ -0,0 +1,182 @@
+# Copyright 2014 Deutsche Telekom AG
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+import functools
+
+import jsonschema
+
+from neutron.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+def _check_for_expected_result(name, schema):
+ expected_result = None
+ if "results" in schema:
+ if name in schema["results"]:
+ expected_result = schema["results"][name]
+ return expected_result
+
+
+def generator_type(*args, **kwargs):
+ def wrapper(func):
+ func.types = args
+ for key in kwargs:
+ setattr(func, key, kwargs[key])
+ return func
+ return wrapper
+
+
+def simple_generator(fn):
+ """
+ Decorator for simple generators that return one value
+ """
+ @functools.wraps(fn)
+ def wrapped(self, schema):
+ result = fn(self, schema)
+ if result is not None:
+ expected_result = _check_for_expected_result(fn.__name__, schema)
+ return (fn.__name__, result, expected_result)
+ return
+ return wrapped
+
+
+class BasicGeneratorSet(object):
+ _instance = None
+
+ schema = {
+ "type": "object",
+ "properties": {
+ "name": {"type": "string"},
+ "http-method": {
+ "enum": ["GET", "PUT", "HEAD",
+ "POST", "PATCH", "DELETE", 'COPY']
+ },
+ "admin_client": {"type": "boolean"},
+ "url": {"type": "string"},
+ "default_result_code": {"type": "integer"},
+ "json-schema": {},
+ "resources": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "name": {"type": "string"},
+ "expected_result": {"type": "integer"}
+ }
+ }
+ ]
+ }
+ },
+ "results": {
+ "type": "object",
+ "properties": {}
+ }
+ },
+ "required": ["name", "http-method", "url"],
+ "additionalProperties": False,
+ }
+
+ def __init__(self):
+ self.types_dict = {}
+ for m in dir(self):
+            if callable(getattr(self, m)) and '__' not in m:
+ method = getattr(self, m)
+ if hasattr(method, "types"):
+ for type in method.types:
+ if type not in self.types_dict:
+ self.types_dict[type] = []
+ self.types_dict[type].append(method)
+
+ def validate_schema(self, schema):
+ if "json-schema" in schema:
+ jsonschema.Draft4Validator.check_schema(schema['json-schema'])
+ jsonschema.validate(schema, self.schema)
+
+ def generate_scenarios(self, schema, path=None):
+ """
+        Generates the scenarios (all possible test cases) out of the given
+ schema.
+
+ :param schema: a dict style schema (see ``BasicGeneratorSet.schema``)
+ :param path: the schema path if the given schema is a subschema
+ """
+ schema_type = schema['type']
+ scenarios = []
+
+ if schema_type == 'object':
+ properties = schema["properties"]
+ for attribute, definition in properties.iteritems():
+ current_path = copy.copy(path)
+ if path is not None:
+ current_path.append(attribute)
+ else:
+ current_path = [attribute]
+ scenarios.extend(
+ self.generate_scenarios(definition, current_path))
+ elif isinstance(schema_type, list):
+ if "integer" in schema_type:
+ schema_type = "integer"
+ else:
+ raise Exception("non-integer list types not supported")
+ for generator in self.types_dict[schema_type]:
+ if hasattr(generator, "needed_property"):
+ prop = generator.needed_property
+ if (prop not in schema or
+ schema[prop] is None or
+ schema[prop] is False):
+ continue
+
+ name = generator.__name__
+ if ("exclude_tests" in schema and
+ name in schema["exclude_tests"]):
+ continue
+ if path is not None:
+ name = "%s_%s" % ("_".join(path), name)
+ scenarios.append({
+ "_negtest_name": name,
+ "_negtest_generator": generator,
+ "_negtest_schema": schema,
+ "_negtest_path": path})
+ return scenarios
+
+ def generate_payload(self, test, schema):
+ """
+        Generates one jsonschema out of the given test. It's mandatory to
+        call generate_scenarios beforehand to register all needed variables
+        on the test.
+
+ :param test: A test object (scenario) with all _negtest variables on it
+ :param schema: schema for the test
+ """
+ generator = test._negtest_generator
+ ret = generator(test._negtest_schema)
+ path = copy.copy(test._negtest_path)
+ expected_result = None
+
+ if ret is not None:
+ generator_result = generator(test._negtest_schema)
+ invalid_snippet = generator_result[1]
+ expected_result = generator_result[2]
+ element = path.pop()
+ if len(path) > 0:
+ schema_snip = reduce(dict.get, path, schema)
+ schema_snip[element] = invalid_snippet
+ else:
+ schema[element] = invalid_snippet
+ return expected_result
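+
+# Illustrative sketch of a test description accepted by validate_schema()
+# (the name, url and property names are placeholders); generate_scenarios()
+# is then typically driven by the nested "json-schema" subtree:
+#
+#     _example_schema = {
+#         "name": "get-resource",
+#         "http-method": "GET",
+#         "url": "resources/%s",
+#         "resources": ["resource-id"],
+#         "json-schema": {
+#             "type": "object",
+#             "properties": {
+#                 "size": {"type": "integer", "minimum": 1, "maximum": 10}
+#             }
+#         }
+#     }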
diff --git a/neutron/tests/tempest/common/generator/negative_generator.py b/neutron/tests/tempest/common/generator/negative_generator.py
new file mode 100644
index 0000000..704d9fb
--- /dev/null
+++ b/neutron/tests/tempest/common/generator/negative_generator.py
@@ -0,0 +1,78 @@
+# Copyright 2014 Deutsche Telekom AG
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+import neutron.tests.tempest.common.generator.base_generator as base
+import neutron.tests.tempest.common.generator.valid_generator as valid
+from neutron.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+class NegativeTestGenerator(base.BasicGeneratorSet):
+ @base.generator_type("string")
+ @base.simple_generator
+ def gen_int(self, _):
+ return 4
+
+ @base.generator_type("integer")
+ @base.simple_generator
+ def gen_string(self, _):
+ return "XXXXXX"
+
+ @base.generator_type("integer", "string")
+ def gen_none(self, schema):
+ # Note(mkoderer): it's not using the decorator otherwise it'd be
+ # filtered
+ expected_result = base._check_for_expected_result('gen_none', schema)
+ return ('gen_none', None, expected_result)
+
+ @base.generator_type("string")
+ @base.simple_generator
+ def gen_str_min_length(self, schema):
+ min_length = schema.get("minLength", 0)
+ if min_length > 0:
+ return "x" * (min_length - 1)
+
+ @base.generator_type("string", needed_property="maxLength")
+ @base.simple_generator
+ def gen_str_max_length(self, schema):
+ max_length = schema.get("maxLength", -1)
+ return "x" * (max_length + 1)
+
+ @base.generator_type("integer", needed_property="minimum")
+ @base.simple_generator
+ def gen_int_min(self, schema):
+ minimum = schema["minimum"]
+ if "exclusiveMinimum" not in schema:
+ minimum -= 1
+ return minimum
+
+ @base.generator_type("integer", needed_property="maximum")
+ @base.simple_generator
+ def gen_int_max(self, schema):
+ maximum = schema["maximum"]
+ if "exclusiveMaximum" not in schema:
+ maximum += 1
+ return maximum
+
+ @base.generator_type("object", needed_property="additionalProperties")
+ @base.simple_generator
+ def gen_obj_add_attr(self, schema):
+ valid_schema = valid.ValidTestGenerator().generate_valid(schema)
+ new_valid = copy.deepcopy(valid_schema)
+ new_valid["$$$$$$$$$$"] = "xxx"
+ return new_valid
diff --git a/neutron/tests/tempest/common/generator/valid_generator.py b/neutron/tests/tempest/common/generator/valid_generator.py
new file mode 100644
index 0000000..4d1906e
--- /dev/null
+++ b/neutron/tests/tempest/common/generator/valid_generator.py
@@ -0,0 +1,81 @@
+# Copyright 2014 Deutsche Telekom AG
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import neutron.tests.tempest.common.generator.base_generator as base
+from neutron.openstack.common import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+
+class ValidTestGenerator(base.BasicGeneratorSet):
+ @base.generator_type("string")
+ @base.simple_generator
+ def generate_valid_string(self, schema):
+ size = schema.get("minLength", 1)
+ # TODO(dkr mko): handle format and pattern
+ return "x" * size
+
+ @base.generator_type("integer")
+ @base.simple_generator
+ def generate_valid_integer(self, schema):
+ # TODO(dkr mko): handle multipleOf
+ if "minimum" in schema:
+ minimum = schema["minimum"]
+ if "exclusiveMinimum" not in schema:
+ return minimum
+ else:
+ return minimum + 1
+ if "maximum" in schema:
+ maximum = schema["maximum"]
+ if "exclusiveMaximum" not in schema:
+ return maximum
+ else:
+ return maximum - 1
+ return 0
+
+ @base.generator_type("object")
+ @base.simple_generator
+ def generate_valid_object(self, schema):
+ obj = {}
+ for k, v in schema["properties"].iteritems():
+ obj[k] = self.generate_valid(v)
+ return obj
+
+ def generate(self, schema):
+ schema_type = schema["type"]
+ if isinstance(schema_type, list):
+ if "integer" in schema_type:
+ schema_type = "integer"
+ else:
+ raise Exception("non-integer list types not supported")
+ result = []
+ if schema_type not in self.types_dict:
+ raise TypeError("generator (%s) doesn't support type: %s"
+ % (self.__class__.__name__, schema_type))
+ for generator in self.types_dict[schema_type]:
+ ret = generator(schema)
+ if ret is not None:
+ if isinstance(ret, list):
+ result.extend(ret)
+ elif isinstance(ret, tuple):
+ result.append(ret)
+ else:
+ raise Exception("generator (%s) returns invalid result: %s"
+ % (generator, ret))
+ return result
+
+ def generate_valid(self, schema):
+ return self.generate(schema)[0][1]
diff --git a/neutron/tests/tempest/common/glance_http.py b/neutron/tests/tempest/common/glance_http.py
new file mode 100644
index 0000000..c802472
--- /dev/null
+++ b/neutron/tests/tempest/common/glance_http.py
@@ -0,0 +1,377 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# Originally copied from python-glanceclient
+
+import copy
+import hashlib
+import httplib
+import json
+import posixpath
+import re
+import socket
+import StringIO
+import struct
+import urlparse
+
+
+import OpenSSL
+from six import moves
+from tempest_lib import exceptions as lib_exc
+
+from neutron.tests.tempest import exceptions as exc
+from neutron.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+USER_AGENT = 'tempest'
+CHUNKSIZE = 1024 * 64 # 64kB
+TOKEN_CHARS_RE = re.compile('^[-A-Za-z0-9+/=]*$')
+
+
+class HTTPClient(object):
+
+ def __init__(self, auth_provider, filters, **kwargs):
+ self.auth_provider = auth_provider
+ self.filters = filters
+ self.endpoint = auth_provider.base_url(filters)
+ endpoint_parts = urlparse.urlparse(self.endpoint)
+ self.endpoint_scheme = endpoint_parts.scheme
+ self.endpoint_hostname = endpoint_parts.hostname
+ self.endpoint_port = endpoint_parts.port
+ self.endpoint_path = endpoint_parts.path
+
+ self.connection_class = self.get_connection_class(self.endpoint_scheme)
+ self.connection_kwargs = self.get_connection_kwargs(
+ self.endpoint_scheme, **kwargs)
+
+ @staticmethod
+ def get_connection_class(scheme):
+ if scheme == 'https':
+ return VerifiedHTTPSConnection
+ else:
+ return httplib.HTTPConnection
+
+ @staticmethod
+ def get_connection_kwargs(scheme, **kwargs):
+ _kwargs = {'timeout': float(kwargs.get('timeout', 600))}
+
+ if scheme == 'https':
+ _kwargs['ca_certs'] = kwargs.get('ca_certs', None)
+ _kwargs['cert_file'] = kwargs.get('cert_file', None)
+ _kwargs['key_file'] = kwargs.get('key_file', None)
+ _kwargs['insecure'] = kwargs.get('insecure', False)
+ _kwargs['ssl_compression'] = kwargs.get('ssl_compression', True)
+
+ return _kwargs
+
+ def get_connection(self):
+ _class = self.connection_class
+ try:
+ return _class(self.endpoint_hostname, self.endpoint_port,
+ **self.connection_kwargs)
+ except httplib.InvalidURL:
+ raise exc.EndpointNotFound
+
+ def _http_request(self, url, method, **kwargs):
+ """Send an http request with the specified characteristics.
+
+ Wrapper around httplib.HTTP(S)Connection.request to handle tasks such
+ as setting headers and error handling.
+ """
+ # Copy the kwargs so we can reuse the original in case of redirects
+ kwargs['headers'] = copy.deepcopy(kwargs.get('headers', {}))
+ kwargs['headers'].setdefault('User-Agent', USER_AGENT)
+
+ self._log_request(method, url, kwargs['headers'])
+
+ conn = self.get_connection()
+
+ try:
+ url_parts = urlparse.urlparse(url)
+ conn_url = posixpath.normpath(url_parts.path)
+ LOG.debug('Actual Path: {path}'.format(path=conn_url))
+ if kwargs['headers'].get('Transfer-Encoding') == 'chunked':
+ conn.putrequest(method, conn_url)
+ for header, value in kwargs['headers'].items():
+ conn.putheader(header, value)
+ conn.endheaders()
+ chunk = kwargs['body'].read(CHUNKSIZE)
+ # Chunk it, baby...
+ while chunk:
+ conn.send('%x\r\n%s\r\n' % (len(chunk), chunk))
+ chunk = kwargs['body'].read(CHUNKSIZE)
+ conn.send('0\r\n\r\n')
+ else:
+ conn.request(method, conn_url, **kwargs)
+ resp = conn.getresponse()
+ except socket.gaierror as e:
+ message = ("Error finding address for %(url)s: %(e)s" %
+ {'url': url, 'e': e})
+ raise exc.EndpointNotFound(message)
+ except (socket.error, socket.timeout) as e:
+ message = ("Error communicating with %(endpoint)s %(e)s" %
+ {'endpoint': self.endpoint, 'e': e})
+ raise exc.TimeoutException(message)
+
+ body_iter = ResponseBodyIterator(resp)
+ # Read body into string if it isn't obviously image data
+ if resp.getheader('content-type', None) != 'application/octet-stream':
+ body_str = ''.join([body_chunk for body_chunk in body_iter])
+ body_iter = StringIO.StringIO(body_str)
+ self._log_response(resp, None)
+ else:
+ self._log_response(resp, body_iter)
+
+ return resp, body_iter
+
+ def _log_request(self, method, url, headers):
+ LOG.info('Request: ' + method + ' ' + url)
+ if headers:
+ headers_out = headers
+ if 'X-Auth-Token' in headers and headers['X-Auth-Token']:
+ token = headers['X-Auth-Token']
+ if len(token) > 64 and TOKEN_CHARS_RE.match(token):
+ headers_out = headers.copy()
+ headers_out['X-Auth-Token'] = "<Token omitted>"
+ LOG.info('Request Headers: ' + str(headers_out))
+
+ def _log_response(self, resp, body):
+ status = str(resp.status)
+ LOG.info("Response Status: " + status)
+ if resp.getheaders():
+ LOG.info('Response Headers: ' + str(resp.getheaders()))
+ if body:
+ str_body = str(body)
+ length = len(body)
+ LOG.info('Response Body: ' + str_body[:2048])
+ if length >= 2048:
+                LOG.debug("Large body (%d) md5 summary: %s", length,
+                          hashlib.md5(str_body).hexdigest())
+
+ def json_request(self, method, url, **kwargs):
+ kwargs.setdefault('headers', {})
+ kwargs['headers'].setdefault('Content-Type', 'application/json')
+ if kwargs['headers']['Content-Type'] != 'application/json':
+ msg = "Only application/json content-type is supported."
+ raise lib_exc.InvalidContentType(msg)
+
+ if 'body' in kwargs:
+ kwargs['body'] = json.dumps(kwargs['body'])
+
+ resp, body_iter = self._http_request(url, method, **kwargs)
+
+ if 'application/json' in resp.getheader('content-type', ''):
+ body = ''.join([chunk for chunk in body_iter])
+ try:
+ body = json.loads(body)
+ except ValueError:
+ LOG.error('Could not decode response body as JSON')
+ else:
+ msg = "Only json/application content-type is supported."
+ raise lib_exc.InvalidContentType(msg)
+
+ return resp, body
+
+ def raw_request(self, method, url, **kwargs):
+ kwargs.setdefault('headers', {})
+ kwargs['headers'].setdefault('Content-Type',
+ 'application/octet-stream')
+ if 'body' in kwargs:
+ if (hasattr(kwargs['body'], 'read')
+ and method.lower() in ('post', 'put')):
+ # We use 'Transfer-Encoding: chunked' because
+ # body size may not always be known in advance.
+ kwargs['headers']['Transfer-Encoding'] = 'chunked'
+
+ # Decorate the request with auth
+ req_url, kwargs['headers'], kwargs['body'] = \
+ self.auth_provider.auth_request(
+ method=method, url=url, headers=kwargs['headers'],
+ body=kwargs.get('body', None), filters=self.filters)
+ return self._http_request(req_url, method, **kwargs)
+
+
+class OpenSSLConnectionDelegator(object):
+ """
+ An OpenSSL.SSL.Connection delegator.
+
+ Supplies an additional 'makefile' method which httplib requires
+ and is not present in OpenSSL.SSL.Connection.
+
+ Note: Since it is not possible to inherit from OpenSSL.SSL.Connection
+ a delegator must be used.
+ """
+ def __init__(self, *args, **kwargs):
+ self.connection = OpenSSL.SSL.Connection(*args, **kwargs)
+
+ def __getattr__(self, name):
+ return getattr(self.connection, name)
+
+ def makefile(self, *args, **kwargs):
+ # Ensure the socket is closed when this file is closed
+ kwargs['close'] = True
+ return socket._fileobject(self.connection, *args, **kwargs)
+
+
+class VerifiedHTTPSConnection(httplib.HTTPSConnection):
+ """
+ Extended HTTPSConnection which uses the OpenSSL library
+ for enhanced SSL support.
+ Note: Much of this functionality can eventually be replaced
+ with native Python 3.3 code.
+ """
+ def __init__(self, host, port=None, key_file=None, cert_file=None,
+ ca_certs=None, timeout=None, insecure=False,
+ ssl_compression=True):
+ httplib.HTTPSConnection.__init__(self, host, port,
+ key_file=key_file,
+ cert_file=cert_file)
+ self.key_file = key_file
+ self.cert_file = cert_file
+ self.timeout = timeout
+ self.insecure = insecure
+ self.ssl_compression = ssl_compression
+ self.ca_certs = ca_certs
+ self.setcontext()
+
+ @staticmethod
+ def host_matches_cert(host, x509):
+ """
+        Verify that the x509 certificate we have received
+        from 'host' correctly identifies the server we are
+        connecting to, i.e. that the certificate's Common Name
+        or a Subject Alternative Name matches 'host'.
+ """
+ # First see if we can match the CN
+ if x509.get_subject().commonName == host:
+ return True
+
+ # Also try Subject Alternative Names for a match
+ san_list = None
+ for i in moves.xrange(x509.get_extension_count()):
+ ext = x509.get_extension(i)
+ if ext.get_short_name() == 'subjectAltName':
+ san_list = str(ext)
+ for san in ''.join(san_list.split()).split(','):
+ if san == "DNS:%s" % host:
+ return True
+
+ # Server certificate does not match host
+ msg = ('Host "%s" does not match x509 certificate contents: '
+ 'CommonName "%s"' % (host, x509.get_subject().commonName))
+ if san_list is not None:
+ msg = msg + ', subjectAltName "%s"' % san_list
+ raise exc.SSLCertificateError(msg)
+
+ def verify_callback(self, connection, x509, errnum,
+ depth, preverify_ok):
+ if x509.has_expired():
+ msg = "SSL Certificate expired on '%s'" % x509.get_notAfter()
+ raise exc.SSLCertificateError(msg)
+
+ if depth == 0 and preverify_ok is True:
+ # We verify that the host matches against the last
+ # certificate in the chain
+ return self.host_matches_cert(self.host, x509)
+ else:
+ # Pass through OpenSSL's default result
+ return preverify_ok
+
+ def setcontext(self):
+ """
+ Set up the OpenSSL context.
+ """
+ self.context = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD)
+
+ if self.ssl_compression is False:
+ self.context.set_options(0x20000) # SSL_OP_NO_COMPRESSION
+
+ if self.insecure is not True:
+ self.context.set_verify(OpenSSL.SSL.VERIFY_PEER,
+ self.verify_callback)
+ else:
+ self.context.set_verify(OpenSSL.SSL.VERIFY_NONE,
+ self.verify_callback)
+
+ if self.cert_file:
+ try:
+ self.context.use_certificate_file(self.cert_file)
+ except Exception as e:
+ msg = 'Unable to load cert from "%s" %s' % (self.cert_file, e)
+ raise exc.SSLConfigurationError(msg)
+ if self.key_file is None:
+ # We support having key and cert in same file
+ try:
+ self.context.use_privatekey_file(self.cert_file)
+ except Exception as e:
+ msg = ('No key file specified and unable to load key '
+ 'from "%s" %s' % (self.cert_file, e))
+ raise exc.SSLConfigurationError(msg)
+
+ if self.key_file:
+ try:
+ self.context.use_privatekey_file(self.key_file)
+ except Exception as e:
+ msg = 'Unable to load key from "%s" %s' % (self.key_file, e)
+ raise exc.SSLConfigurationError(msg)
+
+ if self.ca_certs:
+ try:
+ self.context.load_verify_locations(self.ca_certs)
+ except Exception as e:
+                msg = 'Unable to load CA from "%s" %s' % (self.ca_certs, e)
+ raise exc.SSLConfigurationError(msg)
+ else:
+ self.context.set_default_verify_paths()
+
+ def connect(self):
+ """
+ Connect to an SSL port using the OpenSSL library and apply
+ per-connection parameters.
+ """
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ if self.timeout is not None:
+ # '0' microseconds
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVTIMEO,
+ struct.pack('LL', self.timeout, 0))
+ self.sock = OpenSSLConnectionDelegator(self.context, sock)
+ self.sock.connect((self.host, self.port))
+
+ def close(self):
+ if self.sock:
+ # Remove the reference to the socket but don't close it yet.
+ # Response close will close both socket and associated
+ # file. Closing socket too soon will cause response
+ # reads to fail with socket IO error 'Bad file descriptor'.
+ self.sock = None
+ httplib.HTTPSConnection.close(self)
+
+
+class ResponseBodyIterator(object):
+ """A class that acts as an iterator over an HTTP response."""
+
+ def __init__(self, resp):
+ self.resp = resp
+
+ def __iter__(self):
+ while True:
+ yield self.next()
+
+ def next(self):
+ chunk = self.resp.read(CHUNKSIZE)
+ if chunk:
+ return chunk
+ else:
+ raise StopIteration()
diff --git a/neutron/tests/tempest/common/isolated_creds.py b/neutron/tests/tempest/common/isolated_creds.py
new file mode 100644
index 0000000..41d7390
--- /dev/null
+++ b/neutron/tests/tempest/common/isolated_creds.py
@@ -0,0 +1,392 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import netaddr
+from tempest_lib import exceptions as lib_exc
+
+from neutron.tests.api.contrib import clients
+from neutron.tests.tempest.common import cred_provider
+from neutron.tests.tempest.common.utils import data_utils
+from neutron.tests.tempest import config
+from neutron.tests.tempest import exceptions
+from neutron.openstack.common import log as logging
+
+CONF = config.CONF
+LOG = logging.getLogger(__name__)
+
+
+class IsolatedCreds(cred_provider.CredentialProvider):
+
+ def __init__(self, name, password='pass', network_resources=None):
+ super(IsolatedCreds, self).__init__(name, password, network_resources)
+ self.network_resources = network_resources
+ self.isolated_creds = {}
+ self.isolated_net_resources = {}
+ self.ports = []
+ self.password = password
+ self.identity_admin_client, self.network_admin_client = (
+ self._get_admin_clients())
+
+ def _get_admin_clients(self):
+ """
+ Returns a tuple with instances of the following admin clients (in this
+ order):
+ identity
+ network
+ """
+ os = clients.AdminManager()
+ return os.identity_client, os.network_client
+
+ def _create_tenant(self, name, description):
+ tenant = self.identity_admin_client.create_tenant(
+ name=name, description=description)
+ return tenant
+
+ def _get_tenant_by_name(self, name):
+ tenant = self.identity_admin_client.get_tenant_by_name(name)
+ return tenant
+
+ def _create_user(self, username, password, tenant, email):
+ user = self.identity_admin_client.create_user(
+ username, password, tenant['id'], email)
+ return user
+
+ def _get_user(self, tenant, username):
+ user = self.identity_admin_client.get_user_by_username(
+ tenant['id'], username)
+ return user
+
+ def _list_roles(self):
+ roles = self.identity_admin_client.list_roles()
+ return roles
+
+ def _assign_user_role(self, tenant, user, role_name):
+ role = None
+ try:
+ roles = self._list_roles()
+ role = next(r for r in roles if r['name'] == role_name)
+ except StopIteration:
+ msg = 'No "%s" role found' % role_name
+ raise lib_exc.NotFound(msg)
+ try:
+ self.identity_admin_client.assign_user_role(tenant['id'],
+ user['id'],
+ role['id'])
+ except lib_exc.Conflict:
+ LOG.warning('Trying to add %s for user %s in tenant %s but they '
+                        'were already granted that role' % (role_name,
+ user['name'],
+ tenant['name']))
+
+ def _delete_user(self, user):
+ self.identity_admin_client.delete_user(user)
+
+ def _delete_tenant(self, tenant):
+ if CONF.service_available.neutron:
+ self._cleanup_default_secgroup(tenant)
+ self.identity_admin_client.delete_tenant(tenant)
+
+ def _create_creds(self, suffix="", admin=False, roles=None):
+ """Create random credentials under the following schema.
+
+        If the name contains a '.', it is the full class path of something,
+        and we don't really care. If it isn't, it's probably a meaningful
+        name, so use it.
+
+ For logging purposes, -user and -tenant are long and redundant,
+ don't use them. The user# will be sufficient to figure it out.
+ """
+ if '.' in self.name:
+ root = ""
+ else:
+ root = self.name
+
+ tenant_name = data_utils.rand_name(root) + suffix
+ tenant_desc = tenant_name + "-desc"
+ tenant = self._create_tenant(name=tenant_name,
+ description=tenant_desc)
+
+ username = data_utils.rand_name(root) + suffix
+ email = data_utils.rand_name(root) + suffix + "@example.com"
+ user = self._create_user(username, self.password,
+ tenant, email)
+ if admin:
+ self._assign_user_role(tenant, user, CONF.identity.admin_role)
+ # Add roles specified in config file
+ for conf_role in CONF.auth.tempest_roles:
+ self._assign_user_role(tenant, user, conf_role)
+ # Add roles requested by caller
+ if roles:
+ for role in roles:
+ self._assign_user_role(tenant, user, role)
+ return self._get_credentials(user, tenant)
+
+ def _get_credentials(self, user, tenant):
+ return cred_provider.get_credentials(
+ username=user['name'], user_id=user['id'],
+ tenant_name=tenant['name'], tenant_id=tenant['id'],
+ password=self.password)
+
+ def _create_network_resources(self, tenant_id):
+ network = None
+ subnet = None
+ router = None
+        # Make sure the requested network resource settings are consistent
+ if self.network_resources:
+ if self.network_resources['router']:
+ if (not self.network_resources['subnet'] or
+ not self.network_resources['network']):
+ raise exceptions.InvalidConfiguration(
+ 'A router requires a subnet and network')
+ elif self.network_resources['subnet']:
+ if not self.network_resources['network']:
+ raise exceptions.InvalidConfiguration(
+ 'A subnet requires a network')
+ elif self.network_resources['dhcp']:
+ raise exceptions.InvalidConfiguration('DHCP requires a subnet')
+
+ data_utils.rand_name_root = data_utils.rand_name(self.name)
+ if not self.network_resources or self.network_resources['network']:
+ network_name = data_utils.rand_name_root + "-network"
+ network = self._create_network(network_name, tenant_id)
+ try:
+ if not self.network_resources or self.network_resources['subnet']:
+ subnet_name = data_utils.rand_name_root + "-subnet"
+ subnet = self._create_subnet(subnet_name, tenant_id,
+ network['id'])
+ if not self.network_resources or self.network_resources['router']:
+ router_name = data_utils.rand_name_root + "-router"
+ router = self._create_router(router_name, tenant_id)
+ self._add_router_interface(router['id'], subnet['id'])
+ except Exception:
+ if router:
+ self._clear_isolated_router(router['id'], router['name'])
+ if subnet:
+ self._clear_isolated_subnet(subnet['id'], subnet['name'])
+ if network:
+ self._clear_isolated_network(network['id'], network['name'])
+ raise
+ return network, subnet, router
+
+ def _create_network(self, name, tenant_id):
+ resp_body = self.network_admin_client.create_network(
+ name=name, tenant_id=tenant_id)
+ return resp_body['network']
+
+ def _create_subnet(self, subnet_name, tenant_id, network_id):
+ base_cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
+ mask_bits = CONF.network.tenant_network_mask_bits
+ for subnet_cidr in base_cidr.subnet(mask_bits):
+ try:
+ if self.network_resources:
+ resp_body = self.network_admin_client.\
+ create_subnet(
+ network_id=network_id, cidr=str(subnet_cidr),
+ name=subnet_name,
+ tenant_id=tenant_id,
+ enable_dhcp=self.network_resources['dhcp'],
+ ip_version=4)
+ else:
+ resp_body = self.network_admin_client.\
+ create_subnet(network_id=network_id,
+ cidr=str(subnet_cidr),
+ name=subnet_name,
+ tenant_id=tenant_id,
+ ip_version=4)
+ break
+ except lib_exc.BadRequest as e:
+ if 'overlaps with another subnet' not in str(e):
+ raise
+ else:
+ message = 'Available CIDR for subnet creation could not be found'
+ raise Exception(message)
+ return resp_body['subnet']
+
+ def _create_router(self, router_name, tenant_id):
+ external_net_id = dict(
+ network_id=CONF.network.public_network_id)
+ resp_body = self.network_admin_client.create_router(
+ router_name,
+ external_gateway_info=external_net_id,
+ tenant_id=tenant_id)
+ return resp_body['router']
+
+ def _add_router_interface(self, router_id, subnet_id):
+ self.network_admin_client.add_router_interface_with_subnet_id(
+ router_id, subnet_id)
+
+ def get_primary_network(self):
+ return self.isolated_net_resources.get('primary')[0]
+
+ def get_primary_subnet(self):
+ return self.isolated_net_resources.get('primary')[1]
+
+ def get_primary_router(self):
+ return self.isolated_net_resources.get('primary')[2]
+
+ def get_admin_network(self):
+ return self.isolated_net_resources.get('admin')[0]
+
+ def get_admin_subnet(self):
+ return self.isolated_net_resources.get('admin')[1]
+
+ def get_admin_router(self):
+ return self.isolated_net_resources.get('admin')[2]
+
+ def get_alt_network(self):
+ return self.isolated_net_resources.get('alt')[0]
+
+ def get_alt_subnet(self):
+ return self.isolated_net_resources.get('alt')[1]
+
+ def get_alt_router(self):
+ return self.isolated_net_resources.get('alt')[2]
+
+ def get_credentials(self, credential_type):
+ if self.isolated_creds.get(str(credential_type)):
+ credentials = self.isolated_creds[str(credential_type)]
+ else:
+ if credential_type in ['primary', 'alt', 'admin']:
+ is_admin = (credential_type == 'admin')
+ credentials = self._create_creds(admin=is_admin)
+ else:
+ credentials = self._create_creds(roles=credential_type)
+ self.isolated_creds[str(credential_type)] = credentials
+ # Maintained until tests are ported
+ LOG.info("Acquired isolated creds:\n credentials: %s"
+ % credentials)
+ if (CONF.service_available.neutron and
+ not CONF.baremetal.driver_enabled):
+ network, subnet, router = self._create_network_resources(
+ credentials.tenant_id)
+ self.isolated_net_resources[str(credential_type)] = (
+ network, subnet, router,)
+ LOG.info("Created isolated network resources for : \n"
+ + " credentials: %s" % credentials)
+ return credentials
+
+ def get_primary_creds(self):
+ return self.get_credentials('primary')
+
+ def get_admin_creds(self):
+ return self.get_credentials('admin')
+
+ def get_alt_creds(self):
+ return self.get_credentials('alt')
+
+ def get_creds_by_roles(self, roles, force_new=False):
+ roles = list(set(roles))
+ # The str() of the roles list becomes the dict key under which the
+ # created credential set is stored in the isolated_creds dict.
+ exist_creds = self.isolated_creds.get(str(roles))
+ # If the force_new flag is True, two credential sets with the same
+ # roles are needed; handle this by moving the old set to a separate
+ # index so it can still be cleaned up later.
+ if exist_creds and force_new:
+ new_index = str(roles) + '-' + str(len(self.isolated_creds))
+ self.isolated_creds[new_index] = exist_creds
+ del self.isolated_creds[str(roles)]
+ # Handle isolated neutron resources if they exist too
+ if CONF.service_available.neutron:
+ exist_net = self.isolated_net_resources.get(str(roles))
+ if exist_net:
+ self.isolated_net_resources[new_index] = exist_net
+ del self.isolated_net_resources[str(roles)]
+ return self.get_credentials(roles)
+
+ def _clear_isolated_router(self, router_id, router_name):
+ net_client = self.network_admin_client
+ try:
+ net_client.delete_router(router_id)
+ except lib_exc.NotFound:
+ LOG.warn('router with name: %s not found for delete' %
+ router_name)
+
+ def _clear_isolated_subnet(self, subnet_id, subnet_name):
+ net_client = self.network_admin_client
+ try:
+ net_client.delete_subnet(subnet_id)
+ except lib_exc.NotFound:
+ LOG.warn('subnet with name: %s not found for delete' %
+ subnet_name)
+
+ def _clear_isolated_network(self, network_id, network_name):
+ net_client = self.network_admin_client
+ try:
+ net_client.delete_network(network_id)
+ except lib_exc.NotFound:
+ LOG.warn('network with name: %s not found for delete' %
+ network_name)
+
+ def _cleanup_default_secgroup(self, tenant):
+ net_client = self.network_admin_client
+ resp_body = net_client.list_security_groups(tenant_id=tenant,
+ name="default")
+ secgroups_to_delete = resp_body['security_groups']
+ for secgroup in secgroups_to_delete:
+ try:
+ net_client.delete_security_group(secgroup['id'])
+ except lib_exc.NotFound:
+ LOG.warn('Security group %s, id %s not found for clean-up' %
+ (secgroup['name'], secgroup['id']))
+
+ def _clear_isolated_net_resources(self):
+ net_client = self.network_admin_client
+ for cred in self.isolated_net_resources:
+ network, subnet, router = self.isolated_net_resources.get(cred)
+ LOG.debug("Clearing network: %(network)s, "
+ "subnet: %(subnet)s, router: %(router)s",
+ {'network': network, 'subnet': subnet, 'router': router})
+ if (not self.network_resources or
+ self.network_resources.get('router')):
+ try:
+ net_client.remove_router_interface_with_subnet_id(
+ router['id'], subnet['id'])
+ except lib_exc.NotFound:
+ LOG.warn('router with name: %s not found for delete' %
+ router['name'])
+ self._clear_isolated_router(router['id'], router['name'])
+ if (not self.network_resources or
+ self.network_resources.get('subnet')):
+ self._clear_isolated_subnet(subnet['id'], subnet['name'])
+ if (not self.network_resources or
+ self.network_resources.get('network')):
+ self._clear_isolated_network(network['id'], network['name'])
+ self.isolated_net_resources = {}
+
+ def clear_isolated_creds(self):
+ if not self.isolated_creds:
+ return
+ self._clear_isolated_net_resources()
+ for creds in self.isolated_creds.itervalues():
+ try:
+ self._delete_user(creds.user_id)
+ except lib_exc.NotFound:
+ LOG.warn("user with name: %s not found for delete" %
+ creds.username)
+ try:
+ self._delete_tenant(creds.tenant_id)
+ except lib_exc.NotFound:
+ LOG.warn("tenant with name: %s not found for delete" %
+ creds.tenant_name)
+ self.isolated_creds = {}
+
+ def is_multi_user(self):
+ return True
+
+ def is_multi_tenant(self):
+ return True
+
+ def is_role_available(self, role):
+ return True
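For orientation, a hypothetical usage sketch of the credential provider above (not part of this change). The import path and class name are assumptions, since the class definition sits earlier in this file, and a working tempest configuration with admin credentials is required for any of it to run; only the methods shown above are relied upon.

    # Hypothetical sketch; module path and class name are assumed.
    from neutron.tests.tempest import isolated_creds

    provider = isolated_creds.IsolatedCreds('NetworksTest')  # assumed name
    creds = provider.get_primary_creds()      # creates tenant, user and, with
                                              # neutron enabled, network resources
    network = provider.get_primary_network()
    provider.clear_isolated_creds()           # tears down everything created above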
diff --git a/neutron/tests/tempest/common/negative_rest_client.py b/neutron/tests/tempest/common/negative_rest_client.py
new file mode 100644
index 0000000..9058516
--- /dev/null
+++ b/neutron/tests/tempest/common/negative_rest_client.py
@@ -0,0 +1,71 @@
+# (c) 2014 Deutsche Telekom AG
+# Copyright 2014 Red Hat, Inc.
+# Copyright 2014 NEC Corporation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from neutron.tests.tempest.common import service_client
+from neutron.tests.tempest import config
+
+CONF = config.CONF
+
+
+class NegativeRestClient(service_client.ServiceClient):
+ """
+ Version of RestClient that does not raise exceptions.
+ """
+ def __init__(self, auth_provider, service):
+ region = self._get_region(service)
+ super(NegativeRestClient, self).__init__(auth_provider,
+ service, region)
+
+ def _get_region(self, service):
+ """
+ Returns the region for a specific service
+ """
+ service_region = None
+ for cfgname in dir(CONF._config):
+ # Find all config.FOO.catalog_type and assume FOO is a service.
+ cfg = getattr(CONF, cfgname)
+ catalog_type = getattr(cfg, 'catalog_type', None)
+ if catalog_type == service:
+ service_region = getattr(cfg, 'region', None)
+ if not service_region:
+ service_region = CONF.identity.region
+ return service_region
+
+ def _error_checker(self, method, url,
+ headers, body, resp, resp_body):
+ pass
+
+ def send_request(self, method, url_template, resources, body=None):
+ url = url_template % tuple(resources)
+ if method == "GET":
+ resp, body = self.get(url)
+ elif method == "POST":
+ resp, body = self.post(url, body)
+ elif method == "PUT":
+ resp, body = self.put(url, body)
+ elif method == "PATCH":
+ resp, body = self.patch(url, body)
+ elif method == "HEAD":
+ resp, body = self.head(url)
+ elif method == "DELETE":
+ resp, body = self.delete(url)
+ elif method == "COPY":
+ resp, body = self.copy(url)
+ else:
+ assert False
+
+ return resp, body
diff --git a/neutron/tests/tempest/common/service_client.py b/neutron/tests/tempest/common/service_client.py
new file mode 100644
index 0000000..ed19e89
--- /dev/null
+++ b/neutron/tests/tempest/common/service_client.py
@@ -0,0 +1,93 @@
+# Copyright 2015 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest_lib.common import rest_client
+
+from neutron.tests.tempest import config
+
+CONF = config.CONF
+
+
+class ServiceClient(rest_client.RestClient):
+
+ def __init__(self, auth_provider, service, region,
+ endpoint_type=None, build_interval=None, build_timeout=None,
+ disable_ssl_certificate_validation=None, ca_certs=None,
+ trace_requests=None):
+
+ # TODO(oomichi): This params setting should be removed after all
+ # service clients pass these values, and we can make ServiceClient
+ # free from CONF values.
+ dscv = (disable_ssl_certificate_validation or
+ CONF.identity.disable_ssl_certificate_validation)
+ params = {
+ 'disable_ssl_certificate_validation': dscv,
+ 'ca_certs': ca_certs or CONF.identity.ca_certificates_file,
+ 'trace_requests': trace_requests or CONF.debug.trace_requests
+ }
+
+ if endpoint_type is not None:
+ params.update({'endpoint_type': endpoint_type})
+ if build_interval is not None:
+ params.update({'build_interval': build_interval})
+ if build_timeout is not None:
+ params.update({'build_timeout': build_timeout})
+ super(ServiceClient, self).__init__(auth_provider, service, region,
+ **params)
+
+
+class ResponseBody(dict):
+ """Class that wraps an http response and dict body into a single value.
+
+ Callers that receive this object will normally use it as a dict but
+ can extract the response if needed.
+ """
+
+ def __init__(self, response, body=None):
+ body_data = body or {}
+ self.update(body_data)
+ self.response = response
+
+ def __str__(self):
+ body = super(ResponseBody, self).__str__()
+ return "response: %s\nBody: %s" % (self.response, body)
+
+
+class ResponseBodyData(object):
+ """Class that wraps an http response and string data into a single value.
+ """
+
+ def __init__(self, response, data):
+ self.response = response
+ self.data = data
+
+ def __str__(self):
+ return "response: %s\nBody: %s" % (self.response, self.data)
+
+
+class ResponseBodyList(list):
+ """Class that wraps an http response and list body into a single value.
+
+ Callers that receive this object will normally use it as a list but
+ can extract the response if needed.
+ """
+
+ def __init__(self, response, body=None):
+ body_data = body or []
+ self.extend(body_data)
+ self.response = response
+
+ def __str__(self):
+ body = super(ResponseBodyList, self).__str__()
+ return "response: %s\nBody: %s" % (self.response, body)
diff --git a/neutron/tests/tempest/common/ssh.py b/neutron/tests/tempest/common/ssh.py
new file mode 100644
index 0000000..de1ad88
--- /dev/null
+++ b/neutron/tests/tempest/common/ssh.py
@@ -0,0 +1,152 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import cStringIO
+import select
+import socket
+import time
+import warnings
+
+import six
+
+from neutron.tests.tempest import exceptions
+from neutron.openstack.common import log as logging
+
+
+with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ import paramiko
+
+
+LOG = logging.getLogger(__name__)
+
+
+class Client(object):
+
+ def __init__(self, host, username, password=None, timeout=300, pkey=None,
+ channel_timeout=10, look_for_keys=False, key_filename=None):
+ self.host = host
+ self.username = username
+ self.password = password
+ if isinstance(pkey, six.string_types):
+ pkey = paramiko.RSAKey.from_private_key(
+ cStringIO.StringIO(str(pkey)))
+ self.pkey = pkey
+ self.look_for_keys = look_for_keys
+ self.key_filename = key_filename
+ self.timeout = int(timeout)
+ self.channel_timeout = float(channel_timeout)
+ self.buf_size = 1024
+
+ def _get_ssh_connection(self, sleep=1.5, backoff=1):
+ """Returns an ssh connection to the specified host."""
+ bsleep = sleep
+ ssh = paramiko.SSHClient()
+ ssh.set_missing_host_key_policy(
+ paramiko.AutoAddPolicy())
+ _start_time = time.time()
+ if self.pkey is not None:
+ LOG.info("Creating ssh connection to '%s' as '%s'"
+ " with public key authentication",
+ self.host, self.username)
+ else:
+ LOG.info("Creating ssh connection to '%s' as '%s'"
+ " with password %s",
+ self.host, self.username, str(self.password))
+ attempts = 0
+ while True:
+ try:
+ ssh.connect(self.host, username=self.username,
+ password=self.password,
+ look_for_keys=self.look_for_keys,
+ key_filename=self.key_filename,
+ timeout=self.channel_timeout, pkey=self.pkey)
+ LOG.info("ssh connection to %s@%s successfuly created",
+ self.username, self.host)
+ return ssh
+ except (socket.error,
+ paramiko.SSHException) as e:
+ if self._is_timed_out(_start_time):
+ LOG.exception("Failed to establish authenticated ssh"
+ " connection to %s@%s after %d attempts",
+ self.username, self.host, attempts)
+ raise exceptions.SSHTimeout(host=self.host,
+ user=self.username,
+ password=self.password)
+ bsleep += backoff
+ attempts += 1
+ LOG.warning("Failed to establish authenticated ssh"
+ " connection to %s@%s (%s). Number attempts: %s."
+ " Retry after %d seconds.",
+ self.username, self.host, e, attempts, bsleep)
+ time.sleep(bsleep)
+
+ def _is_timed_out(self, start_time):
+ return (time.time() - self.timeout) > start_time
+
+ def exec_command(self, cmd):
+ """
+ Execute the specified command on the server.
+
+ Note that this method reads the whole command output into memory and
+ thus shouldn't be used for large outputs.
+
+ :returns: data read from standard output of the command.
+ :raises: SSHExecCommandFailed if the command returns a nonzero exit
+ status. The exception contains the exit status and stderr content.
+ """
+ ssh = self._get_ssh_connection()
+ transport = ssh.get_transport()
+ channel = transport.open_session()
+ channel.fileno() # Register event pipe
+ channel.exec_command(cmd)
+ channel.shutdown_write()
+ out_data = []
+ err_data = []
+ poll = select.poll()
+ poll.register(channel, select.POLLIN)
+ start_time = time.time()
+
+ while True:
+ ready = poll.poll(self.channel_timeout)
+ if not any(ready):
+ if not self._is_timed_out(start_time):
+ continue
+ raise exceptions.TimeoutException(
+ "Command: '{0}' executed on host '{1}'.".format(
+ cmd, self.host))
+ if not ready[0]: # If there is nothing to read.
+ continue
+ out_chunk = err_chunk = None
+ if channel.recv_ready():
+ out_chunk = channel.recv(self.buf_size)
+ out_data += out_chunk,
+ if channel.recv_stderr_ready():
+ err_chunk = channel.recv_stderr(self.buf_size)
+ err_data += err_chunk,
+ if channel.closed and not err_chunk and not out_chunk:
+ break
+ exit_status = channel.recv_exit_status()
+ if 0 != exit_status:
+ raise exceptions.SSHExecCommandFailed(
+ command=cmd, exit_status=exit_status,
+ strerror=''.join(err_data))
+ return ''.join(out_data)
+
+ def test_connection_auth(self):
+ """Raises an exception when we can not connect to server via ssh."""
+ connection = self._get_ssh_connection()
+ connection.close()
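For reference, a hedged usage sketch of the SSH client above (not part of the change). The host, user and password are placeholders and a reachable SSH server is required, so this is illustrative only.

    from neutron.tests.tempest.common import ssh

    client = ssh.Client('192.0.2.10', 'cirros', password='secret',
                        timeout=60, channel_timeout=10)
    client.test_connection_auth()            # raises SSHTimeout if unreachable
    print(client.exec_command('uname -a'))   # raises SSHExecCommandFailed on non-zero exit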
diff --git a/neutron/tests/tempest/common/tempest_fixtures.py b/neutron/tests/tempest/common/tempest_fixtures.py
new file mode 100644
index 0000000..5c66cc6
--- /dev/null
+++ b/neutron/tests/tempest/common/tempest_fixtures.py
@@ -0,0 +1,21 @@
+# Copyright 2013 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from neutron.openstack.common.fixture import lockutils
+
+
+class LockFixture(lockutils.LockFixture):
+ def __init__(self, name):
+ super(LockFixture, self).__init__(name, 'tempest-')
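A small sketch (not part of the change) of how this fixture might be used to serialize tests that touch shared state; the test class and lock name below are made up.

    import testtools

    from neutron.tests.tempest.common import tempest_fixtures


    class QuotaSerializedTest(testtools.TestCase):
        def setUp(self):
            super(QuotaSerializedTest, self).setUp()
            # All tests taking this lock run one at a time.
            self.useFixture(tempest_fixtures.LockFixture('compute_quotas'))

        def test_placeholder(self):
            self.assertTrue(True)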
diff --git a/neutron/tests/tempest/common/utils/__init__.py b/neutron/tests/tempest/common/utils/__init__.py
new file mode 100644
index 0000000..04d898d
--- /dev/null
+++ b/neutron/tests/tempest/common/utils/__init__.py
@@ -0,0 +1,3 @@
+PING_IPV4_COMMAND = 'ping -c 3 '
+PING_IPV6_COMMAND = 'ping6 -c 3 '
+PING_PACKET_LOSS_REGEX = '(\d{1,3})\.?\d*\% packet loss'
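To show how these constants fit together, a sketch (not part of the change) that parses the packet-loss figure out of typical ping output; the sample output string is made up.

    import re

    from neutron.tests.tempest.common import utils

    output = "3 packets transmitted, 3 received, 0% packet loss, time 2004ms"
    match = re.search(utils.PING_PACKET_LOSS_REGEX, output)
    if match:
        print("packet loss: %s%%" % match.group(1))  # -> packet loss: 0%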
diff --git a/neutron/tests/tempest/common/utils/data_utils.py b/neutron/tests/tempest/common/utils/data_utils.py
new file mode 100644
index 0000000..d441778
--- /dev/null
+++ b/neutron/tests/tempest/common/utils/data_utils.py
@@ -0,0 +1,101 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import itertools
+import netaddr
+import random
+import uuid
+
+
+def rand_uuid():
+ return str(uuid.uuid4())
+
+
+def rand_uuid_hex():
+ return uuid.uuid4().hex
+
+
+def rand_name(name=''):
+ randbits = str(random.randint(1, 0x7fffffff))
+ if name:
+ return name + '-' + randbits
+ else:
+ return randbits
+
+
+def rand_url():
+ randbits = str(random.randint(1, 0x7fffffff))
+ return 'https://url-' + randbits + '.com'
+
+
+def rand_int_id(start=0, end=0x7fffffff):
+ return random.randint(start, end)
+
+
+def rand_mac_address():
+ """Generate an Ethernet MAC address."""
+ # NOTE(vish): We would prefer to use 0xfe here to ensure that linux
+ # bridge mac addresses don't change, but it appears to
+ # conflict with libvirt, so we use the next highest octet
+ # that has the unicast and locally administered bits set
+ # properly: 0xfa.
+ # Discussion: https://bugs.launchpad.net/nova/+bug/921838
+ mac = [0xfa, 0x16, 0x3e,
+ random.randint(0x00, 0xff),
+ random.randint(0x00, 0xff),
+ random.randint(0x00, 0xff)]
+ return ':'.join(["%02x" % x for x in mac])
+
+
+def parse_image_id(image_ref):
+ """Return the image id from a given image ref."""
+ return image_ref.rsplit('/')[-1]
+
+
+def arbitrary_string(size=4, base_text=None):
+ """
+ Return size characters from base_text, repeating the base_text infinitely
+ if needed.
+ """
+ if not base_text:
+ base_text = 'test'
+ return ''.join(itertools.islice(itertools.cycle(base_text), size))
+
+
+def random_bytes(size=1024):
+ """
+ Return size randomly selected bytes as a string.
+ """
+ return ''.join([chr(random.randint(0, 255))
+ for i in range(size)])
+
+
+def get_ipv6_addr_by_EUI64(cidr, mac):
+ # Check if the prefix is an IPv4 address
+ is_ipv4 = netaddr.valid_ipv4(cidr)
+ if is_ipv4:
+ msg = "Unable to generate IP address by EUI64 for IPv4 prefix"
+ raise TypeError(msg)
+ try:
+ eui64 = int(netaddr.EUI(mac).eui64())
+ prefix = netaddr.IPNetwork(cidr)
+ return netaddr.IPAddress(prefix.first + eui64 ^ (1 << 57))
+ except (ValueError, netaddr.AddrFormatError):
+ raise TypeError('Bad prefix or mac format for generating IPv6 '
+ 'address by EUI-64: %(prefix)s, %(mac)s:'
+ % {'prefix': cidr, 'mac': mac})
+ except TypeError:
+ raise TypeError('Bad prefix type for generating IPv6 address by '
+ 'EUI-64: %s' % cidr)
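A brief sketch (not part of the change) of the helpers above; all values are illustrative and netaddr must be installed.

    from neutron.tests.tempest.common.utils import data_utils

    name = data_utils.rand_name('smoke-net')   # e.g. 'smoke-net-123456789'
    mac = data_utils.rand_mac_address()        # e.g. 'fa:16:3e:0c:22:01'
    addr = data_utils.get_ipv6_addr_by_EUI64('2001:db8::/64', mac)
    print(name, mac, addr)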
diff --git a/neutron/tests/tempest/common/utils/file_utils.py b/neutron/tests/tempest/common/utils/file_utils.py
new file mode 100644
index 0000000..43083f4
--- /dev/null
+++ b/neutron/tests/tempest/common/utils/file_utils.py
@@ -0,0 +1,23 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+def have_effective_read_access(path):
+ try:
+ fh = open(path, "rb")
+ except IOError:
+ return False
+ fh.close()
+ return True
diff --git a/neutron/tests/tempest/common/utils/misc.py b/neutron/tests/tempest/common/utils/misc.py
new file mode 100644
index 0000000..cc0004d
--- /dev/null
+++ b/neutron/tests/tempest/common/utils/misc.py
@@ -0,0 +1,87 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import inspect
+import re
+
+from neutron.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+def singleton(cls):
+ """Simple wrapper for classes that should only have a single instance."""
+ instances = {}
+
+ def getinstance():
+ if cls not in instances:
+ instances[cls] = cls()
+ return instances[cls]
+ return getinstance
+
+
+def find_test_caller():
+ """Find the caller class and test name.
+
+ Because we know that the interesting things that call us are
+ test_* methods, and various kinds of setUp / tearDown, we
+ can look through the call stack to find appropriate methods,
+ and the class we were in when those were called.
+ """
+ caller_name = None
+ names = []
+ frame = inspect.currentframe()
+ is_cleanup = False
+ # Start climbing the ladder until we hit a good method
+ while True:
+ try:
+ frame = frame.f_back
+ name = frame.f_code.co_name
+ names.append(name)
+ if re.search("^(test_|setUp|tearDown)", name):
+ cname = ""
+ if 'self' in frame.f_locals:
+ cname = frame.f_locals['self'].__class__.__name__
+ if 'cls' in frame.f_locals:
+ cname = frame.f_locals['cls'].__name__
+ caller_name = cname + ":" + name
+ break
+ elif re.search("^_run_cleanup", name):
+ is_cleanup = True
+ elif name == 'main':
+ caller_name = 'main'
+ break
+ else:
+ cname = ""
+ if 'self' in frame.f_locals:
+ cname = frame.f_locals['self'].__class__.__name__
+ if 'cls' in frame.f_locals:
+ cname = frame.f_locals['cls'].__name__
+
+ # The fact that we are running cleanups is indicated pretty
+ # deep in the stack, so once we see that, we just start
+ # looking for a real class name and declare victory when
+ # we find one.
+ if is_cleanup and cname:
+ if not re.search("^RunTest", cname):
+ caller_name = cname + ":_run_cleanups"
+ break
+ except Exception:
+ break
+ # prevents frame leaks
+ del frame
+ if caller_name is None:
+ LOG.debug("Sane call name not found in %s" % names)
+ return caller_name
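As an aside, a sketch (not part of the change) of the singleton decorator above; ConnectionCache is a made-up example class.

    from neutron.tests.tempest.common.utils import misc


    @misc.singleton
    class ConnectionCache(object):
        def __init__(self):
            self.entries = {}


    assert ConnectionCache() is ConnectionCache()  # always the same instance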
diff --git a/neutron/tests/tempest/common/waiters.py b/neutron/tests/tempest/common/waiters.py
new file mode 100644
index 0000000..b54ef73
--- /dev/null
+++ b/neutron/tests/tempest/common/waiters.py
@@ -0,0 +1,160 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import time
+
+from neutron.tests.tempest.common.utils import misc as misc_utils
+from neutron.tests.tempest import config
+from neutron.tests.tempest import exceptions
+from neutron.openstack.common import log as logging
+
+CONF = config.CONF
+LOG = logging.getLogger(__name__)
+
+
+# NOTE(afazekas): This function needs to know a token and a subject.
+def wait_for_server_status(client, server_id, status, ready_wait=True,
+ extra_timeout=0, raise_on_error=True):
+ """Waits for a server to reach a given status."""
+
+ def _get_task_state(body):
+ return body.get('OS-EXT-STS:task_state', None)
+
+ # NOTE(afazekas): UNKNOWN status possible on ERROR
+ # or in a very early stage.
+ body = client.get_server(server_id)
+ old_status = server_status = body['status']
+ old_task_state = task_state = _get_task_state(body)
+ start_time = int(time.time())
+ timeout = client.build_timeout + extra_timeout
+ while True:
+ # NOTE(afazekas): Now the BUILD status is only reached
+ # during the UNKNOWN->ACTIVE transition.
+ # TODO(afazekas): enumerate and validate the stable status set
+ if status == 'BUILD' and server_status != 'UNKNOWN':
+ return
+ if server_status == status:
+ if ready_wait:
+ if status == 'BUILD':
+ return
+ # NOTE(afazekas): The instance is in the "ready for action"
+ # state when no task is in progress
+ # NOTE(afazekas): Converted to string because of the XML
+ # responses
+ if str(task_state) == "None":
+ # without the state API extension, 3 seconds is usually enough
+ time.sleep(CONF.compute.ready_wait)
+ return
+ else:
+ return
+
+ time.sleep(client.build_interval)
+ body = client.get_server(server_id)
+ server_status = body['status']
+ task_state = _get_task_state(body)
+ if (server_status != old_status) or (task_state != old_task_state):
+ LOG.info('State transition "%s" ==> "%s" after %d second wait',
+ '/'.join((old_status, str(old_task_state))),
+ '/'.join((server_status, str(task_state))),
+ time.time() - start_time)
+ if (server_status == 'ERROR') and raise_on_error:
+ if 'fault' in body:
+ raise exceptions.BuildErrorException(body['fault'],
+ server_id=server_id)
+ else:
+ raise exceptions.BuildErrorException(server_id=server_id)
+
+ timed_out = int(time.time()) - start_time >= timeout
+
+ if timed_out:
+ expected_task_state = 'None' if ready_wait else 'n/a'
+ message = ('Server %(server_id)s failed to reach %(status)s '
+ 'status and task state "%(expected_task_state)s" '
+ 'within the required time (%(timeout)s s).' %
+ {'server_id': server_id,
+ 'status': status,
+ 'expected_task_state': expected_task_state,
+ 'timeout': timeout})
+ message += ' Current status: %s.' % server_status
+ message += ' Current task state: %s.' % task_state
+ caller = misc_utils.find_test_caller()
+ if caller:
+ message = '(%s) %s' % (caller, message)
+ raise exceptions.TimeoutException(message)
+ old_status = server_status
+ old_task_state = task_state
+
+
+def wait_for_image_status(client, image_id, status):
+ """Waits for an image to reach a given status.
+
+ The client should have a get_image(image_id) method to get the image.
+ The client should also have build_interval and build_timeout attributes.
+ """
+ image = client.get_image(image_id)
+ start = int(time.time())
+
+ while image['status'] != status:
+ time.sleep(client.build_interval)
+ image = client.get_image(image_id)
+ status_curr = image['status']
+ if status_curr == 'ERROR':
+ raise exceptions.AddImageException(image_id=image_id)
+
+ # check the status again to avoid a false negative where we hit
+ # the timeout at the same time that the image reached the expected
+ # status
+ if status_curr == status:
+ return
+
+ if int(time.time()) - start >= client.build_timeout:
+ message = ('Image %(image_id)s failed to reach %(status)s state '
+ '(current state %(status_curr)s) '
+ 'within the required time (%(timeout)s s).' %
+ {'image_id': image_id,
+ 'status': status,
+ 'status_curr': status_curr,
+ 'timeout': client.build_timeout})
+ caller = misc_utils.find_test_caller()
+ if caller:
+ message = '(%s) %s' % (caller, message)
+ raise exceptions.TimeoutException(message)
+
+
+def wait_for_bm_node_status(client, node_id, attr, status):
+ """Waits for a baremetal node attribute to reach given status.
+
+ The client should have a show_node(node_uuid) method to get the node.
+ """
+ _, node = client.show_node(node_id)
+ start = int(time.time())
+
+ while node[attr] != status:
+ time.sleep(client.build_interval)
+ _, node = client.show_node(node_id)
+ status_curr = node[attr]
+ if status_curr == status:
+ return
+
+ if int(time.time()) - start >= client.build_timeout:
+ message = ('Node %(node_id)s failed to reach %(attr)s=%(status)s '
+ 'within the required time (%(timeout)s s).' %
+ {'node_id': node_id,
+ 'attr': attr,
+ 'status': status,
+ 'timeout': client.build_timeout})
+ message += ' Current state of %s: %s.' % (attr, status_curr)
+ caller = misc_utils.find_test_caller()
+ if caller:
+ message = '(%s) %s' % (caller, message)
+ raise exceptions.TimeoutException(message)
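Finally, a sketch (not part of the change) of the client contract wait_for_image_status expects, i.e. a get_image() method plus build_interval and build_timeout attributes; the stub client below is purely illustrative.

    import itertools

    from neutron.tests.tempest.common import waiters


    class FakeImageClient(object):
        """Stub that satisfies the waiter's expectations."""
        build_interval = 1
        build_timeout = 10

        def __init__(self):
            self._statuses = itertools.chain(['queued', 'saving'],
                                             itertools.repeat('active'))

        def get_image(self, image_id):
            return {'id': image_id, 'status': next(self._statuses)}


    waiters.wait_for_image_status(FakeImageClient(), 'img-1', 'active')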