Merge "Fix to create an image with given min_ram value."
diff --git a/tempest/clients.py b/tempest/clients.py
index b3b5906..678b595 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -48,8 +48,11 @@
from tempest.services.compute.xml.servers_client import ServersClientXML
from tempest.services.compute.xml.volumes_extensions_client import \
VolumesExtensionsClientXML
+from tempest.services.identity.v3.json.endpoints_client import \
+ EndPointClientJSON
from tempest.services.identity.json.identity_client import IdentityClientJSON
from tempest.services.identity.json.identity_client import TokenClientJSON
+from tempest.services.identity.v3.xml.endpoints_client import EndPointClientXML
from tempest.services.identity.xml.identity_client import IdentityClientXML
from tempest.services.identity.xml.identity_client import TokenClientXML
from tempest.services.image.v1.json.image_client import ImageClientJSON
@@ -74,6 +77,8 @@
InterfacesClientJSON
from tempest.services.compute.xml.interfaces_client import \
InterfacesClientXML
+from tempest.services.compute.json.fixed_ips_client import FixedIPsClientJSON
+from tempest.services.compute.xml.fixed_ips_client import FixedIPsClientXML
LOG = logging.getLogger(__name__)
@@ -157,6 +162,16 @@
"xml": InterfacesClientXML,
}
+ENDPOINT_CLIENT = {
+ "json": EndPointClientJSON,
+ "xml": EndPointClientXML,
+}
+
+FIXED_IPS_CLIENT = {
+ "json": FixedIPsClientJSON,
+ "xml": FixedIPsClientXML
+}
+
class Manager(object):
@@ -219,6 +234,8 @@
self.security_groups_client = \
SECURITY_GROUPS_CLIENT[interface](*client_args)
self.interfaces_client = INTERFACES_CLIENT[interface](*client_args)
+ self.endpoints_client = ENDPOINT_CLIENT[interface](*client_args)
+ self.fixed_ips_client = FIXED_IPS_CLIENT[interface](*client_args)
except KeyError:
msg = "Unsupported interface type `%s'" % interface
raise exceptions.InvalidConfiguration(msg)
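Usage sketch (illustrative only, not part of this change; the credentials are
placeholders and the constructor arguments assume Manager's usual
username/password/tenant_name/interface signature). With the two new
registries wired in, the clients become available as manager attributes:

    from tempest import clients

    mgr = clients.Manager(username='admin', password='secret',
                          tenant_name='admin', interface='json')
    resp, endpoints = mgr.endpoints_client.list_endpoints()
    resp, ip_info = mgr.fixed_ips_client.get_fixed_ip_details('10.0.0.3')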
diff --git a/tempest/common/rest_client.py b/tempest/common/rest_client.py
index 366e467..fba3b0f 100644
--- a/tempest/common/rest_client.py
+++ b/tempest/common/rest_client.py
@@ -179,6 +179,9 @@
def delete(self, url, headers=None):
return self.request('DELETE', url, headers)
+ def patch(self, url, body, headers):
+ return self.request('PATCH', url, headers, body)
+
def put(self, url, body, headers):
return self.request('PUT', url, headers, body)
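Usage sketch (illustrative only, not part of this change; the resource path,
request body and endpoint_id are placeholders). The new patch() verb mirrors
put() and is what the identity v3 endpoints client added below relies on; a
derived JSON client would use it roughly like this:

    post_body = json.dumps({'endpoint': {'enabled': False}})
    resp, body = self.patch('endpoints/%s' % endpoint_id, post_body,
                            self.headers)
    body = json.loads(body)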
diff --git a/tempest/services/compute/json/fixed_ips_client.py b/tempest/services/compute/json/fixed_ips_client.py
new file mode 100644
index 0000000..4ef7c4c
--- /dev/null
+++ b/tempest/services/compute/json/fixed_ips_client.py
@@ -0,0 +1,40 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 IBM Corp
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+
+from tempest.common.rest_client import RestClient
+
+
+class FixedIPsClientJSON(RestClient):
+
+ def __init__(self, config, username, password, auth_url, tenant_name=None):
+ super(FixedIPsClientJSON, self).__init__(config, username, password,
+ auth_url, tenant_name)
+ self.service = self.config.compute.catalog_type
+
+ def get_fixed_ip_details(self, fixed_ip):
+ url = "os-fixed-ips/%s" % (fixed_ip)
+ resp, body = self.get(url)
+ body = json.loads(body)
+ return resp, body['fixed_ip']
+
+ def reserve_fixed_ip(self, ip, body):
+ """This reserves and unreserves fixed ips."""
+ url = "os-fixed-ips/%s/action" % (ip)
+ resp, body = self.post(url, json.dumps(body), self.headers)
+ return resp, body
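Usage sketch (illustrative only, not part of this change; the address is a
placeholder). From a test class that exposes this client as self.client, the
calls look like:

    resp, fixed_ip = self.client.get_fixed_ip_details('10.0.0.3')
    # fixed_ip['address'] == '10.0.0.3'
    resp, _ = self.client.reserve_fixed_ip('10.0.0.3', {"reserve": "None"})
    resp, _ = self.client.reserve_fixed_ip('10.0.0.3', {"unreserve": "None"})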
diff --git a/tempest/services/compute/json/quotas_client.py b/tempest/services/compute/json/quotas_client.py
index 37d4131..5b1e48f 100644
--- a/tempest/services/compute/json/quotas_client.py
+++ b/tempest/services/compute/json/quotas_client.py
@@ -35,6 +35,14 @@
body = json.loads(body)
return resp, body['quota_set']
+ def get_default_quota_set(self, tenant_id):
+ """List the default quota set for a tenant."""
+
+ url = 'os-quota-sets/%s/defaults' % str(tenant_id)
+ resp, body = self.get(url)
+ body = json.loads(body)
+ return resp, body['quota_set']
+
def update_quota_set(self, tenant_id, injected_file_content_bytes=None,
metadata_items=None, ram=None, floating_ips=None,
fixed_ips=None, key_pairs=None, instances=None,
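Usage sketch (illustrative only, not part of this change; tenant_id is a
placeholder). The new call returns the decoded quota_set dict, just like
get_quota_set does:

    resp, quota_set = self.client.get_default_quota_set(tenant_id)
    # e.g. quota_set['instances'], quota_set['cores'], quota_set['ram']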
diff --git a/tempest/services/compute/json/servers_client.py b/tempest/services/compute/json/servers_client.py
index bc9d9bd..9e71f3d 100644
--- a/tempest/services/compute/json/servers_client.py
+++ b/tempest/services/compute/json/servers_client.py
@@ -52,6 +52,7 @@
min_count: Count of minimum number of instances to launch.
max_count: Count of maximum number of instances to launch.
disk_config: Determines if user or admin controls disk configuration.
+ return_reservation_id: Enable/Disable the return of reservation id
"""
post_body = {
'name': name,
@@ -63,7 +64,8 @@
'security_groups', 'networks', 'user_data',
'availability_zone', 'accessIPv4', 'accessIPv6',
'min_count', 'max_count', ('metadata', 'meta'),
- ('OS-DCF:diskConfig', 'disk_config')]:
+ ('OS-DCF:diskConfig', 'disk_config'),
+ 'return_reservation_id']:
if isinstance(option, tuple):
post_param = option[0]
key = option[1]
@@ -77,6 +79,10 @@
resp, body = self.post('servers', post_body, self.headers)
body = json.loads(body)
+        # NOTE(maurosr): this deals with the case of multiple server create
+        # with return_reservation_id set to True
+ if 'reservation_id' in body:
+ return resp, body
return resp, body['server']
def update_server(self, server_id, name=None, meta=None, accessIPv4=None,
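Usage sketch (illustrative only, not part of this change; self.client, name,
image_ref, flavor_ref and the reservation id value are placeholders). When a
multiple create is requested with return_reservation_id set to True, the whole
body is returned instead of body['server']:

    resp, body = self.client.create_server(name, image_ref, flavor_ref,
                                           min_count=2, max_count=3,
                                           return_reservation_id=True)
    # body == {'reservation_id': 'r-3fhpjulh'}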
diff --git a/tempest/services/compute/xml/fixed_ips_client.py b/tempest/services/compute/xml/fixed_ips_client.py
new file mode 100644
index 0000000..ef023f0
--- /dev/null
+++ b/tempest/services/compute/xml/fixed_ips_client.py
@@ -0,0 +1,54 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 IBM Corp
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+
+from tempest.common.rest_client import RestClientXML
+from tempest.services.compute.xml.common import Document
+from tempest.services.compute.xml.common import Element
+from tempest.services.compute.xml.common import Text
+from tempest.services.compute.xml.common import xml_to_json
+
+
+class FixedIPsClientXML(RestClientXML):
+
+ def __init__(self, config, username, password, auth_url, tenant_name=None):
+ super(FixedIPsClientXML, self).__init__(config, username, password,
+ auth_url, tenant_name)
+ self.service = self.config.compute.catalog_type
+
+ def _parse_fixed_ip_details(self, body):
+ body = xml_to_json(etree.fromstring(body))
+ return body
+
+ def get_fixed_ip_details(self, fixed_ip):
+ url = "os-fixed-ips/%s" % (fixed_ip)
+ resp, body = self.get(url, self.headers)
+ body = self._parse_resp(body)
+ return resp, body
+
+ def reserve_fixed_ip(self, ip, body):
+ """This reserves and unreserves fixed ips."""
+ url = "os-fixed-ips/%s/action" % (ip)
+        # NOTE(maurosr): accept any action key/value here so that tests can
+        # also cover cases where invalid actions raise BadRequest.
+ key, value = body.popitem()
+ xml_body = Element(key)
+ xml_body.append(Text(value))
+ resp, body = self.post(url, str(Document(xml_body)), self.headers)
+ return resp, body
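Sketch of the request body (illustrative only, not part of this change; the
exact serialization depends on the Document/Element helpers). For the reserve
action the code above builds a one-element document, which is why any action
key, valid or not, can be posted:

    body = {"reserve": "None"}
    key, value = body.popitem()      # key == 'reserve', value == 'None'
    xml_body = Element(key)
    xml_body.append(Text(value))
    # str(Document(xml_body)) comes out roughly as:
    #   <?xml version="1.0" encoding="UTF-8"?><reserve>None</reserve>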
diff --git a/tempest/services/compute/xml/quotas_client.py b/tempest/services/compute/xml/quotas_client.py
index 20e04b4..8912443 100644
--- a/tempest/services/compute/xml/quotas_client.py
+++ b/tempest/services/compute/xml/quotas_client.py
@@ -55,6 +55,15 @@
body = self._format_quota(body)
return resp, body
+ def get_default_quota_set(self, tenant_id):
+ """List the default quota set for a tenant."""
+
+ url = 'os-quota-sets/%s/defaults' % str(tenant_id)
+ resp, body = self.get(url, self.headers)
+ body = xml_to_json(etree.fromstring(body))
+ body = self._format_quota(body)
+ return resp, body
+
def update_quota_set(self, tenant_id, injected_file_content_bytes=None,
metadata_items=None, ram=None, floating_ips=None,
fixed_ips=None, key_pairs=None, instances=None,
diff --git a/tempest/services/compute/xml/servers_client.py b/tempest/services/compute/xml/servers_client.py
index e6c2a6c..331d560 100644
--- a/tempest/services/compute/xml/servers_client.py
+++ b/tempest/services/compute/xml/servers_client.py
@@ -42,6 +42,13 @@
version = ip.get('version')
if version:
ip['version'] = int(version)
+    # NOTE(maurosr): map the namespace-expanded XML attribute key back to
+    # the plain 'OS-EXT-IPS:type' key used by the JSON client.
+ type_ns_prefix = ('{http://docs.openstack.org/compute/ext/extended_ips/'
+ 'api/v1.1}type')
+ if type_ns_prefix in ip:
+ ip['OS-EXT-IPS:type'] = ip[type_ns_prefix]
+ ip.pop(type_ns_prefix)
return ip
@@ -235,7 +242,8 @@
name=name)
for attr in ["adminPass", "accessIPv4", "accessIPv6", "key_name",
- "user_data", "availability_zone"]:
+ "user_data", "availability_zone", "min_count",
+ "max_count", "return_reservation_id"]:
if attr in kwargs:
server.add_attr(attr, kwargs[attr])
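Sketch of the key mapping (illustrative only, not part of this change; the
address value is made up). The normalization above makes the parsed XML
address entry match the JSON one:

    # before: {'{http://docs.openstack.org/compute/ext/extended_ips/'
    #          'api/v1.1}type': 'fixed', 'addr': '10.0.0.3', 'version': 4}
    # after:  {'OS-EXT-IPS:type': 'fixed', 'addr': '10.0.0.3', 'version': 4}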
diff --git a/tempest/services/identity/v3/__init__.py b/tempest/services/identity/v3/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/services/identity/v3/__init__.py
diff --git a/tempest/services/identity/v3/json/__init__.py b/tempest/services/identity/v3/json/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/services/identity/v3/json/__init__.py
diff --git a/tempest/services/identity/v3/json/endpoints_client.py b/tempest/services/identity/v3/json/endpoints_client.py
new file mode 100755
index 0000000..3cb8f90
--- /dev/null
+++ b/tempest/services/identity/v3/json/endpoints_client.py
@@ -0,0 +1,87 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+from urlparse import urlparse
+
+from tempest.common.rest_client import RestClient
+
+
+class EndPointClientJSON(RestClient):
+
+ def __init__(self, config, username, password, auth_url, tenant_name=None):
+ super(EndPointClientJSON, self).__init__(config,
+ username, password,
+ auth_url, tenant_name)
+ self.service = self.config.identity.catalog_type
+ self.endpoint_url = 'adminURL'
+
+ def request(self, method, url, headers=None, body=None, wait=None):
+ """Overriding the existing HTTP request in super class rest_client."""
+ self._set_auth()
+ self.base_url = self.base_url.replace(urlparse(self.base_url).path,
+ "/v3")
+ return super(EndPointClientJSON, self).request(method, url,
+ headers=headers,
+ body=body)
+
+ def list_endpoints(self):
+ """GET endpoints."""
+ resp, body = self.get('endpoints')
+ body = json.loads(body)
+ return resp, body['endpoints']
+
+ def create_endpoint(self, service_id, interface, url, **kwargs):
+ """Create endpoint."""
+ region = kwargs.get('region', None)
+ enabled = kwargs.get('enabled', None)
+ post_body = {
+ 'service_id': service_id,
+ 'interface': interface,
+ 'url': url,
+ 'region': region,
+ 'enabled': enabled
+ }
+ post_body = json.dumps({'endpoint': post_body})
+ resp, body = self.post('endpoints', post_body, self.headers)
+ body = json.loads(body)
+ return resp, body['endpoint']
+
+ def update_endpoint(self, endpoint_id, service_id=None, interface=None,
+ url=None, region=None, enabled=None):
+ """Updates an endpoint with given parameters."""
+ post_body = {}
+ if service_id is not None:
+ post_body['service_id'] = service_id
+ if interface is not None:
+ post_body['interface'] = interface
+ if url is not None:
+ post_body['url'] = url
+ if region is not None:
+ post_body['region'] = region
+ if enabled is not None:
+ post_body['enabled'] = enabled
+ post_body = json.dumps({'endpoint': post_body})
+ resp, body = self.patch('endpoints/%s' % endpoint_id, post_body,
+ self.headers)
+ body = json.loads(body)
+ return resp, body['endpoint']
+
+ def delete_endpoint(self, endpoint_id):
+ """Delete endpoint."""
+ resp_header, resp_body = self.delete('endpoints/%s' % endpoint_id)
+ return resp_header, resp_body
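Usage sketch (illustrative only, not part of this change; service_id, the url
and the region are placeholders). A typical endpoint lifecycle through this
client:

    resp, endpoint = self.client.create_endpoint(service_id, 'public',
                                                 'http://localhost:8774/v2/',
                                                 region='RegionOne',
                                                 enabled=True)
    resp, endpoint = self.client.update_endpoint(endpoint['id'], enabled=False)
    resp, endpoints = self.client.list_endpoints()
    resp, _ = self.client.delete_endpoint(endpoint['id'])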
diff --git a/tempest/services/identity/v3/xml/__init__.py b/tempest/services/identity/v3/xml/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/services/identity/v3/xml/__init__.py
diff --git a/tempest/services/identity/v3/xml/endpoints_client.py b/tempest/services/identity/v3/xml/endpoints_client.py
new file mode 100755
index 0000000..8400976
--- /dev/null
+++ b/tempest/services/identity/v3/xml/endpoints_client.py
@@ -0,0 +1,107 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from urlparse import urlparse
+
+import httplib2
+from lxml import etree
+
+from tempest.common.rest_client import RestClientXML
+from tempest.services.compute.xml.common import Document
+from tempest.services.compute.xml.common import Element
+from tempest.services.compute.xml.common import xml_to_json
+
+XMLNS = "http://docs.openstack.org/identity/api/v3"
+
+
+class EndPointClientXML(RestClientXML):
+
+ def __init__(self, config, username, password, auth_url, tenant_name=None):
+ super(EndPointClientXML, self).__init__(config, username, password,
+ auth_url, tenant_name)
+ self.service = self.config.identity.catalog_type
+ self.endpoint_url = 'adminURL'
+
+ def _parse_array(self, node):
+ array = []
+ for child in node.getchildren():
+ tag_list = child.tag.split('}', 1)
+ if tag_list[1] == "endpoint":
+ array.append(xml_to_json(child))
+ return array
+
+ def _parse_body(self, body):
+ json = xml_to_json(body)
+ return json
+
+ def request(self, method, url, headers=None, body=None, wait=None):
+ """Overriding the existing HTTP request in super class RestClient."""
+ dscv = self.config.identity.disable_ssl_certificate_validation
+ self.http_obj = httplib2.Http(disable_ssl_certificate_validation=dscv)
+ self._set_auth()
+ self.base_url = self.base_url.replace(urlparse(self.base_url).path,
+ "/v3")
+ return super(EndPointClientXML, self).request(method, url,
+ headers=headers,
+ body=body)
+
+ def list_endpoints(self):
+ """Get the list of endpoints."""
+ resp, body = self.get("endpoints", self.headers)
+ body = self._parse_array(etree.fromstring(body))
+ return resp, body
+
+ def create_endpoint(self, service_id, interface, url, **kwargs):
+ """Create endpoint."""
+ region = kwargs.get('region', None)
+ enabled = kwargs.get('enabled', None)
+ create_endpoint = Element("endpoint",
+ xmlns=XMLNS,
+ service_id=service_id,
+ interface=interface,
+ url=url, region=region,
+ enabled=enabled)
+ resp, body = self.post('endpoints', str(Document(create_endpoint)),
+ self.headers)
+ body = self._parse_body(etree.fromstring(body))
+ return resp, body
+
+ def update_endpoint(self, endpoint_id, service_id=None, interface=None,
+ url=None, region=None, enabled=None):
+ """Updates an endpoint with given parameters."""
+ doc = Document()
+ endpoint = Element("endpoint")
+ doc.append(endpoint)
+
+ if service_id:
+ endpoint.add_attr("service_id", service_id)
+ if interface:
+ endpoint.add_attr("interface", interface)
+ if url:
+ endpoint.add_attr("url", url)
+ if region:
+ endpoint.add_attr("region", region)
+ if enabled is not None:
+ endpoint.add_attr("enabled", enabled)
+ resp, body = self.patch('endpoints/%s' % str(endpoint_id),
+ str(doc), self.headers)
+ body = self._parse_body(etree.fromstring(body))
+ return resp, body
+
+ def delete_endpoint(self, endpoint_id):
+ """Delete endpoint."""
+ resp_header, resp_body = self.delete('endpoints/%s' % endpoint_id)
+ return resp_header, resp_body
diff --git a/tempest/services/volume/json/volumes_client.py b/tempest/services/volume/json/volumes_client.py
index 6b0befd..87c0eba 100644
--- a/tempest/services/volume/json/volumes_client.py
+++ b/tempest/services/volume/json/volumes_client.py
@@ -72,6 +72,7 @@
metadata: A dictionary of values to be used as metadata.
volume_type: Optional Name of volume_type for the volume
snapshot_id: When specified the volume is created from this snapshot
+ imageRef: When specified the volume is created from this image
"""
post_body = {'size': size}
post_body.update(kwargs)
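Usage sketch (illustrative only, not part of this change; self.client,
image_id and the display name are placeholders). A bootable volume is created
by passing the new keyword through kwargs:

    resp, volume = self.client.create_volume(size=1,
                                             display_name='vol-from-image',
                                             imageRef=image_id)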
diff --git a/tempest/services/volume/xml/volumes_client.py b/tempest/services/volume/xml/volumes_client.py
index 6fd1397..8eda26b 100644
--- a/tempest/services/volume/xml/volumes_client.py
+++ b/tempest/services/volume/xml/volumes_client.py
@@ -100,6 +100,8 @@
:param volume_type: Optional Name of volume_type for the volume
:param snapshot_id: When specified the volume is created from
this snapshot
+ :param imageRef: When specified the volume is created from this
+ image
"""
#NOTE(afazekas): it should use a volume namespace
volume = Element("volume", xmlns=XMLNS_11, size=size)
diff --git a/tempest/smoke.py b/tempest/smoke.py
deleted file mode 100644
index 0d4043f..0000000
--- a/tempest/smoke.py
+++ /dev/null
@@ -1,85 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack, LLC
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import logging
-
-from tempest import test
-
-LOG = logging.getLogger(__name__)
-
-
-class SmokeTest(object):
-
- """
- Base test case class mixin for "smoke tests"
-
- Smoke tests are tests that have the following characteristics:
-
- * Test basic operations of an API, typically in an order that
- a regular user would perform those operations
- * Test only the correct inputs and action paths -- no fuzz or
- random input data is sent, only valid inputs.
- * Use only the default client tool for calling an API
- """
- pass
-
-
-class DefaultClientSmokeTest(test.DefaultClientTest, SmokeTest):
-
- """
- Base smoke test case class that provides the default clients to
- access the various OpenStack APIs.
- """
-
- @classmethod
- def tearDownClass(cls):
- # NOTE(jaypipes): Because smoke tests are typically run in a specific
- # order, and because test methods in smoke tests generally create
- # resources in a particular order, we destroy resources in the reverse
- # order in which resources are added to the smoke test class object
- while cls.os_resources:
- thing = cls.os_resources.pop()
- LOG.debug("Deleting %r from shared resources of %s" %
- (thing, cls.__name__))
-
- try:
- # OpenStack resources are assumed to have a delete()
- # method which destroys the resource...
- thing.delete()
- except Exception as e:
- # If the resource is already missing, mission accomplished.
- if e.__class__.__name__ == 'NotFound':
- continue
- raise
-
- def is_deletion_complete():
- # Deletion testing is only required for objects whose
- # existence cannot be checked via retrieval.
- if isinstance(thing, dict):
- return True
- try:
- thing.get()
- except Exception as e:
- # Clients are expected to return an exception
- # called 'NotFound' if retrieval fails.
- if e.__class__.__name__ == 'NotFound':
- return True
- raise
- return False
-
- # Block until resource deletion has completed or timed-out
- test.call_until_true(is_deletion_complete, 10, 1)
diff --git a/tempest/test.py b/tempest/test.py
index e0639b6..ccb2251 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -115,42 +115,91 @@
return False
-class DefaultClientTest(TestCase):
+def status_timeout(things, thing_id, expected_status):
+ """
+ Given a thing and an expected status, do a loop, sleeping
+ for a configurable amount of time, checking for the
+ expected status to show. At any time, if the returned
+ status of the thing is ERROR, fail out.
+ """
+ def check_status():
+ # python-novaclient has resources available to its client
+ # that all implement a get() method taking an identifier
+ # for the singular resource to retrieve.
+ thing = things.get(thing_id)
+ new_status = thing.status
+ if new_status == 'ERROR':
+ self.fail("%s failed to get to expected status."
+ "In ERROR state."
+ % thing)
+ elif new_status == expected_status:
+ return True # All good.
+ LOG.debug("Waiting for %s to get to %s status. "
+ "Currently in %s status",
+ thing, expected_status, new_status)
+ conf = config.TempestConfig()
+ if not call_until_true(check_status,
+ conf.compute.build_timeout,
+ conf.compute.build_interval):
+ self.fail("Timed out waiting for thing %s to become %s"
+ % (thing_id, expected_status))
+
+
+class DefaultClientSmokeTest(TestCase):
"""
- Base test case class that provides the default clients to access
- the various OpenStack APIs.
+ Base smoke test case class that provides the default clients to
+ access the various OpenStack APIs.
+
+ Smoke tests are tests that have the following characteristics:
+
+ * Test basic operations of an API, typically in an order that
+ a regular user would perform those operations
+ * Test only the correct inputs and action paths -- no fuzz or
+ random input data is sent, only valid inputs.
+ * Use only the default client tool for calling an API
"""
manager_class = manager.DefaultClientManager
- def status_timeout(self, things, thing_id, expected_status):
- """
- Given a thing and an expected status, do a loop, sleeping
- for a configurable amount of time, checking for the
- expected status to show. At any time, if the returned
- status of the thing is ERROR, fail out.
- """
- def check_status():
- # python-novaclient has resources available to its client
- # that all implement a get() method taking an identifier
- # for the singular resource to retrieve.
- thing = things.get(thing_id)
- new_status = thing.status
- if new_status == 'ERROR':
- self.fail("%s failed to get to expected status."
- "In ERROR state."
- % thing)
- elif new_status == expected_status:
- return True # All good.
- LOG.debug("Waiting for %s to get to %s status. "
- "Currently in %s status",
- thing, expected_status, new_status)
- if not call_until_true(check_status,
- self.config.compute.build_timeout,
- self.config.compute.build_interval):
- self.fail("Timed out waiting for thing %s to become %s"
- % (thing_id, expected_status))
+ @classmethod
+ def tearDownClass(cls):
+ # NOTE(jaypipes): Because smoke tests are typically run in a specific
+ # order, and because test methods in smoke tests generally create
+ # resources in a particular order, we destroy resources in the reverse
+ # order in which resources are added to the smoke test class object
+ while cls.os_resources:
+ thing = cls.os_resources.pop()
+ LOG.debug("Deleting %r from shared resources of %s" %
+ (thing, cls.__name__))
+
+ try:
+ # OpenStack resources are assumed to have a delete()
+ # method which destroys the resource...
+ thing.delete()
+ except Exception as e:
+ # If the resource is already missing, mission accomplished.
+ if e.__class__.__name__ == 'NotFound':
+ continue
+ raise
+
+ def is_deletion_complete():
+ # Deletion testing is only required for objects whose
+ # existence cannot be checked via retrieval.
+ if isinstance(thing, dict):
+ return True
+ try:
+ thing.get()
+ except Exception as e:
+ # Clients are expected to return an exception
+ # called 'NotFound' if retrieval fails.
+ if e.__class__.__name__ == 'NotFound':
+ return True
+ raise
+ return False
+
+ # Block until resource deletion has completed or timed-out
+ call_until_true(is_deletion_complete, 10, 1)
class ComputeFuzzClientTest(TestCase):
@@ -161,46 +210,3 @@
"""
manager_class = manager.ComputeFuzzClientManager
-
- def status_timeout(self, client_get_method, thing_id, expected_status):
- """
- Given a method to get a resource and an expected status, do a loop,
- sleeping for a configurable amount of time, checking for the
- expected status to show. At any time, if the returned
- status of the thing is ERROR, fail out.
-
- :param client_get_method: The callable that will retrieve the thing
- with ID :param:thing_id
- :param thing_id: The ID of the thing to get
- :param expected_status: String value of the expected status of the
- thing that we are looking for.
-
- :code ..
-
- Usage:
-
- def test_some_server_action(self):
- client = self.servers_client
- resp, server = client.create_server('random_server')
- self.status_timeout(client.get_server, server['id'], 'ACTIVE')
- """
- def check_status():
- # Tempest REST client has resources available to its client
- # that all implement a various get_$resource() methods taking
- # an identifier for the singular resource to retrieve.
- thing = client_get_method(thing_id)
- new_status = thing['status']
- if new_status == 'ERROR':
- self.fail("%s failed to get to expected status."
- "In ERROR state."
- % thing)
- elif new_status == expected_status:
- return True # All good.
- LOG.debug("Waiting for %s to get to %s status. "
- "Currently in %s status",
- thing, expected_status, new_status)
- if not call_until_true(check_status,
- self.config.compute.build_timeout,
- self.config.compute.build_interval):
- self.fail("Timed out waiting for thing %s to become %s"
- % (thing_id, expected_status))
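Usage sketch (illustrative only, not part of this change; nova_client stands
for whatever python-novaclient instance the default client manager exposes).
After the move, callers use the helper as a module-level function rather than
a test-case method:

    from tempest.test import status_timeout

    status_timeout(nova_client.servers, server.id, 'ACTIVE')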
diff --git a/tempest/tests/compute/admin/test_fixed_ips.py b/tempest/tests/compute/admin/test_fixed_ips.py
new file mode 100644
index 0000000..d8b1359
--- /dev/null
+++ b/tempest/tests/compute/admin/test_fixed_ips.py
@@ -0,0 +1,108 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 IBM Corp
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest import exceptions
+from tempest.test import attr
+from tempest.tests.compute import base
+
+
+class FixedIPsBase(base.BaseComputeAdminTest):
+ _interface = 'json'
+ ip = None
+
+ @classmethod
+ def setUpClass(cls):
+ super(FixedIPsBase, cls).setUpClass()
+        # NOTE(maurosr): The idea here is: the server creation is just an
+        # auxiliary element to the ip details or reservation; there was no
+        # way (at least none in my mind) to get a valid and existing ip
+        # except by creating a server and using its ip. So the intention is
+        # to create as few servers as possible (one) and use it for both the
+        # json and xml tests. This decreased the time to run both tests, on
+        # my test machine, from 53 secs to 29 (against 23 secs when running
+        # only the json tests).
+ if cls.ip is None:
+ cls.client = cls.os_adm.fixed_ips_client
+ cls.non_admin_client = cls.fixed_ips_client
+ resp, server = cls.create_server(wait_until='ACTIVE')
+ resp, server = cls.servers_client.get_server(server['id'])
+ for ip_set in server['addresses']:
+ for ip in server['addresses'][ip_set]:
+ if ip['OS-EXT-IPS:type'] == 'fixed':
+ cls.ip = ip['addr']
+ break
+ if cls.ip:
+ break
+
+
+class FixedIPsTestJson(FixedIPsBase):
+ _interface = 'json'
+
+ @attr(type='positive')
+ def test_list_fixed_ip_details(self):
+ resp, fixed_ip = self.client.get_fixed_ip_details(self.ip)
+ self.assertEqual(fixed_ip['address'], self.ip)
+
+ @attr(type='negative')
+ def test_list_fixed_ip_details_with_non_admin_user(self):
+ self.assertRaises(exceptions.Unauthorized,
+ self.non_admin_client.get_fixed_ip_details, self.ip)
+
+ @attr(type='positive')
+ def test_set_reserve(self):
+ body = {"reserve": "None"}
+ resp, body = self.client.reserve_fixed_ip(self.ip, body)
+ self.assertEqual(resp.status, 202)
+
+ @attr(type='positive')
+ def test_set_unreserve(self):
+ body = {"unreserve": "None"}
+ resp, body = self.client.reserve_fixed_ip(self.ip, body)
+ self.assertEqual(resp.status, 202)
+
+ @attr(type='negative')
+ def test_set_reserve_with_non_admin_user(self):
+ body = {"reserve": "None"}
+ self.assertRaises(exceptions.Unauthorized,
+ self.non_admin_client.reserve_fixed_ip,
+ self.ip, body)
+
+ @attr(type='negative')
+ def test_set_unreserve_with_non_admin_user(self):
+ body = {"unreserve": "None"}
+ self.assertRaises(exceptions.Unauthorized,
+ self.non_admin_client.reserve_fixed_ip,
+ self.ip, body)
+
+ @attr(type='negative')
+ def test_set_reserve_with_invalid_ip(self):
+ # NOTE(maurosr): since this exercises the same code snippet, we do it
+        # only for the reserve action
+ body = {"reserve": "None"}
+ self.assertRaises(exceptions.NotFound,
+ self.client.reserve_fixed_ip,
+ "my.invalid.ip", body)
+
+ @attr(type='negative')
+ def test_fixed_ip_with_invalid_action(self):
+ body = {"invalid_action": "None"}
+ self.assertRaises(exceptions.BadRequest,
+ self.client.reserve_fixed_ip,
+ self.ip, body)
+
+
+class FixedIPsTestXml(FixedIPsTestJson):
+ _interface = 'xml'
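Sketch of the structure being walked (illustrative only, not part of this
change; the network name and address are made up). setUpClass picks the first
fixed address out of a server entry shaped like this:

    # server['addresses'] == {
    #     'private': [{'OS-EXT-IPS:type': 'fixed', 'addr': '10.0.0.3',
    #                  'version': 4}],
    # }
    # so cls.ip ends up as '10.0.0.3'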
diff --git a/tempest/tests/compute/admin/test_flavors.py b/tempest/tests/compute/admin/test_flavors.py
index 32b06f8..7957009 100644
--- a/tempest/tests/compute/admin/test_flavors.py
+++ b/tempest/tests/compute/admin/test_flavors.py
@@ -48,6 +48,11 @@
cls.swap = 1024
cls.rxtx = 2
+ def flavor_clean_up(self, flavor_id):
+ resp, body = self.client.delete_flavor(flavor_id)
+ self.assertEqual(resp.status, 202)
+ self.client.wait_for_resource_deletion(flavor_id)
+
@attr(type='positive')
def test_create_flavor(self):
# Create a flavor and ensure it is listed
@@ -55,43 +60,37 @@
flavor_name = rand_name(self.flavor_name_prefix)
new_flavor_id = rand_int_id(start=1000)
- try:
- #Create the flavor
- resp, flavor = self.client.create_flavor(flavor_name,
- self.ram, self.vcpus,
- self.disk,
- new_flavor_id,
- ephemeral=self.ephemeral,
- swap=self.swap,
- rxtx=self.rxtx)
- self.assertEqual(200, resp.status)
- self.assertEqual(flavor['name'], flavor_name)
- self.assertEqual(flavor['vcpus'], self.vcpus)
- self.assertEqual(flavor['disk'], self.disk)
- self.assertEqual(flavor['ram'], self.ram)
- self.assertEqual(int(flavor['id']), new_flavor_id)
- self.assertEqual(flavor['swap'], self.swap)
- self.assertEqual(flavor['rxtx_factor'], self.rxtx)
- self.assertEqual(flavor['OS-FLV-EXT-DATA:ephemeral'],
- self.ephemeral)
- if self._interface == "xml":
- XMLNS_OS_FLV_ACCESS = "http://docs.openstack.org/compute/ext/"\
- "flavor_access/api/v2"
- key = "{" + XMLNS_OS_FLV_ACCESS + "}is_public"
- self.assertEqual(flavor[key], "True")
- if self._interface == "json":
- self.assertEqual(flavor['os-flavor-access:is_public'], True)
+ #Create the flavor
+ resp, flavor = self.client.create_flavor(flavor_name,
+ self.ram, self.vcpus,
+ self.disk,
+ new_flavor_id,
+ ephemeral=self.ephemeral,
+ swap=self.swap,
+ rxtx=self.rxtx)
+ self.addCleanup(self.flavor_clean_up, flavor['id'])
+ self.assertEqual(200, resp.status)
+ self.assertEqual(flavor['name'], flavor_name)
+ self.assertEqual(flavor['vcpus'], self.vcpus)
+ self.assertEqual(flavor['disk'], self.disk)
+ self.assertEqual(flavor['ram'], self.ram)
+ self.assertEqual(int(flavor['id']), new_flavor_id)
+ self.assertEqual(flavor['swap'], self.swap)
+ self.assertEqual(flavor['rxtx_factor'], self.rxtx)
+ self.assertEqual(flavor['OS-FLV-EXT-DATA:ephemeral'],
+ self.ephemeral)
+ if self._interface == "xml":
+ XMLNS_OS_FLV_ACCESS = "http://docs.openstack.org/compute/ext/"\
+ "flavor_access/api/v2"
+ key = "{" + XMLNS_OS_FLV_ACCESS + "}is_public"
+ self.assertEqual(flavor[key], "True")
+ if self._interface == "json":
+ self.assertEqual(flavor['os-flavor-access:is_public'], True)
- #Verify flavor is retrieved
- resp, flavor = self.client.get_flavor_details(new_flavor_id)
- self.assertEqual(resp.status, 200)
- self.assertEqual(flavor['name'], flavor_name)
-
- finally:
- #Delete the flavor
- resp, body = self.client.delete_flavor(new_flavor_id)
- self.assertEqual(resp.status, 202)
- self.client.wait_for_resource_deletion(new_flavor_id)
+ #Verify flavor is retrieved
+ resp, flavor = self.client.get_flavor_details(new_flavor_id)
+ self.assertEqual(resp.status, 200)
+ self.assertEqual(flavor['name'], flavor_name)
@attr(type='positive')
def test_create_flavor_verify_entry_in_list_details(self):
@@ -100,29 +99,23 @@
flavor_name = rand_name(self.flavor_name_prefix)
new_flavor_id = rand_int_id(start=1000)
- try:
- #Create the flavor
- resp, flavor = self.client.create_flavor(flavor_name,
- self.ram, self.vcpus,
- self.disk,
- new_flavor_id,
- ephemeral=self.ephemeral,
- swap=self.swap,
- rxtx=self.rxtx)
- flag = False
- #Verify flavor is retrieved
- resp, flavors = self.client.list_flavors_with_detail()
- self.assertEqual(resp.status, 200)
- for flavor in flavors:
- if flavor['name'] == flavor_name:
- flag = True
- self.assertTrue(flag)
-
- finally:
- #Delete the flavor
- resp, body = self.client.delete_flavor(new_flavor_id)
- self.assertEqual(resp.status, 202)
- self.client.wait_for_resource_deletion(new_flavor_id)
+ #Create the flavor
+ resp, flavor = self.client.create_flavor(flavor_name,
+ self.ram, self.vcpus,
+ self.disk,
+ new_flavor_id,
+ ephemeral=self.ephemeral,
+ swap=self.swap,
+ rxtx=self.rxtx)
+ self.addCleanup(self.flavor_clean_up, flavor['id'])
+ flag = False
+ #Verify flavor is retrieved
+ resp, flavors = self.client.list_flavors_with_detail()
+ self.assertEqual(resp.status, 200)
+ for flavor in flavors:
+ if flavor['name'] == flavor_name:
+ flag = True
+ self.assertTrue(flag)
@attr(type='negative')
def test_get_flavor_details_for_deleted_flavor(self):
@@ -138,11 +131,11 @@
ephemeral=self.ephemeral,
swap=self.swap,
rxtx=self.rxtx)
- self.assertEquals(200, resp.status)
-
# Delete the flavor
- resp, _ = self.client.delete_flavor(new_flavor_id)
- self.assertEqual(resp.status, 202)
+ new_flavor_id = flavor['id']
+ resp_delete, body = self.client.delete_flavor(new_flavor_id)
+ self.assertEquals(200, resp.status)
+ self.assertEquals(202, resp_delete.status)
# Deleted flavors can be seen via detailed GET
resp, flavor = self.client.get_flavor_details(new_flavor_id)
@@ -164,46 +157,40 @@
flavor_name = rand_name(self.flavor_name_prefix)
new_flavor_id = rand_int_id(start=1000)
- try:
- #Create the flavor
- resp, flavor = self.client.create_flavor(flavor_name,
- self.ram, self.vcpus,
- self.disk,
- new_flavor_id)
- self.assertEqual(200, resp.status)
- self.assertEqual(flavor['name'], flavor_name)
- self.assertEqual(flavor['ram'], self.ram)
- self.assertEqual(flavor['vcpus'], self.vcpus)
- self.assertEqual(flavor['disk'], self.disk)
- self.assertEqual(int(flavor['id']), new_flavor_id)
- self.assertEqual(flavor['swap'], '')
- self.assertEqual(int(flavor['rxtx_factor']), 1)
- self.assertEqual(int(flavor['OS-FLV-EXT-DATA:ephemeral']), 0)
- if self._interface == "xml":
- XMLNS_OS_FLV_ACCESS = "http://docs.openstack.org/compute/ext/"\
- "flavor_access/api/v2"
- key = "{" + XMLNS_OS_FLV_ACCESS + "}is_public"
- self.assertEqual(flavor[key], "True")
- if self._interface == "json":
- self.assertEqual(flavor['os-flavor-access:is_public'], True)
+ #Create the flavor
+ resp, flavor = self.client.create_flavor(flavor_name,
+ self.ram, self.vcpus,
+ self.disk,
+ new_flavor_id)
+ self.addCleanup(self.flavor_clean_up, flavor['id'])
+ self.assertEqual(200, resp.status)
+ self.assertEqual(flavor['name'], flavor_name)
+ self.assertEqual(flavor['ram'], self.ram)
+ self.assertEqual(flavor['vcpus'], self.vcpus)
+ self.assertEqual(flavor['disk'], self.disk)
+ self.assertEqual(int(flavor['id']), new_flavor_id)
+ self.assertEqual(flavor['swap'], '')
+ self.assertEqual(int(flavor['rxtx_factor']), 1)
+ self.assertEqual(int(flavor['OS-FLV-EXT-DATA:ephemeral']), 0)
+ if self._interface == "xml":
+ XMLNS_OS_FLV_ACCESS = "http://docs.openstack.org/compute/ext/"\
+ "flavor_access/api/v2"
+ key = "{" + XMLNS_OS_FLV_ACCESS + "}is_public"
+ self.assertEqual(flavor[key], "True")
+ if self._interface == "json":
+ self.assertEqual(flavor['os-flavor-access:is_public'], True)
- #Verify flavor is retrieved
- resp, flavor = self.client.get_flavor_details(new_flavor_id)
- self.assertEqual(resp.status, 200)
- self.assertEqual(flavor['name'], flavor_name)
- #Check if flavor is present in list
- resp, flavors = self.client.list_flavors_with_detail()
- self.assertEqual(resp.status, 200)
- for flavor in flavors:
- if flavor['name'] == flavor_name:
- flag = True
- self.assertTrue(flag)
-
- finally:
- #Delete the flavor
- resp, body = self.client.delete_flavor(new_flavor_id)
- self.assertEqual(resp.status, 202)
- self.client.wait_for_resource_deletion(new_flavor_id)
+ #Verify flavor is retrieved
+ resp, flavor = self.client.get_flavor_details(new_flavor_id)
+ self.assertEqual(resp.status, 200)
+ self.assertEqual(flavor['name'], flavor_name)
+ #Check if flavor is present in list
+ resp, flavors = self.client.list_flavors_with_detail()
+ self.assertEqual(resp.status, 200)
+ for flavor in flavors:
+ if flavor['name'] == flavor_name:
+ flag = True
+ self.assertTrue(flag)
@attr(type='positive')
def test_flavor_not_public_verify_entry_not_in_list_details(self):
@@ -213,25 +200,21 @@
flavor_name = rand_name(self.flavor_name_prefix)
new_flavor_id = rand_int_id(start=1000)
- try:
- #Create the flavor
- resp, flavor = self.client.create_flavor(flavor_name,
- self.ram, self.vcpus,
- self.disk,
- new_flavor_id,
- is_public="False")
- flag = False
- #Verify flavor is retrieved
- resp, flavors = self.client.list_flavors_with_detail()
- self.assertEqual(resp.status, 200)
- for flavor in flavors:
- if flavor['name'] == flavor_name:
- flag = True
- self.assertFalse(flag)
- finally:
- #Delete the flavor
- resp, body = self.client.delete_flavor(new_flavor_id)
- self.assertEqual(resp.status, 202)
+ #Create the flavor
+ resp, flavor = self.client.create_flavor(flavor_name,
+ self.ram, self.vcpus,
+ self.disk,
+ new_flavor_id,
+ is_public="False")
+ self.addCleanup(self.flavor_clean_up, flavor['id'])
+ flag = False
+ #Verify flavor is retrieved
+ resp, flavors = self.client.list_flavors_with_detail()
+ self.assertEqual(resp.status, 200)
+ for flavor in flavors:
+ if flavor['name'] == flavor_name:
+ flag = True
+ self.assertFalse(flag)
def test_list_public_flavor_with_other_user(self):
#Create a Flavor with public access.
@@ -239,76 +222,65 @@
flavor_name = rand_name(self.flavor_name_prefix)
new_flavor_id = rand_int_id(start=1000)
- try:
#Create the flavor
- resp, flavor = self.client.create_flavor(flavor_name,
- self.ram, self.vcpus,
- self.disk,
- new_flavor_id,
- is_public="True")
- flag = False
- self.new_client = self.flavors_client
- #Verify flavor is retrieved with new user
- resp, flavors = self.new_client.list_flavors_with_detail()
- self.assertEqual(resp.status, 200)
- for flavor in flavors:
- if flavor['name'] == flavor_name:
- flag = True
- self.assertTrue(flag)
- finally:
- #Delete the flavor
- resp, body = self.client.delete_flavor(new_flavor_id)
- self.assertEqual(resp.status, 202)
- self.client.wait_for_resource_deletion(new_flavor_id)
+ resp, flavor = self.client.create_flavor(flavor_name,
+ self.ram, self.vcpus,
+ self.disk,
+ new_flavor_id,
+ is_public="True")
+ self.addCleanup(self.flavor_clean_up, flavor['id'])
+ flag = False
+ self.new_client = self.flavors_client
+ #Verify flavor is retrieved with new user
+ resp, flavors = self.new_client.list_flavors_with_detail()
+ self.assertEqual(resp.status, 200)
+ for flavor in flavors:
+ if flavor['name'] == flavor_name:
+ flag = True
+ self.assertTrue(flag)
@attr(type='positive')
def test_is_public_string_variations(self):
- try:
- flavor_id_not_public = rand_int_id(start=1000)
- flavor_name_not_public = rand_name(self.flavor_name_prefix)
- flavor_id_public = rand_int_id(start=1000)
- flavor_name_public = rand_name(self.flavor_name_prefix)
+ flavor_id_not_public = rand_int_id(start=1000)
+ flavor_name_not_public = rand_name(self.flavor_name_prefix)
+ flavor_id_public = rand_int_id(start=1000)
+ flavor_name_public = rand_name(self.flavor_name_prefix)
- # Create a non public flavor
- resp, flavor = self.client.create_flavor(flavor_name_not_public,
- self.ram, self.vcpus,
- self.disk,
- flavor_id_not_public,
- is_public="False")
+ # Create a non public flavor
+ resp, flavor = self.client.create_flavor(flavor_name_not_public,
+ self.ram, self.vcpus,
+ self.disk,
+ flavor_id_not_public,
+ is_public="False")
+ self.addCleanup(self.flavor_clean_up, flavor['id'])
- # Create a public flavor
- resp, flavor = self.client.create_flavor(flavor_name_public,
- self.ram, self.vcpus,
- self.disk,
- flavor_id_public,
- is_public="True")
+ # Create a public flavor
+ resp, flavor = self.client.create_flavor(flavor_name_public,
+ self.ram, self.vcpus,
+ self.disk,
+ flavor_id_public,
+ is_public="True")
+ self.addCleanup(self.flavor_clean_up, flavor['id'])
- def _flavor_lookup(flavors, flavor_name):
- for flavor in flavors:
- if flavor['name'] == flavor_name:
- return flavor
- return None
+ def _flavor_lookup(flavors, flavor_name):
+ for flavor in flavors:
+ if flavor['name'] == flavor_name:
+ return flavor
+ return None
- def _test_string_variations(variations, flavor_name):
- for string in variations:
- params = {'is_public': string}
- r, flavors = self.client.list_flavors_with_detail(params)
- self.assertEqual(r.status, 200)
- flavor = _flavor_lookup(flavors, flavor_name)
- self.assertNotEqual(flavor, None)
+ def _test_string_variations(variations, flavor_name):
+ for string in variations:
+ params = {'is_public': string}
+ r, flavors = self.client.list_flavors_with_detail(params)
+ self.assertEqual(r.status, 200)
+ flavor = _flavor_lookup(flavors, flavor_name)
+ self.assertNotEqual(flavor, None)
- _test_string_variations(['f', 'false', 'no', '0'],
- flavor_name_not_public)
+ _test_string_variations(['f', 'false', 'no', '0'],
+ flavor_name_not_public)
- _test_string_variations(['t', 'true', 'yes', '1'],
- flavor_name_public)
-
- finally:
- # Delete flavors
- for flavor_id in [flavor_id_not_public, flavor_id_public]:
- resp, body = self.client.delete_flavor(flavor_id)
- self.assertEqual(resp.status, 202)
- self.client.wait_for_resource_deletion(flavor_id)
+ _test_string_variations(['t', 'true', 'yes', '1'],
+ flavor_name_public)
@attr(type='negative')
def test_invalid_is_public_string(self):
diff --git a/tempest/tests/compute/admin/test_quotas.py b/tempest/tests/compute/admin/test_quotas.py
index 5a9b6f9..8f520f9 100644
--- a/tempest/tests/compute/admin/test_quotas.py
+++ b/tempest/tests/compute/admin/test_quotas.py
@@ -63,12 +63,10 @@
# Admin can get the default resource quota set for a tenant
expected_quota_set = self.default_quota_set.copy()
expected_quota_set['id'] = self.demo_tenant_id
- try:
- resp, quota_set = self.client.get_quota_set(self.demo_tenant_id)
- self.assertEqual(200, resp.status)
- self.assertEqual(expected_quota_set, quota_set)
- except Exception:
- self.fail("Admin could not get the default quota set for a tenant")
+ resp, quota_set = self.client.get_default_quota_set(
+ self.demo_tenant_id)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(expected_quota_set, quota_set)
def test_update_all_quota_resources_for_tenant(self):
# Admin can update all the resource quota limits for a tenant
diff --git a/tempest/tests/compute/base.py b/tempest/tests/compute/base.py
index 87aa889..7716922 100644
--- a/tempest/tests/compute/base.py
+++ b/tempest/tests/compute/base.py
@@ -60,6 +60,7 @@
cls.volumes_extensions_client = os.volumes_extensions_client
cls.volumes_client = os.volumes_client
cls.interfaces_client = os.interfaces_client
+ cls.fixed_ips_client = os.fixed_ips_client
cls.build_interval = cls.config.compute.build_interval
cls.build_timeout = cls.config.compute.build_timeout
cls.ssh_user = cls.config.compute.ssh_user
diff --git a/tempest/tests/compute/security_groups/test_security_group_rules.py b/tempest/tests/compute/security_groups/test_security_group_rules.py
index 99d9a5d..c2032d4 100644
--- a/tempest/tests/compute/security_groups/test_security_group_rules.py
+++ b/tempest/tests/compute/security_groups/test_security_group_rules.py
@@ -33,30 +33,24 @@
def test_security_group_rules_create(self):
# Positive test: Creation of Security Group rule
# should be successfull
- try:
- #Creating a Security Group to add rules to it
- s_name = rand_name('securitygroup-')
- s_description = rand_name('description-')
- resp, securitygroup = \
- self.client.create_security_group(s_name, s_description)
- securitygroup_id = securitygroup['id']
- #Adding rules to the created Security Group
- parent_group_id = securitygroup['id']
- ip_protocol = 'tcp'
- from_port = 22
- to_port = 22
- resp, rule = \
- self.client.create_security_group_rule(parent_group_id,
- ip_protocol,
- from_port,
- to_port)
- self.assertEqual(200, resp.status)
- finally:
- #Deleting the Security Group rule, created in this method
- group_rule_id = rule['id']
- self.client.delete_security_group_rule(group_rule_id)
- #Deleting the Security Group created in this method
- resp, _ = self.client.delete_security_group(securitygroup_id)
+ #Creating a Security Group to add rules to it
+ s_name = rand_name('securitygroup-')
+ s_description = rand_name('description-')
+ resp, securitygroup = \
+ self.client.create_security_group(s_name, s_description)
+ securitygroup_id = securitygroup['id']
+ self.addCleanup(self.client.delete_security_group, securitygroup_id)
+ #Adding rules to the created Security Group
+ ip_protocol = 'tcp'
+ from_port = 22
+ to_port = 22
+ resp, rule = \
+ self.client.create_security_group_rule(securitygroup_id,
+ ip_protocol,
+ from_port,
+ to_port)
+ self.addCleanup(self.client.delete_security_group_rule, rule['id'])
+ self.assertEqual(200, resp.status)
@attr(type='positive')
def test_security_group_rules_create_with_optional_arguments(self):
@@ -64,75 +58,38 @@
# with optional arguments
# should be successfull
- rule_id = None
secgroup1 = None
secgroup2 = None
- try:
- #Creating a Security Group to add rules to it
- s_name = rand_name('securitygroup-')
- s_description = rand_name('description-')
- resp, securitygroup = \
- self.client.create_security_group(s_name, s_description)
- secgroup1 = securitygroup['id']
- #Creating a Security Group so as to assign group_id to the rule
- s_name2 = rand_name('securitygroup-')
- s_description2 = rand_name('description-')
- resp, securitygroup = \
- self.client.create_security_group(s_name2, s_description2)
- secgroup2 = securitygroup['id']
- #Adding rules to the created Security Group with optional arguments
- parent_group_id = secgroup1
- ip_protocol = 'tcp'
- from_port = 22
- to_port = 22
- cidr = '10.2.3.124/24'
- group_id = secgroup2
- resp, rule = \
- self.client.create_security_group_rule(parent_group_id,
- ip_protocol,
- from_port,
- to_port,
- cidr=cidr,
- group_id=group_id)
- rule_id = rule['id']
- self.assertEqual(200, resp.status)
- finally:
- #Deleting the Security Group rule, created in this method
- if rule_id:
- self.client.delete_security_group_rule(rule_id)
- #Deleting the Security Groups created in this method
- if secgroup1:
- self.client.delete_security_group(secgroup1)
- if secgroup2:
- self.client.delete_security_group(secgroup2)
-
- @attr(type='positive')
- def test_security_group_rules_create_delete(self):
- # Positive test: Deletion of Security Group rule
- # should be successfull
- try:
- #Creating a Security Group to add rule to it
- s_name = rand_name('securitygroup-')
- s_description = rand_name('description-')
- resp, securitygroup = \
- self.client.create_security_group(s_name, s_description)
- securitygroup_id = securitygroup['id']
- #Adding rules to the created Security Group
- parent_group_id = securitygroup['id']
- ip_protocol = 'tcp'
- from_port = 22
- to_port = 22
- resp, rule = \
- self.client.create_security_group_rule(parent_group_id,
- ip_protocol,
- from_port,
- to_port)
- finally:
- #Deleting the Security Group rule, created in this method
- group_rule_id = rule['id']
- self.client.delete_security_group_rule(group_rule_id)
- #Deleting the Security Group created in this method
- resp, _ = self.client.delete_security_group(securitygroup_id)
+ #Creating a Security Group to add rules to it
+ s_name = rand_name('securitygroup-')
+ s_description = rand_name('description-')
+ resp, securitygroup = \
+ self.client.create_security_group(s_name, s_description)
+ secgroup1 = securitygroup['id']
+ self.addCleanup(self.client.delete_security_group, secgroup1)
+ #Creating a Security Group so as to assign group_id to the rule
+ s_name2 = rand_name('securitygroup-')
+ s_description2 = rand_name('description-')
+ resp, securitygroup = \
+ self.client.create_security_group(s_name2, s_description2)
+ secgroup2 = securitygroup['id']
+ self.addCleanup(self.client.delete_security_group, secgroup2)
+ #Adding rules to the created Security Group with optional arguments
+ parent_group_id = secgroup1
+ ip_protocol = 'tcp'
+ from_port = 22
+ to_port = 22
+ cidr = '10.2.3.124/24'
+ group_id = secgroup2
+ resp, rule = \
+ self.client.create_security_group_rule(parent_group_id,
+ ip_protocol,
+ from_port,
+ to_port,
+ cidr=cidr,
+ group_id=group_id)
+ self.addCleanup(self.client.delete_security_group_rule, rule['id'])
+ self.assertEqual(200, resp.status)
@attr(type='negative')
def test_security_group_rules_create_with_invalid_id(self):
diff --git a/tempest/tests/compute/security_groups/test_security_groups.py b/tempest/tests/compute/security_groups/test_security_groups.py
index 70a01a0..d0afde4 100644
--- a/tempest/tests/compute/security_groups/test_security_groups.py
+++ b/tempest/tests/compute/security_groups/test_security_groups.py
@@ -29,79 +29,80 @@
super(SecurityGroupsTestJSON, cls).setUpClass()
cls.client = cls.security_groups_client
+ def _delete_security_group(self, securitygroup_id):
+ resp, _ = self.client.delete_security_group(securitygroup_id)
+ self.assertEqual(202, resp.status)
+
@attr(type='positive')
def test_security_groups_create_list_delete(self):
# Positive test:Should return the list of Security Groups
- try:
- #Create 3 Security Groups
- security_group_list = list()
- for i in range(3):
- s_name = rand_name('securitygroup-')
- s_description = rand_name('description-')
- resp, securitygroup = \
- self.client.create_security_group(s_name, s_description)
- self.assertEqual(200, resp.status)
- security_group_list.append(securitygroup)
- #Fetch all Security Groups and verify the list
- #has all created Security Groups
- resp, fetched_list = self.client.list_security_groups()
- self.assertEqual(200, resp.status)
- #Now check if all the created Security Groups are in fetched list
- missing_sgs = \
- [sg for sg in security_group_list if sg not in fetched_list]
- self.assertFalse(missing_sgs,
- "Failed to find Security Group %s in fetched "
- "list" % ', '.join(m_group['name']
- for m_group in missing_sgs))
- finally:
- #Delete all the Security Groups created in this method
- for securitygroup in security_group_list:
- resp, _ = \
- self.client.delete_security_group(securitygroup['id'])
- self.assertEqual(202, resp.status)
-
- @attr(type='positive')
- def test_security_group_create_delete(self):
- # Security Group should be created, verified and deleted
- try:
+ #Create 3 Security Groups
+ security_group_list = list()
+ for i in range(3):
s_name = rand_name('securitygroup-')
s_description = rand_name('description-')
resp, securitygroup = \
self.client.create_security_group(s_name, s_description)
self.assertEqual(200, resp.status)
- self.assertTrue('id' in securitygroup)
- securitygroup_id = securitygroup['id']
- self.assertFalse(securitygroup_id is None)
- self.assertTrue('name' in securitygroup)
- securitygroup_name = securitygroup['name']
- self.assertEqual(securitygroup_name, s_name,
- "The created Security Group name is "
- "not equal to the requested name")
- finally:
- #Delete Security Group created in this method
- resp, _ = self.client.delete_security_group(securitygroup['id'])
- self.assertEqual(202, resp.status)
+ self.addCleanup(self._delete_security_group,
+ securitygroup['id'])
+ security_group_list.append(securitygroup)
+ #Fetch all Security Groups and verify the list
+ #has all created Security Groups
+ resp, fetched_list = self.client.list_security_groups()
+ self.assertEqual(200, resp.status)
+ #Now check if all the created Security Groups are in fetched list
+ missing_sgs = \
+ [sg for sg in security_group_list if sg not in fetched_list]
+ self.assertFalse(missing_sgs,
+ "Failed to find Security Group %s in fetched "
+ "list" % ', '.join(m_group['name']
+ for m_group in missing_sgs))
+
+ #TODO(afazekas): scheduled for delete,
+ #test_security_group_create_get_delete covers it
+ @attr(type='positive')
+ def test_security_group_create_delete(self):
+ # Security Group should be created, verified and deleted
+ s_name = rand_name('securitygroup-')
+ s_description = rand_name('description-')
+ resp, securitygroup = \
+ self.client.create_security_group(s_name, s_description)
+ self.assertTrue('id' in securitygroup)
+ securitygroup_id = securitygroup['id']
+ self.addCleanup(self._delete_security_group,
+ securitygroup_id)
+ self.assertEqual(200, resp.status)
+ self.assertFalse(securitygroup_id is None)
+ self.assertTrue('name' in securitygroup)
+ securitygroup_name = securitygroup['name']
+ self.assertEqual(securitygroup_name, s_name,
+ "The created Security Group name is "
+ "not equal to the requested name")
@attr(type='positive')
def test_security_group_create_get_delete(self):
# Security Group should be created, fetched and deleted
- try:
- s_name = rand_name('securitygroup-')
- s_description = rand_name('description-')
- resp, securitygroup = \
- self.client.create_security_group(s_name, s_description)
- self.assertEqual(200, resp.status)
- #Now fetch the created Security Group by its 'id'
- resp, fetched_group = \
- self.client.get_security_group(securitygroup['id'])
- self.assertEqual(200, resp.status)
- self.assertEqual(securitygroup, fetched_group,
- "The fetched Security Group is different "
- "from the created Group")
- finally:
- #Delete the Security Group created in this method
- resp, _ = self.client.delete_security_group(securitygroup['id'])
- self.assertEqual(202, resp.status)
+ s_name = rand_name('securitygroup-')
+ s_description = rand_name('description-')
+ resp, securitygroup = \
+ self.client.create_security_group(s_name, s_description)
+ self.addCleanup(self._delete_security_group,
+ securitygroup['id'])
+
+ self.assertEqual(200, resp.status)
+ self.assertTrue('name' in securitygroup)
+ securitygroup_name = securitygroup['name']
+ self.assertEqual(securitygroup_name, s_name,
+ "The created Security Group name is "
+ "not equal to the requested name")
+ #Now fetch the created Security Group by its 'id'
+ resp, fetched_group = \
+ self.client.get_security_group(securitygroup['id'])
+ self.assertEqual(200, resp.status)
+ self.assertEqual(securitygroup, fetched_group,
+ "The fetched Security Group is different "
+ "from the created Group")
@attr(type='negative')
def test_security_group_get_nonexistant_group(self):
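
The _delete_security_group helper registered with addCleanup above is defined elsewhere in this test class and is not part of these hunks; a minimal sketch of such a helper, assuming the client's delete_security_group returns a (resp, body) pair as the calls removed from the finally blocks did:

    def _delete_security_group(self, securitygroup_id):
        # Delete the Security Group and check the 202 response, mirroring
        # the assertions that used to live in the finally blocks.
        resp, _ = self.client.delete_security_group(securitygroup_id)
        self.assertEqual(202, resp.status)
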
diff --git a/tempest/tests/compute/servers/test_multiple_create.py b/tempest/tests/compute/servers/test_multiple_create.py
new file mode 100644
index 0000000..ad5d604
--- /dev/null
+++ b/tempest/tests/compute/servers/test_multiple_create.py
@@ -0,0 +1,117 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 IBM Corp
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.common.utils.data_utils import rand_name
+from tempest import exceptions
+from tempest.test import attr
+from tempest.tests.compute import base
+
+
+class MultipleCreateTestJSON(base.BaseComputeTest):
+ _interface = 'json'
+ _name = 'multiple-create-test'
+
+ def _get_created_servers(self, name):
+ """Return the created servers whose names start with the given name."""
+ resp, body = self.servers_client.list_servers()
+ servers = body['servers']
+ servers_created = []
+ for server in servers:
+ if server['name'].startswith(name):
+ servers_created.append(server)
+ return servers_created
+
+ def _generate_name(self):
+ return rand_name(self._name)
+
+ def _create_multiple_servers(self, name=None, wait_until=None, **kwargs):
+ """
+ Create multiple servers with one request and add them to the servers
+ list so that they are cleaned up once the tests finish.
+ """
+ kwargs['name'] = kwargs.get('name', self._generate_name())
+ resp, body = self.create_server(**kwargs)
+ created_servers = self._get_created_servers(kwargs['name'])
+ # NOTE(maurosr): add them to the cls.servers list from the
+ # base.BaseComputeTest class so they get cleaned up.
+ self.servers.extend(created_servers)
+ # NOTE(maurosr): get a server list, check status of the ones with names
+ # that match and wait for them to become active. Since they are
+ # building in parallel, waiting inside the for loop should not hurt
+ # performance.
+ if wait_until is not None:
+ for server in created_servers:
+ self.servers_client.wait_for_server_status(server['id'],
+ wait_until)
+
+ return resp, body
+
+ @attr(type='positive')
+ def test_multiple_create(self):
+ resp, body = self._create_multiple_servers(wait_until='ACTIVE',
+ min_count=1,
+ max_count=2)
+ # NOTE(maurosr): do status response check and also make sure that
+ # reservation_id is not in the response body when the request does
+ # not ask for return_reservation_id
+ self.assertEqual('202', resp['status'])
+ self.assertNotIn('reservation_id', body)
+
+ @attr(type='negative')
+ def test_min_count_less_than_one(self):
+ invalid_min_count = 0
+ self.assertRaises(exceptions.BadRequest, self._create_multiple_servers,
+ min_count=invalid_min_count)
+
+ @attr(type='negative')
+ def test_min_count_non_integer(self):
+ invalid_min_count = 2.5
+ self.assertRaises(exceptions.BadRequest, self._create_multiple_servers,
+ min_count=invalid_min_count)
+
+ @attr(type='negative')
+ def test_max_count_less_than_one(self):
+ invalid_max_count = 0
+ self.assertRaises(exceptions.BadRequest, self._create_multiple_servers,
+ max_count=invalid_max_count)
+
+ @attr(type='negative')
+ def test_max_count_non_integer(self):
+ invalid_max_count = 2.5
+ self.assertRaises(exceptions.BadRequest, self._create_multiple_servers,
+ max_count=invalid_max_count)
+
+ @attr(type='negative')
+ def test_max_count_less_than_min_count(self):
+ min_count = 3
+ max_count = 2
+ self.assertRaises(exceptions.BadRequest, self._create_multiple_servers,
+ min_count=min_count,
+ max_count=max_count)
+
+ @attr(type='positive')
+ def test_multiple_create_with_reservation_return(self):
+ resp, body = self._create_multiple_servers(wait_until='ACTIVE',
+ min_count=1,
+ max_count=2,
+ return_reservation_id=True)
+ self.assertEqual('202', resp['status'])
+ self.assertIn('reservation_id', body)
+
+
+class MultipleCreateTestXML(MultipleCreateTestJSON):
+ _interface = 'xml'
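
The min_count, max_count and return_reservation_id kwargs above are passed through create_server to the servers client. For orientation, the POST /servers body that Nova's os-multiple-create extension accepts is sketched below; the values are made up and the exact field layout is an assumption, not something shown in this change:

    import json

    # Hypothetical request body for test_multiple_create (illustration only).
    body = {
        "server": {
            "name": "multiple-create-test-12345",
            "imageRef": "70a599e0-31e7-49b7-b260-868f441e862b",
            "flavorRef": "1",
            "min_count": 1,
            "max_count": 2,
            # only sent by test_multiple_create_with_reservation_return
            "return_reservation_id": True,
        }
    }
    print(json.dumps(body))
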
diff --git a/tempest/tests/compute/servers/test_server_advanced_ops.py b/tempest/tests/compute/servers/test_server_advanced_ops.py
index f949f2e..ac0d7be 100644
--- a/tempest/tests/compute/servers/test_server_advanced_ops.py
+++ b/tempest/tests/compute/servers/test_server_advanced_ops.py
@@ -24,7 +24,7 @@
LOG = logging.getLogger(__name__)
-class TestServerAdvancedOps(test.DefaultClientTest):
+class TestServerAdvancedOps(test.DefaultClientSmokeTest):
"""
This test case stresses some advanced server instance operations:
@@ -66,16 +66,16 @@
self.assertEqual(self.instance.status, 'BUILD')
instance_id = self.get_resource('instance').id
- self.status_timeout(self.compute_client.servers, instance_id, 'ACTIVE')
+ test.status_timeout(self.compute_client.servers, instance_id, 'ACTIVE')
instance = self.get_resource('instance')
instance_id = instance.id
resize_flavor = self.config.compute.flavor_ref_alt
LOG.debug("Resizing instance %s from flavor %s to flavor %s",
instance.id, instance.flavor, resize_flavor)
instance.resize(resize_flavor)
- self.status_timeout(self.compute_client.servers, instance_id,
+ test.status_timeout(self.compute_client.servers, instance_id,
'VERIFY_RESIZE')
LOG.debug("Confirming resize of instance %s", instance_id)
instance.confirm_resize()
- self.status_timeout(self.compute_client.servers, instance_id, 'ACTIVE')
+ test.status_timeout(self.compute_client.servers, instance_id, 'ACTIVE')
diff --git a/tempest/tests/compute/servers/test_server_basic_ops.py b/tempest/tests/compute/servers/test_server_basic_ops.py
index 2183193..c7fad7a 100644
--- a/tempest/tests/compute/servers/test_server_basic_ops.py
+++ b/tempest/tests/compute/servers/test_server_basic_ops.py
@@ -18,12 +18,12 @@
import logging
from tempest.common.utils.data_utils import rand_name
-from tempest import smoke
+from tempest import test
LOG = logging.getLogger(__name__)
-class TestServerBasicOps(smoke.DefaultClientSmokeTest):
+class TestServerBasicOps(test.DefaultClientSmokeTest):
"""
This smoke test case follows this basic set of operations:
@@ -101,7 +101,7 @@
def wait_on_active(self):
instance_id = self.get_resource('instance').id
- self.status_timeout(self.compute_client.servers, instance_id, 'ACTIVE')
+ test.status_timeout(self.compute_client.servers, instance_id, 'ACTIVE')
def pause_server(self):
instance = self.get_resource('instance')
@@ -109,7 +109,7 @@
LOG.debug("Pausing instance %s. Current status: %s",
instance_id, instance.status)
instance.pause()
- self.status_timeout(self.compute_client.servers, instance_id, 'PAUSED')
+ test.status_timeout(self.compute_client.servers, instance_id, 'PAUSED')
def unpause_server(self):
instance = self.get_resource('instance')
@@ -117,7 +117,7 @@
LOG.debug("Unpausing instance %s. Current status: %s",
instance_id, instance.status)
instance.unpause()
- self.status_timeout(self.compute_client.servers, instance_id, 'ACTIVE')
+ test.status_timeout(self.compute_client.servers, instance_id, 'ACTIVE')
def suspend_server(self):
instance = self.get_resource('instance')
@@ -125,7 +125,7 @@
LOG.debug("Suspending instance %s. Current status: %s",
instance_id, instance.status)
instance.suspend()
- self.status_timeout(self.compute_client.servers,
+ test.status_timeout(self.compute_client.servers,
instance_id, 'SUSPENDED')
def resume_server(self):
@@ -134,7 +134,7 @@
LOG.debug("Resuming instance %s. Current status: %s",
instance_id, instance.status)
instance.resume()
- self.status_timeout(self.compute_client.servers, instance_id, 'ACTIVE')
+ test.status_timeout(self.compute_client.servers, instance_id, 'ACTIVE')
def terminate_instance(self):
instance = self.get_resource('instance')
diff --git a/tempest/tests/compute/servers/test_server_rescue.py b/tempest/tests/compute/servers/test_server_rescue.py
index 29c9944..91010ce 100644
--- a/tempest/tests/compute/servers/test_server_rescue.py
+++ b/tempest/tests/compute/servers/test_server_rescue.py
@@ -52,16 +52,16 @@
cls.volumes_extensions_client.create_volume(1,
display_name=
'test_attach')
- cls.volumes_extensions_client.wait_for_volume_status
- (cls.volume_to_attach['id'], 'available')
+ cls.volumes_extensions_client.wait_for_volume_status(
+ cls.volume_to_attach['id'], 'available')
# Create a volume and wait for it to become ready for attach
resp, cls.volume_to_detach = \
cls.volumes_extensions_client.create_volume(1,
display_name=
'test_detach')
- cls.volumes_extensions_client.wait_for_volume_status
- (cls.volume_to_detach['id'], 'available')
+ cls.volumes_extensions_client.wait_for_volume_status(
+ cls.volume_to_detach['id'], 'available')
# Server for positive tests
resp, server = cls.create_server(image_id=cls.image_ref,
@@ -154,12 +154,15 @@
self.servers_client.attach_volume(self.server_id,
self.volume_to_detach['id'],
device='/dev/%s' % self.device)
- self.volumes_extensions_client.wait_for_volume_status
- (self.volume_to_detach['id'], 'in-use')
+ self.volumes_extensions_client.wait_for_volume_status(
+ self.volume_to_detach['id'], 'in-use')
# Rescue the server
self.servers_client.rescue_server(self.server_id, self.password)
self.servers_client.wait_for_server_status(self.server_id, 'RESCUE')
+ #addCleanup is a LIFO queue
+ self.addCleanup(self._detach, self.server_id,
+ self.volume_to_detach['id'])
self.addCleanup(self._unrescue, self.server_id)
# Detach the volume from the server expecting failure
@@ -174,6 +177,7 @@
self.servers_client.rescue_server(
self.server_id, self.password)
self.servers_client.wait_for_server_status(self.server_id, 'RESCUE')
+ self.addCleanup(self._unrescue, self.server_id)
#Association of floating IP to a rescued vm
client = self.floating_ips_client
@@ -188,11 +192,6 @@
self.server_id)
self.assertEqual(202, resp.status)
- # Unrescue the server
- resp, body = self.servers_client.unrescue_server(self.server_id)
- self.assertEqual(202, resp.status)
- self.servers_client.wait_for_server_status(self.server_id, 'ACTIVE')
-
@attr(type='positive')
@testtools.skip("Skipped until Bug #1126257 is resolved")
def test_rescued_vm_add_remove_security_group(self):
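
The "#addCleanup is a LIFO queue" note above is what makes the ordering work: _unrescue is registered last, so it runs before _detach, and the volume is only detached once the server has left RESCUE. A standalone sketch of that LIFO behaviour, using plain unittest and hypothetical names:

    import unittest


    class CleanupOrderTest(unittest.TestCase):

        def test_cleanups_run_in_lifo_order(self):
            order = []
            self.addCleanup(order.append, 'registered first')
            self.addCleanup(order.append, 'registered last')
            # Run the queued cleanups now so their order can be asserted
            # inside the test body.
            self.doCleanups()
            self.assertEqual(['registered last', 'registered first'], order)


    if __name__ == '__main__':
        unittest.main()
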
diff --git a/tempest/tests/compute/test_quotas.py b/tempest/tests/compute/test_quotas.py
index a84d041..92e5a70 100644
--- a/tempest/tests/compute/test_quotas.py
+++ b/tempest/tests/compute/test_quotas.py
@@ -30,23 +30,31 @@
resp, tenants = cls.admin_client.list_tenants()
cls.tenant_id = [tnt['id'] for tnt in tenants if tnt['name'] ==
cls.client.tenant_name][0]
+ cls.default_quota_set = {'injected_file_content_bytes': 10240,
+ 'metadata_items': 128, 'injected_files': 5,
+ 'ram': 51200, 'floating_ips': 10,
+ 'fixed_ips': -1, 'key_pairs': 100,
+ 'injected_file_path_bytes': 255,
+ 'instances': 10, 'security_group_rules': 20,
+ 'cores': 20, 'security_groups': 10}
+
+ @attr(type='smoke')
+ def test_get_quotas(self):
+ # User can get the quota set for its tenant
+ expected_quota_set = self.default_quota_set.copy()
+ expected_quota_set['id'] = self.tenant_id
+ resp, quota_set = self.client.get_quota_set(self.tenant_id)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(expected_quota_set, quota_set)
@attr(type='smoke')
def test_get_default_quotas(self):
# User can get the default quota set for its tenant
- expected_quota_set = {'injected_file_content_bytes': 10240,
- 'metadata_items': 128, 'injected_files': 5,
- 'ram': 51200, 'floating_ips': 10,
- 'fixed_ips': -1, 'key_pairs': 100,
- 'injected_file_path_bytes': 255, 'instances': 10,
- 'security_group_rules': 20, 'cores': 20,
- 'id': self.tenant_id, 'security_groups': 10}
- try:
- resp, quota_set = self.client.get_quota_set(self.tenant_id)
- self.assertEqual(200, resp.status)
- self.assertEqual(expected_quota_set, quota_set)
- except Exception:
- self.fail("Quota set for tenant did not have default limits")
+ expected_quota_set = self.default_quota_set.copy()
+ expected_quota_set['id'] = self.tenant_id
+ resp, quota_set = self.client.get_default_quota_set(self.tenant_id)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(expected_quota_set, quota_set)
class QuotasTestXML(QuotasTestJSON):
diff --git a/tempest/tests/identity/admin/v3/__init__.py b/tempest/tests/identity/admin/v3/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/tests/identity/admin/v3/__init__.py
diff --git a/tempest/tests/identity/admin/v3/test_endpoints.py b/tempest/tests/identity/admin/v3/test_endpoints.py
new file mode 100755
index 0000000..98fab57
--- /dev/null
+++ b/tempest/tests/identity/admin/v3/test_endpoints.py
@@ -0,0 +1,149 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.common.utils.data_utils import rand_name
+from tempest.test import attr
+from tempest.tests.identity import base
+
+
+class EndPointsTestJSON(base.BaseIdentityAdminTest):
+ _interface = 'json'
+
+ @classmethod
+ def setUpClass(cls):
+ super(EndPointsTestJSON, cls).setUpClass()
+ cls.identity_client = cls.client
+ cls.client = cls.endpoints_client
+ cls.service_ids = list()
+ s_name = rand_name('service-')
+ s_type = rand_name('type--')
+ s_description = rand_name('description-')
+ resp, cls.service_data =\
+ cls.identity_client.create_service(s_name, s_type,
+ description=s_description)
+ cls.service_id = cls.service_data['id']
+ cls.service_ids.append(cls.service_id)
+ # Create endpoints to be used by the LIST and GET test cases
+ cls.setup_endpoints = list()
+ for i in range(2):
+ region = rand_name('region')
+ url = rand_name('url')
+ interface = 'public'
+ resp, endpoint = cls.client.create_endpoint(
+ cls.service_id, interface, url, region=region, enabled=True)
+ cls.setup_endpoints.append(endpoint)
+
+ @classmethod
+ def tearDownClass(cls):
+ for e in cls.setup_endpoints:
+ cls.client.delete_endpoint(e['id'])
+ for s in cls.service_ids:
+ cls.identity_client.delete_service(s)
+
+ @attr('positive')
+ def test_list_endpoints(self):
+ # Get a list of endpoints
+ resp, fetched_endpoints = self.client.list_endpoints()
+ #Asserting LIST Endpoint
+ self.assertEqual(resp['status'], '200')
+ missing_endpoints =\
+ [e for e in self.setup_endpoints if e not in fetched_endpoints]
+ self.assertEqual(0, len(missing_endpoints),
+ "Failed to find endpoint %s in fetched list" %
+ ', '.join(str(e) for e in missing_endpoints))
+
+ @attr('positive')
+ def test_create_delete_endpoint(self):
+ region = rand_name('region')
+ url = rand_name('url')
+ interface = 'public'
+ create_flag = False
+ matched = False
+ try:
+ resp, endpoint =\
+ self.client.create_endpoint(self.service_id, interface, url,
+ region=region, enabled=True)
+ create_flag = True
+ #Asserting Create Endpoint response body
+ self.assertEqual(resp['status'], '201')
+ self.assertEqual(region, endpoint['region'])
+ self.assertEqual(url, endpoint['url'])
+ #Checking if created endpoint is present in the list of endpoints
+ resp, fetched_endpoints = self.client.list_endpoints()
+ for e in fetched_endpoints:
+ if endpoint['id'] == e['id']:
+ matched = True
+ if not matched:
+ self.fail("Created endpoint does not appear in the list"
+ " of endpoints")
+ finally:
+ if create_flag:
+ matched = False
+ #Deleting the endpoint created in this method
+ resp_header, resp_body =\
+ self.client.delete_endpoint(endpoint['id'])
+ self.assertEqual(resp_header['status'], '204')
+ self.assertEqual(resp_body, '')
+ #Checking whether endpoint is deleted successfully
+ resp, fetched_endpoints = self.client.list_endpoints()
+ for e in fetched_endpoints:
+ if endpoint['id'] == e['id']:
+ matched = True
+ if matched:
+ self.fail("Delete endpoint is not successful")
+
+ @attr('smoke')
+ def test_update_endpoint(self):
+ # Create an endpoint so that updating it with new values
+ # can be checked
+ region1 = rand_name('region')
+ url1 = rand_name('url')
+ interface1 = 'public'
+ resp, endpoint_for_update =\
+ self.client.create_endpoint(self.service_id, interface1,
+ url1, region=region1,
+ enabled=True)
+ # Create a service so the endpoint can be updated with a new service ID
+ s_name = rand_name('service-')
+ s_type = rand_name('type--')
+ s_description = rand_name('description-')
+ resp, self.service2 =\
+ self.identity_client.create_service(s_name, s_type,
+ description=s_description)
+ self.service_ids.append(self.service2['id'])
+ #Updating endpoint with new values
+ service_id = self.service2['id']
+ region2 = rand_name('region')
+ url2 = rand_name('url')
+ interface2 = 'internal'
+ resp, endpoint = \
+ self.client.update_endpoint(endpoint_for_update['id'],
+ service_id=self.service2['id'],
+ interface=interface2, url=url2,
+ region=region2, enabled=False)
+ self.assertEqual(resp['status'], '200')
+ #Asserting if the attributes of endpoint are updated
+ self.assertEqual(self.service2['id'], endpoint['service_id'])
+ self.assertEqual(interface2, endpoint['interface'])
+ self.assertEqual(url2, endpoint['url'])
+ self.assertEqual(region2, endpoint['region'])
+ self.assertEqual('False', str(endpoint['enabled']))
+ self.addCleanup(self.client.delete_endpoint, endpoint_for_update['id'])
+
+
+class EndPointsTestXML(EndPointsTestJSON):
+ _interface = 'xml'
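
The try/finally plus create_flag/matched bookkeeping in test_create_delete_endpoint could also be expressed with addCleanup, as the other new tests in this change do. The sketch below keeps the same client calls but drops the explicit delete verification, so it illustrates the cleanup pattern rather than being a drop-in replacement:

    @attr('positive')
    def test_create_endpoint(self):
        region = rand_name('region')
        url = rand_name('url')
        resp, endpoint = self.client.create_endpoint(self.service_id,
                                                     'public', url,
                                                     region=region,
                                                     enabled=True)
        # Registered right after creation, so the endpoint is removed even
        # if one of the assertions below fails.
        self.addCleanup(self.client.delete_endpoint, endpoint['id'])
        self.assertEqual(resp['status'], '201')
        self.assertEqual(region, endpoint['region'])
        self.assertEqual(url, endpoint['url'])
        resp, fetched_endpoints = self.client.list_endpoints()
        self.assertIn(endpoint['id'], [e['id'] for e in fetched_endpoints])
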
diff --git a/tempest/tests/identity/base.py b/tempest/tests/identity/base.py
index 168b2ff..64b8993 100644
--- a/tempest/tests/identity/base.py
+++ b/tempest/tests/identity/base.py
@@ -28,6 +28,7 @@
os = clients.AdminManager(interface=cls._interface)
cls.client = os.identity_client
cls.token_client = os.token_client
+ cls.endpoints_client = os.endpoints_client
if not cls.client.has_admin_extensions():
raise cls.skipException("Admin extensions disabled")
diff --git a/tempest/tests/network/common.py b/tempest/tests/network/common.py
index 0bb806f..1cff2c4 100644
--- a/tempest/tests/network/common.py
+++ b/tempest/tests/network/common.py
@@ -21,7 +21,6 @@
from quantumclient.common import exceptions as exc
from tempest.common.utils.data_utils import rand_name
-from tempest import smoke
from tempest import test
@@ -103,7 +102,7 @@
self.client.delete_port(self.id)
-class TestNetworkSmokeCommon(smoke.DefaultClientSmokeTest):
+class TestNetworkSmokeCommon(test.DefaultClientSmokeTest):
"""
Base class for network smoke tests
"""
@@ -274,7 +273,7 @@
self.set_resource(name, server)
except AttributeError:
self.fail("Server not successfully created.")
- self.status_timeout(client.servers, server.id, 'ACTIVE')
+ test.status_timeout(client.servers, server.id, 'ACTIVE')
# The instance retrieved on creation is missing network
# details, necessitating retrieval after it becomes active to
# ensure correct details.
diff --git a/tempest/tests/network/test_network_basic_ops.py b/tempest/tests/network/test_network_basic_ops.py
index a38a5c0..92ca65f 100644
--- a/tempest/tests/network/test_network_basic_ops.py
+++ b/tempest/tests/network/test_network_basic_ops.py
@@ -17,11 +17,11 @@
# under the License.
from tempest.common.utils.data_utils import rand_name
-from tempest.tests.network.common import DeletableRouter
-from tempest.tests.network.common import TestNetworkSmokeCommon
+from tempest.test import attr
+import tempest.tests.network.common as net_common
-class TestNetworkBasicOps(TestNetworkSmokeCommon):
+class TestNetworkBasicOps(net_common.TestNetworkSmokeCommon):
"""
This smoke test suite assumes that Nova has been configured to
@@ -124,7 +124,7 @@
network_id = self.config.network.public_network_id
if router_id:
result = self.network_client.show_router(router_id)
- return AttributeDict(**result['router'])
+ return net_common.AttributeDict(**result['router'])
elif network_id:
router = self._create_router(tenant_id)
router.add_gateway(network_id)
@@ -143,20 +143,23 @@
),
)
result = self.network_client.create_router(body=body)
- router = DeletableRouter(client=self.network_client,
- **result['router'])
+ router = net_common.DeletableRouter(client=self.network_client,
+ **result['router'])
self.assertEqual(router.name, name)
self.set_resource(name, router)
return router
+ @attr(type='smoke')
def test_001_create_keypairs(self):
self.keypairs[self.tenant_id] = self._create_keypair(
self.compute_client)
+ @attr(type='smoke')
def test_002_create_security_groups(self):
self.security_groups[self.tenant_id] = self._create_security_group(
self.compute_client)
+ @attr(type='smoke')
def test_003_create_networks(self):
network = self._create_network(self.tenant_id)
router = self._get_router(self.tenant_id)
@@ -166,6 +169,7 @@
self.subnets.append(subnet)
self.routers.append(router)
+ @attr(type='smoke')
def test_004_check_networks(self):
#Checks that we see the newly created network/subnet/router via
#checking the result of list_[networks,routers,subnets]
@@ -189,6 +193,7 @@
self.assertIn(myrouter.name, seen_router_names)
self.assertIn(myrouter.id, seen_router_ids)
+ @attr(type='smoke')
def test_005_create_servers(self):
if not (self.keypairs or self.security_groups or self.networks):
raise self.skipTest('Necessary resources have not been defined')
@@ -201,6 +206,7 @@
name, keypair_name, security_groups)
self.servers.append(server)
+ @attr(type='smoke')
def test_006_check_tenant_network_connectivity(self):
if not self.config.network.tenant_networks_reachable:
msg = 'Tenant networks not configured to be reachable.'
@@ -214,6 +220,7 @@
"Timed out waiting for %s's ip to become "
"reachable" % server.name)
+ @attr(type='smoke')
def test_007_assign_floating_ips(self):
public_network_id = self.config.network.public_network_id
if not public_network_id:
@@ -225,6 +232,7 @@
self.floating_ips.setdefault(server, [])
self.floating_ips[server].append(floating_ip)
+ @attr(type='smoke')
def test_008_check_public_network_connectivity(self):
if not self.floating_ips:
raise self.skipTest('No floating ips have been allocated.')
diff --git a/tempest/tests/volume/test_volumes_get.py b/tempest/tests/volume/test_volumes_get.py
index a246afe..8e80e18 100644
--- a/tempest/tests/volume/test_volumes_get.py
+++ b/tempest/tests/volume/test_volumes_get.py
@@ -29,17 +29,22 @@
super(VolumesGetTest, cls).setUpClass()
cls.client = cls.volumes_client
- @attr(type='smoke')
- def test_volume_create_get_delete(self):
+ def _volume_create_get_delete(self, image_ref=None):
# Create a volume, Get it's details and Delete the volume
try:
volume = {}
v_name = rand_name('Volume-')
metadata = {'Type': 'work'}
#Create a volume
- resp, volume = self.client.create_volume(size=1,
- display_name=v_name,
- metadata=metadata)
+ if not image_ref:
+ resp, volume = self.client.create_volume(size=1,
+ display_name=v_name,
+ metadata=metadata)
+ else:
+ resp, volume = self.client.create_volume(size=1,
+ display_name=v_name,
+ metadata=metadata,
+ imageRef=image_ref)
self.assertEqual(200, resp.status)
self.assertTrue('id' in volume)
self.assertTrue('display_name' in volume)
@@ -100,6 +105,14 @@
self.assertEqual(202, resp.status)
self.client.wait_for_resource_deletion(volume['id'])
+ @attr(type='smoke')
+ def test_volume_create_get_delete(self):
+ self._volume_create_get_delete(image_ref=None)
+
+ @attr(type='smoke')
+ def test_volume_from_image(self):
+ self._volume_create_get_delete(image_ref=self.config.compute.image_ref)
+
class VolumesGetTestXML(VolumesGetTest):
_interface = "xml"
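
The if/else around create_volume in _volume_create_get_delete differs only in whether imageRef is passed. A kwargs-based helper, sketched here against the same client signature, would keep a single call site; whether that reads better is a style call:

    def _create_volume(self, v_name, metadata, image_ref=None):
        # Build the arguments once; imageRef is added only when a source
        # image was requested (same behaviour as the if/else above).
        kwargs = {'size': 1,
                  'display_name': v_name,
                  'metadata': metadata}
        if image_ref:
            kwargs['imageRef'] = image_ref
        return self.client.create_volume(**kwargs)
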
diff --git a/tools/find_stack_traces.py b/tools/find_stack_traces.py
index e6c1990..3129484 100755
--- a/tools/find_stack_traces.py
+++ b/tools/find_stack_traces.py
@@ -22,6 +22,46 @@
import sys
import urllib2
+import pprint
+pp = pprint.PrettyPrinter()
+
+NOVA_TIMESTAMP = r"\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d\.\d\d\d"
+
+NOVA_REGEX = r"(?P<timestamp>%s) (?P<pid>\d+ )?(?P<level>(ERROR|TRACE)) " \
+ "(?P<module>[\w\.]+) (?P<msg>.*)" % (NOVA_TIMESTAMP)
+
+
+class StackTrace(object):
+ timestamp = None
+ pid = None
+ level = ""
+ module = ""
+ msg = ""
+
+ def __init__(self, timestamp=None, pid=None, level="", module="",
+ msg=""):
+ self.timestamp = timestamp
+ self.pid = pid
+ self.level = level
+ self.module = module
+ self.msg = msg
+
+ def append(self, msg):
+ self.msg = self.msg + msg
+
+ def is_same(self, data):
+ return (data['timestamp'] == self.timestamp and
+ data['level'] == self.level)
+
+ def not_none(self):
+ return self.timestamp is not None
+
+ def __str__(self):
+ buff = "<%s %s %s>\n" % (self.timestamp, self.level, self.module)
+ for line in self.msg.splitlines():
+ buff = buff + line + "\n"
+ return buff
+
def hunt_for_stacktrace(url):
"""Return TRACE or ERROR lines out of logs."""
@@ -29,11 +69,33 @@
buf = StringIO.StringIO(page.read())
f = gzip.GzipFile(fileobj=buf)
content = f.read()
- traces = re.findall('^(.*? (TRACE|ERROR) .*?)$', content, re.MULTILINE)
- tracelist = map(lambda x: x[0], traces)
- # filter out log definitions as false possitives
- return filter(lambda x: not re.search('logging_exception_prefix', x),
- tracelist)
+
+ traces = []
+ trace = StackTrace()
+ for line in content.splitlines():
+ m = re.match(NOVA_REGEX, line)
+ if m:
+ data = m.groupdict()
+ if trace.not_none() and trace.is_same(data):
+ trace.append(data['msg'] + "\n")
+ else:
+ trace = StackTrace(
+ timestamp=data.get('timestamp'),
+ pid=data.get('pid'),
+ level=data.get('level'),
+ module=data.get('module'),
+ msg=data.get('msg'))
+
+ else:
+ if trace.not_none():
+ traces.append(trace)
+ trace = StackTrace()
+
+ # once more at the end to pick up any stragglers
+ if trace.not_none():
+ traces.append(trace)
+
+ return traces
def log_url(url, log):
@@ -60,6 +122,18 @@
sys.exit(0)
+def print_stats(items, fname, verbose=False):
+ errors = len(filter(lambda x: x.level == "ERROR", items))
+ traces = len(filter(lambda x: x.level == "TRACE", items))
+ print "%d ERRORS found in %s" % (errors, fname)
+ print "%d TRACES found in %s" % (traces, fname)
+
+ if verbose:
+ for item in items:
+ print item
+ print "\n\n"
+
+
def main():
if len(sys.argv) == 2:
url = sys.argv[1]
@@ -72,10 +146,10 @@
for log in loglist:
logurl = log_url(url, log)
traces = hunt_for_stacktrace(logurl)
+
if traces:
- print "\n\nTRACES found in %s\n" % log
- for line in traces:
- print line
+ print_stats(traces, log, verbose=True)
+
else:
usage()
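
The grouping behaviour of NOVA_REGEX can be sanity-checked against a sample log line; a small standalone sketch follows, with a made-up log line for illustration:

    import re

    NOVA_TIMESTAMP = r"\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d\.\d\d\d"
    NOVA_REGEX = r"(?P<timestamp>%s) (?P<pid>\d+ )?(?P<level>(ERROR|TRACE)) " \
                 r"(?P<module>[\w\.]+) (?P<msg>.*)" % (NOVA_TIMESTAMP)

    line = ("2013-02-15 10:23:45.678 1234 TRACE nova.compute.manager "
            "Traceback (most recent call last):")
    match = re.match(NOVA_REGEX, line)
    assert match is not None
    data = match.groupdict()
    # data is what feeds StackTrace(); the pid group keeps its trailing space.
    print(data['level'])   # TRACE
    print(data['module'])  # nova.compute.manager
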