Merge "Migrate scenario utils to tempest client"
diff --git a/HACKING.rst b/HACKING.rst
index 025bf74..29d5bf4 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -227,3 +227,48 @@
2. The unit tests cannot use setUpClass, instead fixtures and testresources
should be used for shared state between tests.
+
+
+.. _TestDocumentation:
+
+Test Documentation
+------------------
+For tests being added we need to require inline documentation in the form of
+docstrings to explain what is being tested. In API tests for a new API, a
+class level docstring linking to the API reference doc should be added. If
+such a reference doc doesn't exist yet, a TODO comment should be added
+indicating that the reference needs to be created (see the sketch further
+below). For individual API test cases, a method level docstring should be
+used to explain the functionality being tested if the test name isn't
+descriptive enough. For example::
+
+ def test_get_role_by_id(self):
+ """Get a role by its id."""
+
+the docstring there is superfluous and shouldn't be added, but for a method
+like::
+
+ def test_volume_backup_create_get_detailed_list_restore_delete(self):
+ pass
+
+a docstring would be useful because, while the test title is fairly
+descriptive, the operations being performed are complex enough that a bit
+more explanation will help people figure out the intent of the test.
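+
+For the API reference requirement above, a sketch of what the class level
+docstring could look like (the class name, base class and API named here are
+purely illustrative)::
+
+    class ExampleFeatureAdminTestJSON(base.BaseV2ComputeAdminTest):
+        """Tests the hypothetical os-example-feature admin API.
+
+        TODO: link to the API reference doc for os-example-feature once
+        the reference has been published.
+        """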
+
+For scenario tests a class level docstring describing the steps in the
+scenario is required. If there is more than one test case in the class,
+individual docstrings describing the workflow of each test method can be used
+instead; a sketch of that alternative is given after the example below. A
+good example of a class level docstring would be::
+
+ class TestVolumeBootPattern(manager.OfficialClientTest):
+ """
+ This test case attempts to reproduce the following steps:
+
+ * Create in Cinder some bootable volume importing a Glance image
+ * Boot an instance from the bootable volume
+ * Write content to the volume
+ * Delete an instance and Boot a new instance from the volume
+ * Check written content in the instance
+ * Create a volume snapshot while the instance is running
+ * Boot an additional instance from the new snapshot based volume
+ * Check written content in the instance booted from snapshot
+ """
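+
+When a scenario class holds more than one test case, a sketch of the
+per-method alternative (class and method names here are illustrative only)::
+
+    class TestExampleSwiftOps(manager.SwiftScenarioTest):
+
+        def test_basic_ops(self):
+            """Get the swift stat, create a container, upload an object,
+            download and verify it, then delete the object and container.
+            """
+
+        def test_acl_anonymous_download(self):
+            """Upload an object, open the container ACL for anonymous
+            reads, and verify the object can be fetched without a token.
+            """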
diff --git a/REVIEWING.rst b/REVIEWING.rst
index d6dc83e..74bd2ad 100644
--- a/REVIEWING.rst
+++ b/REVIEWING.rst
@@ -51,6 +51,15 @@
whether to skip or not.
+Test Documentation
+------------------
+When a new test is being added, refer to the :ref:`TestDocumentation` section
+in HACKING to see if the requirements are being met. With the exception of a
+class level docstring linking to the API ref doc in the API tests and a
+docstring for scenario tests, whether a docstring is required is left to the
+reviewer's discretion.
+
+
When to approve
---------------
* Every patch needs two +2s before being approved.
diff --git a/doc/source/conf.py b/doc/source/conf.py
index bd4e553..daa293c 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -27,7 +27,6 @@
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
- 'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'oslosphinx'
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index 3b0b834..dfcbaba 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -709,6 +709,42 @@
#ssh_user_regex=[["^.*[Cc]irros.*$", "root"]]
+[messaging]
+
+#
+# Options defined in tempest.config
+#
+
+# Catalog type of the Messaging service. (string value)
+#catalog_type=messaging
+
+# The maximum number of queue records per page when listing
+# queues (integer value)
+#max_queues_per_page=20
+
+# The maximum metadata size for a queue (integer value)
+#max_queue_metadata=65536
+
+# The maximum number of queue messages per page when listing
+# or posting messages (integer value)
+#max_messages_per_page=20
+
+# The maximum size of a message body (integer value)
+#max_message_size=262144
+
+# The maximum number of messages per claim (integer value)
+#max_messages_per_claim=20
+
+# The maximum ttl for a message (integer value)
+#max_message_ttl=1209600
+
+# The maximum ttl for a claim (integer value)
+#max_claim_ttl=43200
+
+# The maximum grace period for a claim (integer value)
+#max_claim_grace=43200
+
+
[negative]
#
@@ -897,42 +933,6 @@
#max_resources_per_stack=1000
-[queuing]
-
-#
-# Options defined in tempest.config
-#
-
-# Catalog type of the Queuing service. (string value)
-#catalog_type=queuing
-
-# The maximum number of queue records per page when listing
-# queues (integer value)
-#max_queues_per_page=20
-
-# The maximum metadata size for a queue (integer value)
-#max_queue_metadata=65536
-
-# The maximum number of queue message per page when listing
-# (or) posting messages (integer value)
-#max_messages_per_page=20
-
-# The maximum size of a message body (integer value)
-#max_message_size=262144
-
-# The maximum number of messages per claim (integer value)
-#max_messages_per_claim=20
-
-# The maximum ttl for a message (integer value)
-#max_message_ttl=1209600
-
-# The maximum ttl for a claim (integer value)
-#max_claim_ttl=43200
-
-# The maximum grace period for a claim (integer value)
-#max_claim_grace=43200
-
-
[scenario]
#
diff --git a/requirements.txt b/requirements.txt
index 9a3b74d..708ede3 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,25 +1,28 @@
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
pbr>=0.6,!=0.7,<1.0
anyjson>=0.3.3
httplib2>=0.7.5
jsonschema>=2.0.0,<3.0.0
testtools>=0.9.34
lxml>=2.3
-boto>=2.12.0,!=2.13.0
+boto>=2.32.1
paramiko>=1.13.0
-netaddr>=0.7.6
+netaddr>=0.7.12
python-ceilometerclient>=1.0.6
-python-glanceclient>=0.13.1
-python-keystoneclient>=0.9.0
-python-novaclient>=2.17.0
-python-neutronclient>=2.3.5,<3
-python-cinderclient>=1.0.7
+python-glanceclient>=0.14.0
+python-keystoneclient>=0.10.0
+python-novaclient>=2.18.0
+python-neutronclient>=2.3.6,<3
+python-cinderclient>=1.1.0
python-heatclient>=0.2.9
-python-ironicclient
-python-saharaclient>=0.6.0
-python-swiftclient>=2.0.2
+python-ironicclient>=0.2.1
+python-saharaclient>=0.7.3
+python-swiftclient>=2.2.0
testresources>=0.2.4
testrepository>=0.0.18
-oslo.config>=1.2.1
+oslo.config>=1.4.0 # Apache-2.0
six>=1.7.0
iso8601>=0.1.9
fixtures>=0.3.14
diff --git a/tempest/api/compute/admin/test_servers_negative.py b/tempest/api/compute/admin/test_servers_negative.py
index bd49fb2..9aa489c 100644
--- a/tempest/api/compute/admin/test_servers_negative.py
+++ b/tempest/api/compute/admin/test_servers_negative.py
@@ -17,6 +17,7 @@
import testtools
from tempest.api.compute import base
+from tempest.common import tempest_fixtures as fixtures
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
@@ -58,6 +59,8 @@
'Resize not available.')
@test.attr(type=['negative', 'gate'])
def test_resize_server_using_overlimit_ram(self):
+ # NOTE(mriedem): Avoid conflicts with os-quota-class-sets tests.
+ self.useFixture(fixtures.LockFixture('compute_quotas'))
flavor_name = data_utils.rand_name("flavor-")
flavor_id = self._get_unused_flavor_id()
resp, quota_set = self.quotas_client.get_default_quota_set(
@@ -78,6 +81,8 @@
'Resize not available.')
@test.attr(type=['negative', 'gate'])
def test_resize_server_using_overlimit_vcpus(self):
+ # NOTE(mriedem): Avoid conflicts with os-quota-class-sets tests.
+ self.useFixture(fixtures.LockFixture('compute_quotas'))
flavor_name = data_utils.rand_name("flavor-")
flavor_id = self._get_unused_flavor_id()
ram = 512
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index c7844a7..6c93d33 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -145,14 +145,19 @@
for server in cls.servers:
try:
cls.servers_client.delete_server(server['id'])
- except Exception:
+ except exceptions.NotFound:
+ # Something else already cleaned up the server, nothing to be
+ # worried about
pass
+ except Exception:
+ LOG.exception('Deleting server %s failed' % server['id'])
for server in cls.servers:
try:
cls.servers_client.wait_for_server_termination(server['id'])
except Exception:
- pass
+ LOG.exception('Waiting for deletion of server %s failed'
+ % server['id'])
@classmethod
def server_check_teardown(cls):
diff --git a/tempest/api/compute/v3/admin/test_servers_negative.py b/tempest/api/compute/v3/admin/test_servers_negative.py
index e48432b..f561ed3 100644
--- a/tempest/api/compute/v3/admin/test_servers_negative.py
+++ b/tempest/api/compute/v3/admin/test_servers_negative.py
@@ -17,6 +17,7 @@
import testtools
from tempest.api.compute import base
+from tempest.common import tempest_fixtures as fixtures
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
@@ -56,6 +57,8 @@
@test.attr(type=['negative', 'gate'])
def test_resize_server_using_overlimit_ram(self):
+ # NOTE(mriedem): Avoid conflicts with os-quota-class-sets tests.
+ self.useFixture(fixtures.LockFixture('compute_quotas'))
flavor_name = data_utils.rand_name("flavor-")
flavor_id = self._get_unused_flavor_id()
resp, quota_set = self.quotas_client.get_default_quota_set(
@@ -74,6 +77,8 @@
@test.attr(type=['negative', 'gate'])
def test_resize_server_using_overlimit_vcpus(self):
+ # NOTE(mriedem): Avoid conflicts with os-quota-class-sets tests.
+ self.useFixture(fixtures.LockFixture('compute_quotas'))
flavor_name = data_utils.rand_name("flavor-")
flavor_id = self._get_unused_flavor_id()
ram = 512
diff --git a/tempest/api/queuing/__init__.py b/tempest/api/messaging/__init__.py
similarity index 100%
rename from tempest/api/queuing/__init__.py
rename to tempest/api/messaging/__init__.py
diff --git a/tempest/api/queuing/base.py b/tempest/api/messaging/base.py
similarity index 78%
rename from tempest/api/queuing/base.py
rename to tempest/api/messaging/base.py
index 41a02f2..0e062c5 100644
--- a/tempest/api/queuing/base.py
+++ b/tempest/api/messaging/base.py
@@ -23,25 +23,25 @@
LOG = logging.getLogger(__name__)
-class BaseQueuingTest(test.BaseTestCase):
+class BaseMessagingTest(test.BaseTestCase):
"""
- Base class for the Queuing tests that use the Tempest Zaqar REST client
+ Base class for the Messaging tests that use the Tempest Zaqar REST client
It is assumed that the following option is defined in the
[service_available] section of etc/tempest.conf
- queuing as True
+        zaqar as True
"""
@classmethod
def setUpClass(cls):
- super(BaseQueuingTest, cls).setUpClass()
+ super(BaseMessagingTest, cls).setUpClass()
if not CONF.service_available.zaqar:
raise cls.skipException("Zaqar support is required")
os = cls.get_client_manager()
- cls.queuing_cfg = CONF.queuing
- cls.client = os.queuing_client
+ cls.messaging_cfg = CONF.messaging
+ cls.client = os.messaging_client
@classmethod
def create_queue(cls, queue_name):
@@ -93,42 +93,42 @@
@classmethod
def post_messages(cls, queue_name, rbody):
- '''Wrapper utility that posts messages to a queue.'''
+ """Wrapper utility that posts messages to a queue."""
resp, body = cls.client.post_messages(queue_name, rbody)
return resp, body
@classmethod
def list_messages(cls, queue_name):
- '''Wrapper utility that lists the messages in a queue.'''
+ """Wrapper utility that lists the messages in a queue."""
resp, body = cls.client.list_messages(queue_name)
return resp, body
@classmethod
def get_single_message(cls, message_uri):
- '''Wrapper utility that gets a single message.'''
+ """Wrapper utility that gets a single message."""
resp, body = cls.client.get_single_message(message_uri)
return resp, body
@classmethod
def get_multiple_messages(cls, message_uri):
- '''Wrapper utility that gets multiple messages.'''
+ """Wrapper utility that gets multiple messages."""
resp, body = cls.client.get_multiple_messages(message_uri)
return resp, body
@classmethod
def delete_messages(cls, message_uri):
- '''Wrapper utility that deletes messages.'''
+ """Wrapper utility that deletes messages."""
resp, body = cls.client.delete_messages(message_uri)
return resp, body
@classmethod
def post_claims(cls, queue_name, rbody, url_params=False):
- '''Wrapper utility that claims messages.'''
+ """Wrapper utility that claims messages."""
resp, body = cls.client.post_claims(
queue_name, rbody, url_params=False)
@@ -136,33 +136,34 @@
@classmethod
def query_claim(cls, claim_uri):
- '''Wrapper utility that gets a claim.'''
+ """Wrapper utility that gets a claim."""
resp, body = cls.client.query_claim(claim_uri)
return resp, body
@classmethod
def update_claim(cls, claim_uri, rbody):
- '''Wrapper utility that updates a claim.'''
+ """Wrapper utility that updates a claim."""
resp, body = cls.client.update_claim(claim_uri, rbody)
return resp, body
@classmethod
def release_claim(cls, claim_uri):
- '''Wrapper utility that deletes a claim.'''
+ """Wrapper utility that deletes a claim."""
resp, body = cls.client.release_claim(claim_uri)
return resp, body
@classmethod
def generate_message_body(cls, repeat=1):
- '''Wrapper utility that sets the metadata of a queue.'''
- message_ttl = data_utils.rand_int_id(start=60,
- end=CONF.queuing.max_message_ttl)
+ """Wrapper utility that sets the metadata of a queue."""
+        message_ttl = data_utils.rand_int_id(
+            start=60, end=CONF.messaging.max_message_ttl)
- key = data_utils.arbitrary_string(size=20, base_text='QueuingKey')
- value = data_utils.arbitrary_string(size=20, base_text='QueuingValue')
+ key = data_utils.arbitrary_string(size=20, base_text='MessagingKey')
+ value = data_utils.arbitrary_string(size=20,
+ base_text='MessagingValue')
message_body = {key: value}
rbody = ([{'body': message_body, 'ttl': message_ttl}] * repeat)
diff --git a/tempest/api/queuing/test_claims.py b/tempest/api/messaging/test_claims.py
similarity index 91%
rename from tempest/api/queuing/test_claims.py
rename to tempest/api/messaging/test_claims.py
index a306623..885f00e 100644
--- a/tempest/api/queuing/test_claims.py
+++ b/tempest/api/messaging/test_claims.py
@@ -16,7 +16,7 @@
import logging
import urlparse
-from tempest.api.queuing import base
+from tempest.api.messaging import base
from tempest.common.utils import data_utils
from tempest import config
from tempest import test
@@ -26,7 +26,7 @@
CONF = config.CONF
-class TestClaims(base.BaseQueuingTest):
+class TestClaims(base.BaseMessagingTest):
_interface = 'json'
@classmethod
@@ -44,9 +44,9 @@
# Post Claim
claim_ttl = data_utils.rand_int_id(start=60,
- end=CONF.queuing.max_claim_ttl)
- claim_grace = data_utils.rand_int_id(start=60,
- end=CONF.queuing.max_claim_grace)
+ end=CONF.messaging.max_claim_ttl)
+        claim_grace = data_utils.rand_int_id(
+            start=60, end=CONF.messaging.max_claim_grace)
claim_body = {"ttl": claim_ttl, "grace": claim_grace}
resp, body = self.client.post_claims(queue_name=self.queue_name,
rbody=claim_body)
@@ -90,7 +90,7 @@
# Update Claim
claim_ttl = data_utils.rand_int_id(start=60,
- end=CONF.queuing.max_claim_ttl)
+ end=CONF.messaging.max_claim_ttl)
update_rbody = {"ttl": claim_ttl}
self.client.update_claim(claim_uri, rbody=update_rbody)
diff --git a/tempest/api/queuing/test_messages.py b/tempest/api/messaging/test_messages.py
similarity index 96%
rename from tempest/api/queuing/test_messages.py
rename to tempest/api/messaging/test_messages.py
index 9546c91..3217361 100644
--- a/tempest/api/queuing/test_messages.py
+++ b/tempest/api/messaging/test_messages.py
@@ -15,7 +15,7 @@
import logging
-from tempest.api.queuing import base
+from tempest.api.messaging import base
from tempest.common.utils import data_utils
from tempest import config
from tempest import test
@@ -25,7 +25,7 @@
CONF = config.CONF
-class TestMessages(base.BaseQueuingTest):
+class TestMessages(base.BaseMessagingTest):
_interface = 'json'
@classmethod
@@ -35,7 +35,7 @@
# Create Queue
cls.client.create_queue(cls.queue_name)
- def _post_messages(self, repeat=CONF.queuing.max_messages_per_page):
+ def _post_messages(self, repeat=CONF.messaging.max_messages_per_page):
message_body = self.generate_message_body(repeat=repeat)
resp, body = self.post_messages(queue_name=self.queue_name,
rbody=message_body)
diff --git a/tempest/api/queuing/test_queues.py b/tempest/api/messaging/test_queues.py
similarity index 96%
rename from tempest/api/queuing/test_queues.py
rename to tempest/api/messaging/test_queues.py
index b340b60..edfe10e 100644
--- a/tempest/api/queuing/test_queues.py
+++ b/tempest/api/messaging/test_queues.py
@@ -18,7 +18,7 @@
from six import moves
from testtools import matchers
-from tempest.api.queuing import base
+from tempest.api.messaging import base
from tempest.common.utils import data_utils
from tempest import test
@@ -26,7 +26,7 @@
LOG = logging.getLogger(__name__)
-class TestQueues(base.BaseQueuingTest):
+class TestQueues(base.BaseMessagingTest):
@test.attr(type='smoke')
def test_create_queue(self):
@@ -40,7 +40,7 @@
self.assertEqual('', body)
-class TestManageQueue(base.BaseQueuingTest):
+class TestManageQueue(base.BaseMessagingTest):
_interface = 'json'
@classmethod
diff --git a/tempest/api/network/test_ports.py b/tempest/api/network/test_ports.py
index ce0bb57..cdd3a29 100644
--- a/tempest/api/network/test_ports.py
+++ b/tempest/api/network/test_ports.py
@@ -128,7 +128,6 @@
for port in ports:
self.assertEqual(sorted(fields), sorted(port.keys()))
- @test.skip_because(bug="1364166")
@test.attr(type='smoke')
def test_update_port_with_second_ip(self):
# Create a network with two subnets
diff --git a/tempest/api/object_storage/test_account_bulk.py b/tempest/api/object_storage/test_account_bulk.py
index a94c883..743f1aa 100644
--- a/tempest/api/object_storage/test_account_bulk.py
+++ b/tempest/api/object_storage/test_account_bulk.py
@@ -50,16 +50,27 @@
return tarpath.name, container_name, object_name
- @test.attr(type='gate')
- def test_extract_archive(self):
- # Test bulk operation of file upload with an archived file
- filepath, container_name, object_name = self._create_archive()
-
+ def _upload_archive(self, filepath):
+ # upload an archived file
params = {'extract-archive': 'tar'}
with open(filepath) as fh:
mydata = fh.read()
resp, body = self.account_client.create_account(data=mydata,
params=params)
+ return resp, body
+
+ def _check_contents_deleted(self, container_name):
+ param = {'format': 'txt'}
+ resp, body = self.account_client.list_account_containers(param)
+ self.assertHeaders(resp, 'Account', 'GET')
+ self.assertNotIn(container_name, body)
+
+ @test.attr(type='gate')
+ @test.requires_ext(extension='bulk', service='object')
+ def test_extract_archive(self):
+ # Test bulk operation of file upload with an archived file
+ filepath, container_name, object_name = self._create_archive()
+ resp, _ = self._upload_archive(filepath)
self.containers.append(container_name)
@@ -95,23 +106,17 @@
self.assertIn(object_name, [c['name'] for c in contents_list])
@test.attr(type='gate')
+ @test.requires_ext(extension='bulk', service='object')
def test_bulk_delete(self):
# Test bulk operation of deleting multiple files
filepath, container_name, object_name = self._create_archive()
-
- params = {'extract-archive': 'tar'}
- with open(filepath) as fh:
- mydata = fh.read()
- resp, body = self.account_client.create_account(data=mydata,
- params=params)
+ self._upload_archive(filepath)
data = '%s/%s\n%s' % (container_name, object_name, container_name)
params = {'bulk-delete': ''}
resp, body = self.account_client.delete_account(data=data,
params=params)
- self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
-
# When deleting multiple files using the bulk operation, the response
# does not contain 'content-length' header. This is the special case,
# therefore the existence of response headers is checked without
@@ -124,11 +129,33 @@
# Check only the format of common headers with custom matcher
self.assertThat(resp, custom_matchers.AreAllWellFormatted())
- # Check if a container is deleted
- param = {'format': 'txt'}
- resp, body = self.account_client.list_account_containers(param)
+ # Check if uploaded contents are completely deleted
+ self._check_contents_deleted(container_name)
- self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
- self.assertHeaders(resp, 'Account', 'GET')
+ @test.attr(type='gate')
+ @test.requires_ext(extension='bulk', service='object')
+ def test_bulk_delete_by_POST(self):
+ # Test bulk operation of deleting multiple files
+ filepath, container_name, object_name = self._create_archive()
+ self._upload_archive(filepath)
- self.assertNotIn(container_name, body)
+ data = '%s/%s\n%s' % (container_name, object_name, container_name)
+ params = {'bulk-delete': ''}
+
+ resp, body = self.account_client.create_account_metadata(
+ {}, data=data, params=params)
+
+ # When deleting multiple files using the bulk operation, the response
+ # does not contain 'content-length' header. This is the special case,
+ # therefore the existence of response headers is checked without
+ # custom matcher.
+ self.assertIn('transfer-encoding', resp)
+ self.assertIn('content-type', resp)
+ self.assertIn('x-trans-id', resp)
+ self.assertIn('date', resp)
+
+ # Check only the format of common headers with custom matcher
+ self.assertThat(resp, custom_matchers.AreAllWellFormatted())
+
+ # Check if uploaded contents are completely deleted
+ self._check_contents_deleted(container_name)
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index 43f48ff..8aad058 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -69,6 +69,7 @@
if not CONF.volume_feature_enabled.api_v2:
msg = "Volume API v2 is disabled"
raise cls.skipException(msg)
+ cls.snapshots_client = cls.os.snapshots_v2_client
cls.volumes_client = cls.os.volumes_v2_client
cls.volumes_extension_client = cls.os.volumes_v2_extension_client
cls.availability_zone_client = (
diff --git a/tempest/api/volume/test_snapshot_metadata.py b/tempest/api/volume/test_snapshot_metadata.py
index 94ba095..7040891 100644
--- a/tempest/api/volume/test_snapshot_metadata.py
+++ b/tempest/api/volume/test_snapshot_metadata.py
@@ -17,13 +17,12 @@
from tempest import test
-class SnapshotMetadataTest(base.BaseVolumeV1Test):
- _interface = "json"
+class SnapshotV2MetadataTestJSON(base.BaseVolumeTest):
@classmethod
@test.safe_setup
def setUpClass(cls):
- super(SnapshotMetadataTest, cls).setUpClass()
+ super(SnapshotV2MetadataTestJSON, cls).setUpClass()
cls.client = cls.snapshots_client
# Create a volume
cls.volume = cls.create_volume()
@@ -34,7 +33,7 @@
def tearDown(self):
# Update the metadata to {}
self.client.update_snapshot_metadata(self.snapshot_id, {})
- super(SnapshotMetadataTest, self).tearDown()
+ super(SnapshotV2MetadataTestJSON, self).tearDown()
@test.attr(type='gate')
def test_create_get_delete_snapshot_metadata(self):
@@ -100,5 +99,13 @@
self.assertEqual(expect, body)
-class SnapshotMetadataTestXML(SnapshotMetadataTest):
+class SnapshotV2MetadataTestXML(SnapshotV2MetadataTestJSON):
+ _interface = "xml"
+
+
+class SnapshotV1MetadataTestJSON(SnapshotV2MetadataTestJSON):
+ _api_version = 1
+
+
+class SnapshotV1MetadataTestXML(SnapshotV1MetadataTestJSON):
_interface = "xml"
diff --git a/tempest/api/volume/test_volumes_snapshots.py b/tempest/api/volume/test_volumes_snapshots.py
index 7db1ef1..8390f03 100644
--- a/tempest/api/volume/test_volumes_snapshots.py
+++ b/tempest/api/volume/test_volumes_snapshots.py
@@ -20,21 +20,19 @@
CONF = config.CONF
-class VolumesSnapshotTest(base.BaseVolumeV1Test):
- _interface = "json"
+class VolumesV2SnapshotTestJSON(base.BaseVolumeTest):
@classmethod
@test.safe_setup
def setUpClass(cls):
- super(VolumesSnapshotTest, cls).setUpClass()
+ super(VolumesV2SnapshotTestJSON, cls).setUpClass()
cls.volume_origin = cls.create_volume()
if not CONF.volume_feature_enabled.snapshot:
raise cls.skipException("Cinder volume snapshots are disabled")
- @classmethod
- def tearDownClass(cls):
- super(VolumesSnapshotTest, cls).tearDownClass()
+ cls.name_field = cls.special_fields['name_field']
+ cls.descrip_field = cls.special_fields['descrip_field']
def _detach(self, volume_id):
"""Detach volume."""
@@ -90,8 +88,8 @@
def test_snapshot_create_get_list_update_delete(self):
# Create a snapshot
s_name = data_utils.rand_name('snap')
- snapshot = self.create_snapshot(self.volume_origin['id'],
- display_name=s_name)
+ params = {self.name_field: s_name}
+ snapshot = self.create_snapshot(self.volume_origin['id'], **params)
# Get the snap and check for some of its details
_, snap_get = self.snapshots_client.get_snapshot(snapshot['id'])
@@ -100,26 +98,26 @@
"Referred volume origin mismatch")
# Compare also with the output from the list action
- tracking_data = (snapshot['id'], snapshot['display_name'])
+ tracking_data = (snapshot['id'], snapshot[self.name_field])
_, snaps_list = self.snapshots_client.list_snapshots()
- snaps_data = [(f['id'], f['display_name']) for f in snaps_list]
+ snaps_data = [(f['id'], f[self.name_field]) for f in snaps_list]
self.assertIn(tracking_data, snaps_data)
# Updates snapshot with new values
new_s_name = data_utils.rand_name('new-snap')
new_desc = 'This is the new description of snapshot.'
+ params = {self.name_field: new_s_name,
+ self.descrip_field: new_desc}
_, update_snapshot = \
- self.snapshots_client.update_snapshot(snapshot['id'],
- display_name=new_s_name,
- display_description=new_desc)
+ self.snapshots_client.update_snapshot(snapshot['id'], **params)
# Assert response body for update_snapshot method
- self.assertEqual(new_s_name, update_snapshot['display_name'])
- self.assertEqual(new_desc, update_snapshot['display_description'])
+ self.assertEqual(new_s_name, update_snapshot[self.name_field])
+ self.assertEqual(new_desc, update_snapshot[self.descrip_field])
# Assert response body for get_snapshot method
_, updated_snapshot = \
self.snapshots_client.get_snapshot(snapshot['id'])
- self.assertEqual(new_s_name, updated_snapshot['display_name'])
- self.assertEqual(new_desc, updated_snapshot['display_description'])
+ self.assertEqual(new_s_name, updated_snapshot[self.name_field])
+ self.assertEqual(new_desc, updated_snapshot[self.descrip_field])
# Delete the snapshot
self.snapshots_client.delete_snapshot(snapshot['id'])
@@ -131,11 +129,11 @@
"""list snapshots with params."""
# Create a snapshot
display_name = data_utils.rand_name('snap')
- snapshot = self.create_snapshot(self.volume_origin['id'],
- display_name=display_name)
+ params = {self.name_field: display_name}
+ snapshot = self.create_snapshot(self.volume_origin['id'], **params)
# Verify list snapshots by display_name filter
- params = {'display_name': snapshot['display_name']}
+ params = {self.name_field: snapshot[self.name_field]}
self._list_by_param_values_and_assert(params)
# Verify list snapshots by status filter
@@ -144,7 +142,7 @@
# Verify list snapshots by status and display name filter
params = {'status': 'available',
- 'display_name': snapshot['display_name']}
+ self.name_field: snapshot[self.name_field]}
self._list_by_param_values_and_assert(params)
@test.attr(type='gate')
@@ -152,18 +150,18 @@
"""list snapshot details with params."""
# Create a snapshot
display_name = data_utils.rand_name('snap')
- snapshot = self.create_snapshot(self.volume_origin['id'],
- display_name=display_name)
+ params = {self.name_field: display_name}
+ snapshot = self.create_snapshot(self.volume_origin['id'], **params)
# Verify list snapshot details by display_name filter
- params = {'display_name': snapshot['display_name']}
+ params = {self.name_field: snapshot[self.name_field]}
self._list_by_param_values_and_assert(params, with_detail=True)
# Verify list snapshot details by status filter
params = {'status': 'available'}
self._list_by_param_values_and_assert(params, with_detail=True)
# Verify list snapshot details by status and display name filter
params = {'status': 'available',
- 'display_name': snapshot['display_name']}
+ self.name_field: snapshot[self.name_field]}
self._list_by_param_values_and_assert(params, with_detail=True)
@test.attr(type='gate')
@@ -181,5 +179,13 @@
self.clear_snapshots()
-class VolumesSnapshotTestXML(VolumesSnapshotTest):
+class VolumesV2SnapshotTestXML(VolumesV2SnapshotTestJSON):
+ _interface = "xml"
+
+
+class VolumesV1SnapshotTestJSON(VolumesV2SnapshotTestJSON):
+ _api_version = 1
+
+
+class VolumesV1SnapshotTestXML(VolumesV1SnapshotTestJSON):
_interface = "xml"
diff --git a/tempest/api/volume/test_volumes_snapshots_negative.py b/tempest/api/volume/test_volumes_snapshots_negative.py
index 61aa307..ddecda8 100644
--- a/tempest/api/volume/test_volumes_snapshots_negative.py
+++ b/tempest/api/volume/test_volumes_snapshots_negative.py
@@ -21,12 +21,11 @@
CONF = config.CONF
-class VolumesSnapshotNegativeTest(base.BaseVolumeV1Test):
- _interface = "json"
+class VolumesV2SnapshotNegativeTestJSON(base.BaseVolumeTest):
@classmethod
def setUpClass(cls):
- super(VolumesSnapshotNegativeTest, cls).setUpClass()
+ super(VolumesV2SnapshotNegativeTestJSON, cls).setUpClass()
if not CONF.volume_feature_enabled.snapshot:
raise cls.skipException("Cinder volume snapshots are disabled")
@@ -48,5 +47,13 @@
None, display_name=s_name)
-class VolumesSnapshotNegativeTestXML(VolumesSnapshotNegativeTest):
+class VolumesV2SnapshotNegativeTestXML(VolumesV2SnapshotNegativeTestJSON):
+ _interface = "xml"
+
+
+class VolumesV1SnapshotNegativeTestJSON(VolumesV2SnapshotNegativeTestJSON):
+ _api_version = 1
+
+
+class VolumesV1SnapshotNegativeTestXML(VolumesV1SnapshotNegativeTestJSON):
_interface = "xml"
diff --git a/tempest/api_schema/response/compute/v2/security_group_default_rule.py b/tempest/api_schema/response/compute/v2/security_group_default_rule.py
new file mode 100644
index 0000000..9246ab8
--- /dev/null
+++ b/tempest/api_schema/response/compute/v2/security_group_default_rule.py
@@ -0,0 +1,61 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+common_security_group_default_rule_info = {
+ 'type': 'object',
+ 'properties': {
+ 'from_port': {'type': 'integer'},
+ 'id': {'type': 'integer'},
+ 'ip_protocol': {'type': 'string'},
+ 'ip_range': {
+ 'type': 'object',
+ 'properties': {
+ 'cidr': {'type': 'string'}
+ },
+ 'required': ['cidr'],
+ },
+ 'to_port': {'type': 'integer'},
+ },
+ 'required': ['from_port', 'id', 'ip_protocol', 'ip_range', 'to_port'],
+}
+
+create_get_security_group_default_rule = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'security_group_default_rule':
+ common_security_group_default_rule_info
+ },
+ 'required': ['security_group_default_rule']
+ }
+}
+
+delete_security_group_default_rule = {
+ 'status_code': [204]
+}
+
+list_security_group_default_rules = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'security_group_default_rules': {
+ 'type': 'array',
+ 'items': common_security_group_default_rule_info
+ }
+ },
+ 'required': ['security_group_default_rules']
+ }
+}
diff --git a/tempest/api_schema/response/queuing/__init__.py b/tempest/api_schema/response/messaging/__init__.py
similarity index 100%
rename from tempest/api_schema/response/queuing/__init__.py
rename to tempest/api_schema/response/messaging/__init__.py
diff --git a/tempest/api_schema/response/queuing/v1/__init__.py b/tempest/api_schema/response/messaging/v1/__init__.py
similarity index 100%
rename from tempest/api_schema/response/queuing/v1/__init__.py
rename to tempest/api_schema/response/messaging/v1/__init__.py
diff --git a/tempest/api_schema/response/queuing/v1/queues.py b/tempest/api_schema/response/messaging/v1/queues.py
similarity index 100%
rename from tempest/api_schema/response/queuing/v1/queues.py
rename to tempest/api_schema/response/messaging/v1/queues.py
diff --git a/tempest/clients.py b/tempest/clients.py
index eab496e..89cffba 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -151,6 +151,8 @@
from tempest.services.identity.xml.identity_client import TokenClientXML
from tempest.services.image.v1.json.image_client import ImageClientJSON
from tempest.services.image.v2.json.image_client import ImageClientV2JSON
+from tempest.services.messaging.json.messaging_client import \
+ MessagingClientJSON
from tempest.services.network.json.network_client import NetworkClientJSON
from tempest.services.network.xml.network_client import NetworkClientXML
from tempest.services.object_storage.account_client import AccountClient
@@ -162,7 +164,6 @@
ObjectClientCustomizedHeader
from tempest.services.orchestration.json.orchestration_client import \
OrchestrationClient
-from tempest.services.queuing.json.queuing_client import QueuingClientJSON
from tempest.services.telemetry.json.telemetry_client import \
TelemetryClientJSON
from tempest.services.telemetry.xml.telemetry_client import \
@@ -188,11 +189,15 @@
from tempest.services.volume.v2.json.extensions_client import \
ExtensionsV2ClientJSON as VolumeV2ExtensionClientJSON
from tempest.services.volume.v2.json.qos_client import QosSpecsV2ClientJSON
+from tempest.services.volume.v2.json.snapshots_client import \
+ SnapshotsV2ClientJSON
from tempest.services.volume.v2.json.volumes_client import VolumesV2ClientJSON
from tempest.services.volume.v2.xml.availability_zone_client import \
VolumeV2AvailabilityZoneClientXML
from tempest.services.volume.v2.xml.extensions_client import \
ExtensionsV2ClientXML as VolumeV2ExtensionClientXML
+from tempest.services.volume.v2.xml.snapshots_client import \
+ SnapshotsV2ClientXML
from tempest.services.volume.v2.xml.volumes_client import VolumesV2ClientXML
from tempest.services.volume.xml.admin.volume_hosts_client import \
VolumeHostsClientXML
@@ -245,6 +250,7 @@
self.auth_provider)
self.backups_client = BackupsClientXML(self.auth_provider)
self.snapshots_client = SnapshotsClientXML(self.auth_provider)
+ self.snapshots_v2_client = SnapshotsV2ClientXML(self.auth_provider)
self.volumes_client = VolumesClientXML(self.auth_provider)
self.volumes_v2_client = VolumesV2ClientXML(self.auth_provider)
self.volume_types_client = VolumeTypesClientXML(
@@ -324,6 +330,8 @@
self.auth_provider)
self.backups_client = BackupsClientJSON(self.auth_provider)
self.snapshots_client = SnapshotsClientJSON(self.auth_provider)
+ self.snapshots_v2_client = SnapshotsV2ClientJSON(
+ self.auth_provider)
self.volumes_client = VolumesClientJSON(self.auth_provider)
self.volumes_v2_client = VolumesV2ClientJSON(self.auth_provider)
self.volume_types_client = VolumeTypesClientJSON(
@@ -384,7 +392,7 @@
self.auth_provider)
self.database_versions_client = DatabaseVersionsClientJSON(
self.auth_provider)
- self.queuing_client = QueuingClientJSON(self.auth_provider)
+ self.messaging_client = MessagingClientJSON(self.auth_provider)
if CONF.service_available.ceilometer:
self.telemetry_client = TelemetryClientJSON(
self.auth_provider)
diff --git a/tempest/cmd/javelin.py b/tempest/cmd/javelin.py
index 3f8db3d..bf0cc70 100755
--- a/tempest/cmd/javelin.py
+++ b/tempest/cmd/javelin.py
@@ -213,9 +213,7 @@
self.check_users()
self.check_objects()
self.check_servers()
- # TODO(sdague): Volumes not yet working, bring it back once the
- # code is self testing.
- # self.check_volumes()
+ self.check_volumes()
self.check_telemetry()
def check_users(self):
@@ -300,15 +298,15 @@
LOG.info("checking volumes")
for volume in self.res['volumes']:
client = client_for_user(volume['owner'])
- found = _get_volume_by_name(client, volume['name'])
+ vol_body = _get_volume_by_name(client, volume['name'])
self.assertIsNotNone(
- found,
+ vol_body,
"Couldn't find expected volume %s" % volume['name'])
# Verify that a volume's attachment retrieved
server_id = _get_server_by_name(client, volume['server'])['id']
- attachment = self.client.get_attachment_from_volume(volume)
- self.assertEqual(volume['id'], attachment['volume_id'])
+ attachment = client.volumes.get_attachment_from_volume(vol_body)
+ self.assertEqual(vol_body['id'], attachment['volume_id'])
self.assertEqual(server_id, attachment['server_id'])
def _confirm_telemetry_sample(self, server, sample):
@@ -501,8 +499,8 @@
def _get_volume_by_name(client, name):
r, body = client.volumes.list_volumes()
- for volume in body['volumes']:
- if name == volume['name']:
+ for volume in body:
+ if name == volume['display_name']:
return volume
return None
@@ -512,11 +510,15 @@
client = client_for_user(volume['owner'])
# only create a volume if the name isn't here
- r, body = client.volumes.list_volumes()
- if any(item['name'] == volume['name'] for item in body):
+ if _get_volume_by_name(client, volume['name']):
+ LOG.info("volume '%s' already exists" % volume['name'])
continue
- client.volumes.create_volume(volume['name'], volume['size'])
+ size = volume['gb']
+ v_name = volume['name']
+ resp, body = client.volumes.create_volume(size=size,
+ display_name=v_name)
+ client.volumes.wait_for_volume_status(body['id'], 'available')
def destroy_volumes(volumes):
@@ -529,9 +531,10 @@
def attach_volumes(volumes):
for volume in volumes:
client = client_for_user(volume['owner'])
-
server_id = _get_server_by_name(client, volume['server'])['id']
- client.volumes.attach_volume(volume['name'], server_id)
+ volume_id = _get_volume_by_name(client, volume['name'])['id']
+ device = volume['device']
+ client.volumes.attach_volume(volume_id, server_id, device)
#######################
@@ -552,10 +555,8 @@
create_objects(RES['objects'])
create_images(RES['images'])
create_servers(RES['servers'])
- # TODO(sdague): volumes definition doesn't work yet, bring it
- # back once we're actually executing the code
- # create_volumes(RES['volumes'])
- # attach_volumes(RES['volumes'])
+ create_volumes(RES['volumes'])
+ attach_volumes(RES['volumes'])
def destroy_resources():
diff --git a/tempest/cmd/resources.yaml b/tempest/cmd/resources.yaml
index 3450e1f..19ee6d5 100644
--- a/tempest/cmd/resources.yaml
+++ b/tempest/cmd/resources.yaml
@@ -36,11 +36,13 @@
- name: assegai
server: peltast
owner: javelin
- size: 1
+ gb: 1
+ device: /dev/vdb
- name: pifpouf
server: hoplite
owner: javelin
- size: 2
+ gb: 2
+ device: /dev/vdb
servers:
- name: peltast
owner: javelin
diff --git a/tempest/cmd/verify_tempest_config.py b/tempest/cmd/verify_tempest_config.py
index cd696a9..5046bff 100755
--- a/tempest/cmd/verify_tempest_config.py
+++ b/tempest/cmd/verify_tempest_config.py
@@ -247,7 +247,7 @@
'data_processing': 'sahara',
'baremetal': 'ironic',
'identity': 'keystone',
- 'queuing': 'zaqar',
+ 'messaging': 'zaqar',
'database': 'trove'
}
# Get catalog list for endpoints to use for validation
diff --git a/tempest/common/rest_client.py b/tempest/common/rest_client.py
index e584cbf..00fe8d2 100644
--- a/tempest/common/rest_client.py
+++ b/tempest/common/rest_client.py
@@ -17,11 +17,11 @@
import collections
import json
import re
-import string
import time
import jsonschema
from lxml import etree
+import six
from tempest.common import http
from tempest.common.utils import misc as misc_utils
@@ -40,6 +40,19 @@
HTTP_SUCCESS = (200, 201, 202, 203, 204, 205, 206)
+# convert a structure into a string safely
+def safe_body(body, maxlen=2048):
+ try:
+ text = six.text_type(body)
+ except UnicodeDecodeError:
+        # if this isn't actually text, return a marker noting that the
+        # binary body was removed
+ return "<BinaryData: removed>"
+ if len(text) > maxlen:
+ return text[:maxlen]
+ else:
+ return text
+
+
class RestClient(object):
TYPE = "json"
@@ -258,6 +271,31 @@
self.LOG.debug('Starting Request (%s): %s %s' %
(caller_name, method, req_url))
+ def _log_request_full(self, method, req_url, resp,
+ secs="", req_headers=None,
+ req_body=None, resp_body=None,
+ caller_name=None, extra=None):
+ if 'X-Auth-Token' in req_headers:
+ req_headers['X-Auth-Token'] = '<omitted>'
+ log_fmt = """Request (%s): %s %s %s%s
+ Request - Headers: %s
+ Body: %s
+ Response - Headers: %s
+ Body: %s"""
+
+ self.LOG.debug(
+ log_fmt % (
+ caller_name,
+ resp['status'],
+ method,
+ req_url,
+ secs,
+ str(req_headers),
+ safe_body(req_body),
+ str(resp),
+ safe_body(resp_body)),
+ extra=extra)
+
def _log_request(self, method, req_url, resp,
secs="", req_headers=None,
req_body=None, resp_body=None):
@@ -281,32 +319,10 @@
secs),
extra=extra)
- # We intentionally duplicate the info content because in a parallel
- # world this is important to match
- trace_regex = CONF.debug.trace_requests
- if trace_regex and re.search(trace_regex, caller_name):
- if 'X-Auth-Token' in req_headers:
- req_headers['X-Auth-Token'] = '<omitted>'
- log_fmt = """Request (%s): %s %s %s%s
- Request - Headers: %s
- Body: %s
- Response - Headers: %s
- Body: %s"""
-
- self.LOG.debug(
- log_fmt % (
- caller_name,
- resp['status'],
- method,
- req_url,
- secs,
- str(req_headers),
- filter(lambda x: x in string.printable,
- str(req_body)[:2048]),
- str(resp),
- filter(lambda x: x in string.printable,
- str(resp_body)[:2048])),
- extra=extra)
+        # Also log everything at DEBUG; if you want to filter this
+        # out, don't run at debug.
+ self._log_request_full(method, req_url, resp, secs, req_headers,
+ req_body, resp_body, caller_name, extra)
def _parse_resp(self, body):
if self._get_type() is "json":
diff --git a/tempest/config.py b/tempest/config.py
index d3449a7..cea9dec 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -470,13 +470,13 @@
)
]
-queuing_group = cfg.OptGroup(name='queuing',
- title='Queuing Service')
+messaging_group = cfg.OptGroup(name='messaging',
+ title='Messaging Service')
-QueuingGroup = [
+MessagingGroup = [
cfg.StrOpt('catalog_type',
- default='queuing',
- help='Catalog type of the Queuing service.'),
+ default='messaging',
+ help='Catalog type of the Messaging service.'),
cfg.IntOpt('max_queues_per_page',
default=20,
help='The maximum number of queue records per page when '
@@ -1034,7 +1034,7 @@
register_opt_group(cfg.CONF, network_group, NetworkGroup)
register_opt_group(cfg.CONF, network_feature_group,
NetworkFeaturesGroup)
- register_opt_group(cfg.CONF, queuing_group, QueuingGroup)
+ register_opt_group(cfg.CONF, messaging_group, MessagingGroup)
register_opt_group(cfg.CONF, volume_group, VolumeGroup)
register_opt_group(cfg.CONF, volume_feature_group,
VolumeFeaturesGroup)
@@ -1091,7 +1091,7 @@
'object-storage-feature-enabled']
self.database = cfg.CONF.database
self.orchestration = cfg.CONF.orchestration
- self.queuing = cfg.CONF.queuing
+ self.messaging = cfg.CONF.messaging
self.telemetry = cfg.CONF.telemetry
self.dashboard = cfg.CONF.dashboard
self.data_processing = cfg.CONF.data_processing
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 6068d35..9933646 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -1248,38 +1248,38 @@
cls.container_client = cls.manager.container_client
cls.object_client = cls.manager.object_client
- def _get_swift_stat(self):
+ def get_swift_stat(self):
"""get swift status for our user account."""
self.account_client.list_account_containers()
LOG.debug('Swift status information obtained successfully')
- def _create_container(self, container_name=None):
+ def create_container(self, container_name=None):
name = container_name or data_utils.rand_name(
'swift-scenario-container')
self.container_client.create_container(name)
# look for the container to assure it is created
- self._list_and_check_container_objects(name)
+ self.list_and_check_container_objects(name)
LOG.debug('Container %s created' % (name))
return name
- def _delete_container(self, container_name):
+ def delete_container(self, container_name):
self.container_client.delete_container(container_name)
LOG.debug('Container %s deleted' % (container_name))
- def _upload_object_to_container(self, container_name, obj_name=None):
+ def upload_object_to_container(self, container_name, obj_name=None):
obj_name = obj_name or data_utils.rand_name('swift-scenario-object')
obj_data = data_utils.arbitrary_string()
self.object_client.create_object(container_name, obj_name, obj_data)
return obj_name, obj_data
- def _delete_object(self, container_name, filename):
+ def delete_object(self, container_name, filename):
self.object_client.delete_object(container_name, filename)
- self._list_and_check_container_objects(container_name,
- not_present_obj=[filename])
+ self.list_and_check_container_objects(container_name,
+ not_present_obj=[filename])
- def _list_and_check_container_objects(self, container_name,
- present_obj=None,
- not_present_obj=None):
+ def list_and_check_container_objects(self, container_name,
+ present_obj=None,
+ not_present_obj=None):
"""
List objects for a given container and assert which are present and
which are not.
@@ -1297,7 +1297,7 @@
for obj in not_present_obj:
self.assertNotIn(obj, object_list)
- def _change_container_acl(self, container_name, acl):
+ def change_container_acl(self, container_name, acl):
metadata_param = {'metadata_prefix': 'x-container-',
'metadata': {'read': acl}}
self.container_client.update_container_metadata(container_name,
@@ -1305,6 +1305,6 @@
resp, _ = self.container_client.list_container_metadata(container_name)
self.assertEqual(resp['x-container-read'], acl)
- def _download_and_verify(self, container_name, obj_name, expected_data):
+ def download_and_verify(self, container_name, obj_name, expected_data):
_, obj = self.object_client.get_object(container_name, obj_name)
self.assertEqual(obj, expected_data)
diff --git a/tempest/scenario/orchestration/test_server_cfn_init.py b/tempest/scenario/orchestration/test_server_cfn_init.py
index dd7e7d4..0ab4311 100644
--- a/tempest/scenario/orchestration/test_server_cfn_init.py
+++ b/tempest/scenario/orchestration/test_server_cfn_init.py
@@ -86,7 +86,8 @@
if not self.ping_ip_address(server_ip):
self._log_console_output(servers=[server])
self.fail(
- "Timed out waiting for %s to become reachable" % server_ip)
+ "(CfnInitScenarioTest:test_server_cfn_init) Timed out waiting "
+ "for %s to become reachable" % server_ip)
try:
self.client.wait_for_resource_status(
diff --git a/tempest/scenario/test_large_ops.py b/tempest/scenario/test_large_ops.py
index a7ea70f..71b8a7f 100644
--- a/tempest/scenario/test_large_ops.py
+++ b/tempest/scenario/test_large_ops.py
@@ -39,6 +39,9 @@
@classmethod
def setUpClass(cls):
+ if CONF.scenario.large_ops_number < 1:
+ raise cls.skipException("large_ops_number not set to multiple "
+ "instances")
cls.set_network_resources()
super(TestLargeOpsScenario, cls).setUpClass()
@@ -75,8 +78,6 @@
self._wait_for_server_status('ACTIVE')
def _large_ops_scenario(self):
- if CONF.scenario.large_ops_number < 1:
- return
self.glance_image_create()
self.nova_boot()
diff --git a/tempest/scenario/test_swift_basic_ops.py b/tempest/scenario/test_swift_basic_ops.py
index ad74ec4..9e0fee0 100644
--- a/tempest/scenario/test_swift_basic_ops.py
+++ b/tempest/scenario/test_swift_basic_ops.py
@@ -41,13 +41,13 @@
@test.services('object_storage')
def test_swift_basic_ops(self):
- self._get_swift_stat()
- container_name = self._create_container()
- obj_name, obj_data = self._upload_object_to_container(container_name)
- self._list_and_check_container_objects(container_name, [obj_name])
- self._download_and_verify(container_name, obj_name, obj_data)
- self._delete_object(container_name, obj_name)
- self._delete_container(container_name)
+ self.get_swift_stat()
+ container_name = self.create_container()
+ obj_name, obj_data = self.upload_object_to_container(container_name)
+ self.list_and_check_container_objects(container_name, [obj_name])
+ self.download_and_verify(container_name, obj_name, obj_data)
+ self.delete_object(container_name, obj_name)
+ self.delete_container(container_name)
@test.services('object_storage')
def test_swift_acl_anonymous_download(self):
@@ -58,15 +58,15 @@
         4. Check if the object can be downloaded by an anonymous user
5. Delete the object and container
"""
- container_name = self._create_container()
- obj_name, _ = self._upload_object_to_container(container_name)
+ container_name = self.create_container()
+ obj_name, _ = self.upload_object_to_container(container_name)
obj_url = '%s/%s/%s' % (self.object_client.base_url,
container_name, obj_name)
http_client = http.ClosingHttp()
resp, _ = http_client.request(obj_url, 'GET')
self.assertEqual(resp.status, 401)
- self._change_container_acl(container_name, '.r:*')
+ self.change_container_acl(container_name, '.r:*')
resp, _ = http_client.request(obj_url, 'GET')
self.assertEqual(resp.status, 200)
- self._delete_object(container_name, obj_name)
- self._delete_container(container_name)
+ self.delete_object(container_name, obj_name)
+ self.delete_container(container_name)
diff --git a/tempest/services/compute/json/security_group_default_rules_client.py b/tempest/services/compute/json/security_group_default_rules_client.py
index 6d29837..7743f9c 100644
--- a/tempest/services/compute/json/security_group_default_rules_client.py
+++ b/tempest/services/compute/json/security_group_default_rules_client.py
@@ -15,6 +15,8 @@
import json
+from tempest.api_schema.response.compute.v2 import \
+ security_group_default_rule as schema
from tempest.common import rest_client
from tempest import config
@@ -46,8 +48,9 @@
post_body = json.dumps({'security_group_default_rule': post_body})
url = 'os-security-group-default-rules'
resp, body = self.post(url, post_body)
- self.expected_success(200, resp.status)
body = json.loads(body)
+ self.validate_response(schema.create_get_security_group_default_rule,
+ resp, body)
return resp, body['security_group_default_rule']
def delete_security_group_default_rule(self,
@@ -55,20 +58,23 @@
"""Deletes the provided Security Group default rule."""
resp, body = self.delete('os-security-group-default-rules/%s' % str(
security_group_default_rule_id))
- self.expected_success(204, resp.status)
+ self.validate_response(schema.delete_security_group_default_rule,
+ resp, body)
return resp, body
def list_security_group_default_rules(self):
"""List all Security Group default rules."""
resp, body = self.get('os-security-group-default-rules')
- self.expected_success(200, resp.status)
body = json.loads(body)
+ self.validate_response(schema.list_security_group_default_rules,
+ resp, body)
return resp, body['security_group_default_rules']
def get_security_group_default_rule(self, security_group_default_rule_id):
"""Return the details of provided Security Group default rule."""
resp, body = self.get('os-security-group-default-rules/%s' % str(
security_group_default_rule_id))
- self.expected_success(200, resp.status)
body = json.loads(body)
+ self.validate_response(schema.create_get_security_group_default_rule,
+ resp, body)
return resp, body['security_group_default_rule']
diff --git a/tempest/services/queuing/__init__.py b/tempest/services/messaging/__init__.py
similarity index 100%
rename from tempest/services/queuing/__init__.py
rename to tempest/services/messaging/__init__.py
diff --git a/tempest/services/queuing/json/__init__.py b/tempest/services/messaging/json/__init__.py
similarity index 100%
rename from tempest/services/queuing/json/__init__.py
rename to tempest/services/messaging/json/__init__.py
diff --git a/tempest/services/queuing/json/queuing_client.py b/tempest/services/messaging/json/messaging_client.py
similarity index 95%
rename from tempest/services/queuing/json/queuing_client.py
rename to tempest/services/messaging/json/messaging_client.py
index 14960ad..3e82399 100644
--- a/tempest/services/queuing/json/queuing_client.py
+++ b/tempest/services/messaging/json/messaging_client.py
@@ -16,7 +16,7 @@
import json
import urllib
-from tempest.api_schema.response.queuing.v1 import queues as queues_schema
+from tempest.api_schema.response.messaging.v1 import queues as queues_schema
from tempest.common import rest_client
from tempest.common.utils import data_utils
from tempest import config
@@ -25,11 +25,11 @@
CONF = config.CONF
-class QueuingClientJSON(rest_client.RestClient):
+class MessagingClientJSON(rest_client.RestClient):
def __init__(self, auth_provider):
- super(QueuingClientJSON, self).__init__(auth_provider)
- self.service = CONF.queuing.catalog_type
+ super(MessagingClientJSON, self).__init__(auth_provider)
+ self.service = CONF.messaging.catalog_type
self.version = '1'
self.uri_prefix = 'v{0}'.format(self.version)
diff --git a/tempest/services/object_storage/account_client.py b/tempest/services/object_storage/account_client.py
index eca57c0..4dc588f 100644
--- a/tempest/services/object_storage/account_client.py
+++ b/tempest/services/object_storage/account_client.py
@@ -58,8 +58,6 @@
"""Delete an account."""
url = ''
if params:
- if 'bulk-delete' in params:
- url += 'bulk-delete&'
url = '?%s%s' % (url, urllib.urlencode(params))
resp, body = self.delete(url, headers={}, body=data)
@@ -74,13 +72,19 @@
return resp, body
def create_account_metadata(self, metadata,
- metadata_prefix='X-Account-Meta-'):
+ metadata_prefix='X-Account-Meta-',
+ data=None, params=None):
"""Creates an account metadata entry."""
headers = {}
- for key in metadata:
- headers[metadata_prefix + key] = metadata[key]
+ if metadata:
+ for key in metadata:
+ headers[metadata_prefix + key] = metadata[key]
- resp, body = self.post('', headers=headers, body=None)
+ url = ''
+ if params:
+ url = '?%s%s' % (url, urllib.urlencode(params))
+
+ resp, body = self.post(url, headers=headers, body=data)
return resp, body
def delete_account_metadata(self, metadata,
diff --git a/tempest/services/volume/json/snapshots_client.py b/tempest/services/volume/json/snapshots_client.py
index f50ba2f..1f8065b 100644
--- a/tempest/services/volume/json/snapshots_client.py
+++ b/tempest/services/volume/json/snapshots_client.py
@@ -24,15 +24,16 @@
LOG = logging.getLogger(__name__)
-class SnapshotsClientJSON(rest_client.RestClient):
- """Client class to send CRUD Volume API requests."""
+class BaseSnapshotsClientJSON(rest_client.RestClient):
+ """Base Client class to send CRUD Volume API requests."""
def __init__(self, auth_provider):
- super(SnapshotsClientJSON, self).__init__(auth_provider)
+ super(BaseSnapshotsClientJSON, self).__init__(auth_provider)
self.service = CONF.volume.catalog_type
self.build_interval = CONF.volume.build_interval
self.build_timeout = CONF.volume.build_timeout
+ self.create_resp = 200
def list_snapshots(self, params=None):
"""List all the snapshot."""
@@ -77,7 +78,7 @@
post_body = json.dumps({'snapshot': post_body})
resp, body = self.post('snapshots', post_body)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.expected_success(self.create_resp, resp.status)
return resp, body['snapshot']
def update_snapshot(self, snapshot_id, **kwargs):
@@ -203,3 +204,7 @@
resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body)
self.expected_success(202, resp.status)
return resp, body
+
+
+class SnapshotsClientJSON(BaseSnapshotsClientJSON):
+ """Client class to send CRUD Volume V1 API requests."""
diff --git a/tempest/services/volume/v2/json/snapshots_client.py b/tempest/services/volume/v2/json/snapshots_client.py
new file mode 100644
index 0000000..553176b
--- /dev/null
+++ b/tempest/services/volume/v2/json/snapshots_client.py
@@ -0,0 +1,23 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.services.volume.json import snapshots_client
+
+
+class SnapshotsV2ClientJSON(snapshots_client.BaseSnapshotsClientJSON):
+ """Client class to send CRUD Volume V2 API requests."""
+
+ def __init__(self, auth_provider):
+ super(SnapshotsV2ClientJSON, self).__init__(auth_provider)
+
+ self.api_version = "v2"
+ self.create_resp = 202
diff --git a/tempest/services/volume/v2/xml/snapshots_client.py b/tempest/services/volume/v2/xml/snapshots_client.py
new file mode 100644
index 0000000..b29d86c
--- /dev/null
+++ b/tempest/services/volume/v2/xml/snapshots_client.py
@@ -0,0 +1,23 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.services.volume.xml import snapshots_client
+
+
+class SnapshotsV2ClientXML(snapshots_client.BaseSnapshotsClientXML):
+ """Client class to send CRUD Volume V2 API requests."""
+
+ def __init__(self, auth_provider):
+ super(SnapshotsV2ClientXML, self).__init__(auth_provider)
+
+ self.api_version = "v2"
+ self.create_resp = 202
diff --git a/tempest/services/volume/xml/snapshots_client.py b/tempest/services/volume/xml/snapshots_client.py
index 7636707..ce98eea 100644
--- a/tempest/services/volume/xml/snapshots_client.py
+++ b/tempest/services/volume/xml/snapshots_client.py
@@ -26,16 +26,17 @@
LOG = logging.getLogger(__name__)
-class SnapshotsClientXML(rest_client.RestClient):
- """Client class to send CRUD Volume API requests."""
+class BaseSnapshotsClientXML(rest_client.RestClient):
+ """Base Client class to send CRUD Volume API requests."""
TYPE = "xml"
def __init__(self, auth_provider):
- super(SnapshotsClientXML, self).__init__(auth_provider)
+ super(BaseSnapshotsClientXML, self).__init__(auth_provider)
self.service = CONF.volume.catalog_type
self.build_interval = CONF.volume.build_interval
self.build_timeout = CONF.volume.build_timeout
+ self.create_resp = 200
def list_snapshots(self, params=None):
"""List all snapshot."""
@@ -90,7 +91,7 @@
resp, body = self.post('snapshots',
str(common.Document(snapshot)))
body = common.xml_to_json(etree.fromstring(body))
- self.expected_success(200, resp.status)
+ self.expected_success(self.create_resp, resp.status)
return resp, body
def update_snapshot(self, snapshot_id, **kwargs):
@@ -243,3 +244,7 @@
body = common.xml_to_json(etree.fromstring(body))
self.expected_success(202, resp.status)
return resp, body
+
+
+class SnapshotsClientXML(BaseSnapshotsClientXML):
+ """Client class to send CRUD Volume V1 API requests."""
diff --git a/tempest/thirdparty/boto/test_ec2_instance_run.py b/tempest/thirdparty/boto/test_ec2_instance_run.py
index c0d3f7a..ee904c7 100644
--- a/tempest/thirdparty/boto/test_ec2_instance_run.py
+++ b/tempest/thirdparty/boto/test_ec2_instance_run.py
@@ -200,6 +200,29 @@
instance.terminate()
self.assertInstanceStateWait(instance, '_GONE')
+ def test_run_reboot_terminate_instance(self):
+        # EC2: run an instance, wait until it reaches the running state,
+        # reboot it, wait until it is running again, and then terminate it.
+ image_ami = self.ec2_client.get_image(self.images["ami"]
+ ["image_id"])
+ reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
+ ramdisk_id=self.images["ari"]["image_id"],
+ instance_type=self.instance_type)
+
+ self.assertEqual(1, len(reservation.instances))
+
+ instance = reservation.instances[0]
+ if instance.state != "running":
+ self.assertInstanceStateWait(instance, "running")
+
+ instance.reboot()
+ if instance.state != "running":
+ self.assertInstanceStateWait(instance, "running")
+ LOG.debug("Instance rebooted - state: %s", instance.state)
+
+ instance.terminate()
+ self.assertInstanceStateWait(instance, '_GONE')
+
def test_compute_with_volumes(self):
# EC2 1. integration test (not strict)
image_ami = self.ec2_client.get_image(self.images["ami"]["image_id"])
diff --git a/tempest/thirdparty/boto/test_s3_buckets.py b/tempest/thirdparty/boto/test_s3_buckets.py
index 3a8dc89..1576492 100644
--- a/tempest/thirdparty/boto/test_s3_buckets.py
+++ b/tempest/thirdparty/boto/test_s3_buckets.py
@@ -14,7 +14,6 @@
# under the License.
from tempest.common.utils import data_utils
-from tempest import test
from tempest.thirdparty.boto import test as boto_test
@@ -25,7 +24,6 @@
super(S3BucketsTest, cls).setUpClass()
cls.client = cls.os.s3_client
- @test.skip_because(bug="1076965")
def test_create_and_get_delete_bucket(self):
# S3 Create, get and delete bucket
bucket_name = data_utils.rand_name("s3bucket-")
diff --git a/test-requirements.txt b/test-requirements.txt
index cd8154b..ba70259 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,10 +1,13 @@
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
hacking>=0.9.2,<0.10
# needed for doc build
sphinx>=1.1.2,!=1.2.0,<1.3
python-subunit>=0.0.18
-oslosphinx
+oslosphinx>=2.2.0 # Apache-2.0
mox>=0.5.3
mock>=1.0
coverage>=3.6
-oslotest
-stevedore>=0.14
+oslotest>=1.1.0 # Apache-2.0
+stevedore>=1.0.0 # Apache-2.0
diff --git a/tox.ini b/tox.ini
index 492c4f6..cab59a8 100644
--- a/tox.ini
+++ b/tox.ini
@@ -8,6 +8,8 @@
OS_TEST_PATH=./tempest/test_discover
usedevelop = True
install_command = pip install -U {opts} {packages}
+whitelist_externals = bash
+
[testenv:py26]
setenv = OS_TEST_PATH=./tempest/tests
@@ -17,6 +19,11 @@
setenv = OS_TEST_PATH=./tempest/tests
commands = python setup.py test --slowest --testr-arg='tempest\.tests {posargs}'
+[testenv:py34]
+setenv = OS_TEST_PATH=./tempest/tests
+ PYTHONHASHSEED=0
+commands = python setup.py test --slowest --testr-arg='tempest\.tests {posargs}'
+
[testenv:py27]
setenv = OS_TEST_PATH=./tempest/tests
commands = python setup.py test --slowest --testr-arg='tempest\.tests {posargs}'