Merge "Fix resource cleanup in server rescue tests"
diff --git a/HACKING.rst b/HACKING.rst
index 025bf74..29d5bf4 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -227,3 +227,48 @@
2. The unit tests cannot use setUpClass, instead fixtures and testresources
should be used for shared state between tests.
+
+
+.. _TestDocumentation:
+
+Test Documentation
+------------------
+For tests being added, we require inline documentation in the form of
+docstrings to explain what is being tested. In API tests for a new API, a
+class level docstring linking to the API reference doc should be added. If
+no reference doc exists, a TODO comment should be added indicating that the
+reference needs to be written. For individual API test cases, a method level
+docstring should be used to explain the functionality being tested if the
+test name isn't descriptive enough. For example::
+
+ def test_get_role_by_id(self):
+ """Get a role by its id."""
+
+the docstring there is superfluous and shouldn't be added, but for a method
+like::
+
+ def test_volume_backup_create_get_detailed_list_restore_delete(self):
+ pass
+
+a docstring would be useful because, while the test name is fairly
+descriptive, the operations being performed are complex enough that a bit
+more explanation will help people figure out the intent of the test.
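+
+For instance, a docstring along the following lines (the wording is only an
+illustrative sketch) would summarize the workflow::
+
+ def test_volume_backup_create_get_detailed_list_restore_delete(self):
+ """Create a volume backup, check it via get and the detailed list,
+ then restore it to a new volume and finally delete the backup.
+ """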
+
+For scenario tests, a class level docstring describing the steps in the
+scenario is required. If there is more than one test case in the class,
+individual docstrings describing the workflow in each test method can be
+used instead. A good example of this would be::
+
+ class TestVolumeBootPattern(manager.OfficialClientTest):
+ """
+ This test case attempts to reproduce the following steps:
+
+ * Create in Cinder some bootable volume importing a Glance image
+ * Boot an instance from the bootable volume
+ * Write content to the volume
+ * Delete an instance and Boot a new instance from the volume
+ * Check written content in the instance
+ * Create a volume snapshot while the instance is running
+ * Boot an additional instance from the new snapshot based volume
+ * Check written content in the instance booted from snapshot
+ """
diff --git a/REVIEWING.rst b/REVIEWING.rst
index d6dc83e..74bd2ad 100644
--- a/REVIEWING.rst
+++ b/REVIEWING.rst
@@ -51,6 +51,15 @@
whether to skip or not.
+Test Documentation
+------------------
+When a new test is being added, refer to the :ref:`TestDocumentation` section
+in HACKING.rst to see whether the requirements are being met. With the
+exception of a class level docstring linking to the API ref doc in the API
+tests and a docstring for scenario tests, whether a docstring is required is
+left to the reviewer's discretion.
+
+
When to approve
---------------
* Every patch needs two +2s before being approved.
diff --git a/doc/source/conf.py b/doc/source/conf.py
index bd4e553..daa293c 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -27,7 +27,6 @@
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
- 'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'oslosphinx'
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index 3b0b834..dfcbaba 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -709,6 +709,42 @@
#ssh_user_regex=[["^.*[Cc]irros.*$", "root"]]
+[messaging]
+
+#
+# Options defined in tempest.config
+#
+
+# Catalog type of the Messaging service. (string value)
+#catalog_type=messaging
+
+# The maximum number of queue records per page when listing
+# queues (integer value)
+#max_queues_per_page=20
+
+# The maximum metadata size for a queue (integer value)
+#max_queue_metadata=65536
+
+# The maximum number of queue messages per page when listing
+# or posting messages (integer value)
+#max_messages_per_page=20
+
+# The maximum size of a message body (integer value)
+#max_message_size=262144
+
+# The maximum number of messages per claim (integer value)
+#max_messages_per_claim=20
+
+# The maximum ttl for a message (integer value)
+#max_message_ttl=1209600
+
+# The maximum ttl for a claim (integer value)
+#max_claim_ttl=43200
+
+# The maximum grace period for a claim (integer value)
+#max_claim_grace=43200
+
+
[negative]
#
@@ -897,42 +933,6 @@
#max_resources_per_stack=1000
-[queuing]
-
-#
-# Options defined in tempest.config
-#
-
-# Catalog type of the Queuing service. (string value)
-#catalog_type=queuing
-
-# The maximum number of queue records per page when listing
-# queues (integer value)
-#max_queues_per_page=20
-
-# The maximum metadata size for a queue (integer value)
-#max_queue_metadata=65536
-
-# The maximum number of queue message per page when listing
-# (or) posting messages (integer value)
-#max_messages_per_page=20
-
-# The maximum size of a message body (integer value)
-#max_message_size=262144
-
-# The maximum number of messages per claim (integer value)
-#max_messages_per_claim=20
-
-# The maximum ttl for a message (integer value)
-#max_message_ttl=1209600
-
-# The maximum ttl for a claim (integer value)
-#max_claim_ttl=43200
-
-# The maximum grace period for a claim (integer value)
-#max_claim_grace=43200
-
-
[scenario]
#
diff --git a/requirements.txt b/requirements.txt
index 9a3b74d..708ede3 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,25 +1,28 @@
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
pbr>=0.6,!=0.7,<1.0
anyjson>=0.3.3
httplib2>=0.7.5
jsonschema>=2.0.0,<3.0.0
testtools>=0.9.34
lxml>=2.3
-boto>=2.12.0,!=2.13.0
+boto>=2.32.1
paramiko>=1.13.0
-netaddr>=0.7.6
+netaddr>=0.7.12
python-ceilometerclient>=1.0.6
-python-glanceclient>=0.13.1
-python-keystoneclient>=0.9.0
-python-novaclient>=2.17.0
-python-neutronclient>=2.3.5,<3
-python-cinderclient>=1.0.7
+python-glanceclient>=0.14.0
+python-keystoneclient>=0.10.0
+python-novaclient>=2.18.0
+python-neutronclient>=2.3.6,<3
+python-cinderclient>=1.1.0
python-heatclient>=0.2.9
-python-ironicclient
-python-saharaclient>=0.6.0
-python-swiftclient>=2.0.2
+python-ironicclient>=0.2.1
+python-saharaclient>=0.7.3
+python-swiftclient>=2.2.0
testresources>=0.2.4
testrepository>=0.0.18
-oslo.config>=1.2.1
+oslo.config>=1.4.0 # Apache-2.0
six>=1.7.0
iso8601>=0.1.9
fixtures>=0.3.14
diff --git a/tempest/api/compute/admin/test_servers_negative.py b/tempest/api/compute/admin/test_servers_negative.py
index bd49fb2..9aa489c 100644
--- a/tempest/api/compute/admin/test_servers_negative.py
+++ b/tempest/api/compute/admin/test_servers_negative.py
@@ -17,6 +17,7 @@
import testtools
from tempest.api.compute import base
+from tempest.common import tempest_fixtures as fixtures
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
@@ -58,6 +59,8 @@
'Resize not available.')
@test.attr(type=['negative', 'gate'])
def test_resize_server_using_overlimit_ram(self):
+ # NOTE(mriedem): Avoid conflicts with os-quota-class-sets tests.
+ self.useFixture(fixtures.LockFixture('compute_quotas'))
flavor_name = data_utils.rand_name("flavor-")
flavor_id = self._get_unused_flavor_id()
resp, quota_set = self.quotas_client.get_default_quota_set(
@@ -78,6 +81,8 @@
'Resize not available.')
@test.attr(type=['negative', 'gate'])
def test_resize_server_using_overlimit_vcpus(self):
+ # NOTE(mriedem): Avoid conflicts with os-quota-class-sets tests.
+ self.useFixture(fixtures.LockFixture('compute_quotas'))
flavor_name = data_utils.rand_name("flavor-")
flavor_id = self._get_unused_flavor_id()
ram = 512
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index c7844a7..6c93d33 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -145,14 +145,19 @@
for server in cls.servers:
try:
cls.servers_client.delete_server(server['id'])
- except Exception:
+ except exceptions.NotFound:
+ # Something else already cleaned up the server, nothing to be
+ # worried about
pass
+ except Exception:
+ LOG.exception('Deleting server %s failed' % server['id'])
for server in cls.servers:
try:
cls.servers_client.wait_for_server_termination(server['id'])
except Exception:
- pass
+ LOG.exception('Waiting for deletion of server %s failed'
+ % server['id'])
@classmethod
def server_check_teardown(cls):
diff --git a/tempest/api/compute/v3/admin/test_servers_negative.py b/tempest/api/compute/v3/admin/test_servers_negative.py
index e48432b..f561ed3 100644
--- a/tempest/api/compute/v3/admin/test_servers_negative.py
+++ b/tempest/api/compute/v3/admin/test_servers_negative.py
@@ -17,6 +17,7 @@
import testtools
from tempest.api.compute import base
+from tempest.common import tempest_fixtures as fixtures
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
@@ -56,6 +57,8 @@
@test.attr(type=['negative', 'gate'])
def test_resize_server_using_overlimit_ram(self):
+ # NOTE(mriedem): Avoid conflicts with os-quota-class-sets tests.
+ self.useFixture(fixtures.LockFixture('compute_quotas'))
flavor_name = data_utils.rand_name("flavor-")
flavor_id = self._get_unused_flavor_id()
resp, quota_set = self.quotas_client.get_default_quota_set(
@@ -74,6 +77,8 @@
@test.attr(type=['negative', 'gate'])
def test_resize_server_using_overlimit_vcpus(self):
+ # NOTE(mriedem): Avoid conflicts with os-quota-class-sets tests.
+ self.useFixture(fixtures.LockFixture('compute_quotas'))
flavor_name = data_utils.rand_name("flavor-")
flavor_id = self._get_unused_flavor_id()
ram = 512
diff --git a/tempest/api/queuing/__init__.py b/tempest/api/messaging/__init__.py
similarity index 100%
rename from tempest/api/queuing/__init__.py
rename to tempest/api/messaging/__init__.py
diff --git a/tempest/api/queuing/base.py b/tempest/api/messaging/base.py
similarity index 78%
rename from tempest/api/queuing/base.py
rename to tempest/api/messaging/base.py
index 41a02f2..0e062c5 100644
--- a/tempest/api/queuing/base.py
+++ b/tempest/api/messaging/base.py
@@ -23,25 +23,25 @@
LOG = logging.getLogger(__name__)
-class BaseQueuingTest(test.BaseTestCase):
+class BaseMessagingTest(test.BaseTestCase):
"""
- Base class for the Queuing tests that use the Tempest Zaqar REST client
+ Base class for the Messaging tests that use the Tempest Zaqar REST client
It is assumed that the following option is defined in the
[service_available] section of etc/tempest.conf
- queuing as True
+ messaging as True
"""
@classmethod
def setUpClass(cls):
- super(BaseQueuingTest, cls).setUpClass()
+ super(BaseMessagingTest, cls).setUpClass()
if not CONF.service_available.zaqar:
raise cls.skipException("Zaqar support is required")
os = cls.get_client_manager()
- cls.queuing_cfg = CONF.queuing
- cls.client = os.queuing_client
+ cls.messaging_cfg = CONF.messaging
+ cls.client = os.messaging_client
@classmethod
def create_queue(cls, queue_name):
@@ -93,42 +93,42 @@
@classmethod
def post_messages(cls, queue_name, rbody):
- '''Wrapper utility that posts messages to a queue.'''
+ """Wrapper utility that posts messages to a queue."""
resp, body = cls.client.post_messages(queue_name, rbody)
return resp, body
@classmethod
def list_messages(cls, queue_name):
- '''Wrapper utility that lists the messages in a queue.'''
+ """Wrapper utility that lists the messages in a queue."""
resp, body = cls.client.list_messages(queue_name)
return resp, body
@classmethod
def get_single_message(cls, message_uri):
- '''Wrapper utility that gets a single message.'''
+ """Wrapper utility that gets a single message."""
resp, body = cls.client.get_single_message(message_uri)
return resp, body
@classmethod
def get_multiple_messages(cls, message_uri):
- '''Wrapper utility that gets multiple messages.'''
+ """Wrapper utility that gets multiple messages."""
resp, body = cls.client.get_multiple_messages(message_uri)
return resp, body
@classmethod
def delete_messages(cls, message_uri):
- '''Wrapper utility that deletes messages.'''
+ """Wrapper utility that deletes messages."""
resp, body = cls.client.delete_messages(message_uri)
return resp, body
@classmethod
def post_claims(cls, queue_name, rbody, url_params=False):
- '''Wrapper utility that claims messages.'''
+ """Wrapper utility that claims messages."""
resp, body = cls.client.post_claims(
queue_name, rbody, url_params=False)
@@ -136,33 +136,34 @@
@classmethod
def query_claim(cls, claim_uri):
- '''Wrapper utility that gets a claim.'''
+ """Wrapper utility that gets a claim."""
resp, body = cls.client.query_claim(claim_uri)
return resp, body
@classmethod
def update_claim(cls, claim_uri, rbody):
- '''Wrapper utility that updates a claim.'''
+ """Wrapper utility that updates a claim."""
resp, body = cls.client.update_claim(claim_uri, rbody)
return resp, body
@classmethod
def release_claim(cls, claim_uri):
- '''Wrapper utility that deletes a claim.'''
+ """Wrapper utility that deletes a claim."""
resp, body = cls.client.release_claim(claim_uri)
return resp, body
@classmethod
def generate_message_body(cls, repeat=1):
- '''Wrapper utility that sets the metadata of a queue.'''
- message_ttl = data_utils.rand_int_id(start=60,
- end=CONF.queuing.max_message_ttl)
+ """Wrapper utility that sets the metadata of a queue."""
+ message_ttl = data_utils.\
+ rand_int_id(start=60, end=CONF.messaging.max_message_ttl)
- key = data_utils.arbitrary_string(size=20, base_text='QueuingKey')
- value = data_utils.arbitrary_string(size=20, base_text='QueuingValue')
+ key = data_utils.arbitrary_string(size=20, base_text='MessagingKey')
+ value = data_utils.arbitrary_string(size=20,
+ base_text='MessagingValue')
message_body = {key: value}
rbody = ([{'body': message_body, 'ttl': message_ttl}] * repeat)
diff --git a/tempest/api/queuing/test_claims.py b/tempest/api/messaging/test_claims.py
similarity index 91%
rename from tempest/api/queuing/test_claims.py
rename to tempest/api/messaging/test_claims.py
index a306623..885f00e 100644
--- a/tempest/api/queuing/test_claims.py
+++ b/tempest/api/messaging/test_claims.py
@@ -16,7 +16,7 @@
import logging
import urlparse
-from tempest.api.queuing import base
+from tempest.api.messaging import base
from tempest.common.utils import data_utils
from tempest import config
from tempest import test
@@ -26,7 +26,7 @@
CONF = config.CONF
-class TestClaims(base.BaseQueuingTest):
+class TestClaims(base.BaseMessagingTest):
_interface = 'json'
@classmethod
@@ -44,9 +44,9 @@
# Post Claim
claim_ttl = data_utils.rand_int_id(start=60,
- end=CONF.queuing.max_claim_ttl)
- claim_grace = data_utils.rand_int_id(start=60,
- end=CONF.queuing.max_claim_grace)
+ end=CONF.messaging.max_claim_ttl)
+ claim_grace = data_utils.\
+ rand_int_id(start=60, end=CONF.messaging.max_claim_grace)
claim_body = {"ttl": claim_ttl, "grace": claim_grace}
resp, body = self.client.post_claims(queue_name=self.queue_name,
rbody=claim_body)
@@ -90,7 +90,7 @@
# Update Claim
claim_ttl = data_utils.rand_int_id(start=60,
- end=CONF.queuing.max_claim_ttl)
+ end=CONF.messaging.max_claim_ttl)
update_rbody = {"ttl": claim_ttl}
self.client.update_claim(claim_uri, rbody=update_rbody)
diff --git a/tempest/api/queuing/test_messages.py b/tempest/api/messaging/test_messages.py
similarity index 96%
rename from tempest/api/queuing/test_messages.py
rename to tempest/api/messaging/test_messages.py
index 9546c91..3217361 100644
--- a/tempest/api/queuing/test_messages.py
+++ b/tempest/api/messaging/test_messages.py
@@ -15,7 +15,7 @@
import logging
-from tempest.api.queuing import base
+from tempest.api.messaging import base
from tempest.common.utils import data_utils
from tempest import config
from tempest import test
@@ -25,7 +25,7 @@
CONF = config.CONF
-class TestMessages(base.BaseQueuingTest):
+class TestMessages(base.BaseMessagingTest):
_interface = 'json'
@classmethod
@@ -35,7 +35,7 @@
# Create Queue
cls.client.create_queue(cls.queue_name)
- def _post_messages(self, repeat=CONF.queuing.max_messages_per_page):
+ def _post_messages(self, repeat=CONF.messaging.max_messages_per_page):
message_body = self.generate_message_body(repeat=repeat)
resp, body = self.post_messages(queue_name=self.queue_name,
rbody=message_body)
diff --git a/tempest/api/queuing/test_queues.py b/tempest/api/messaging/test_queues.py
similarity index 96%
rename from tempest/api/queuing/test_queues.py
rename to tempest/api/messaging/test_queues.py
index b340b60..edfe10e 100644
--- a/tempest/api/queuing/test_queues.py
+++ b/tempest/api/messaging/test_queues.py
@@ -18,7 +18,7 @@
from six import moves
from testtools import matchers
-from tempest.api.queuing import base
+from tempest.api.messaging import base
from tempest.common.utils import data_utils
from tempest import test
@@ -26,7 +26,7 @@
LOG = logging.getLogger(__name__)
-class TestQueues(base.BaseQueuingTest):
+class TestQueues(base.BaseMessagingTest):
@test.attr(type='smoke')
def test_create_queue(self):
@@ -40,7 +40,7 @@
self.assertEqual('', body)
-class TestManageQueue(base.BaseQueuingTest):
+class TestManageQueue(base.BaseMessagingTest):
_interface = 'json'
@classmethod
diff --git a/tempest/api/network/test_ports.py b/tempest/api/network/test_ports.py
index ce0bb57..cdd3a29 100644
--- a/tempest/api/network/test_ports.py
+++ b/tempest/api/network/test_ports.py
@@ -128,7 +128,6 @@
for port in ports:
self.assertEqual(sorted(fields), sorted(port.keys()))
- @test.skip_because(bug="1364166")
@test.attr(type='smoke')
def test_update_port_with_second_ip(self):
# Create a network with two subnets
diff --git a/tempest/api/object_storage/test_account_bulk.py b/tempest/api/object_storage/test_account_bulk.py
index a94c883..743f1aa 100644
--- a/tempest/api/object_storage/test_account_bulk.py
+++ b/tempest/api/object_storage/test_account_bulk.py
@@ -50,16 +50,27 @@
return tarpath.name, container_name, object_name
- @test.attr(type='gate')
- def test_extract_archive(self):
- # Test bulk operation of file upload with an archived file
- filepath, container_name, object_name = self._create_archive()
-
+ def _upload_archive(self, filepath):
+ # upload an archived file
params = {'extract-archive': 'tar'}
with open(filepath) as fh:
mydata = fh.read()
resp, body = self.account_client.create_account(data=mydata,
params=params)
+ return resp, body
+
+ def _check_contents_deleted(self, container_name):
+ param = {'format': 'txt'}
+ resp, body = self.account_client.list_account_containers(param)
+ self.assertHeaders(resp, 'Account', 'GET')
+ self.assertNotIn(container_name, body)
+
+ @test.attr(type='gate')
+ @test.requires_ext(extension='bulk', service='object')
+ def test_extract_archive(self):
+ # Test bulk operation of file upload with an archived file
+ filepath, container_name, object_name = self._create_archive()
+ resp, _ = self._upload_archive(filepath)
self.containers.append(container_name)
@@ -95,23 +106,17 @@
self.assertIn(object_name, [c['name'] for c in contents_list])
@test.attr(type='gate')
+ @test.requires_ext(extension='bulk', service='object')
def test_bulk_delete(self):
# Test bulk operation of deleting multiple files
filepath, container_name, object_name = self._create_archive()
-
- params = {'extract-archive': 'tar'}
- with open(filepath) as fh:
- mydata = fh.read()
- resp, body = self.account_client.create_account(data=mydata,
- params=params)
+ self._upload_archive(filepath)
data = '%s/%s\n%s' % (container_name, object_name, container_name)
params = {'bulk-delete': ''}
resp, body = self.account_client.delete_account(data=data,
params=params)
- self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
-
# When deleting multiple files using the bulk operation, the response
# does not contain 'content-length' header. This is the special case,
# therefore the existence of response headers is checked without
@@ -124,11 +129,33 @@
# Check only the format of common headers with custom matcher
self.assertThat(resp, custom_matchers.AreAllWellFormatted())
- # Check if a container is deleted
- param = {'format': 'txt'}
- resp, body = self.account_client.list_account_containers(param)
+ # Check if uploaded contents are completely deleted
+ self._check_contents_deleted(container_name)
- self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
- self.assertHeaders(resp, 'Account', 'GET')
+ @test.attr(type='gate')
+ @test.requires_ext(extension='bulk', service='object')
+ def test_bulk_delete_by_POST(self):
+ # Test bulk operation of deleting multiple files
+ filepath, container_name, object_name = self._create_archive()
+ self._upload_archive(filepath)
- self.assertNotIn(container_name, body)
+ data = '%s/%s\n%s' % (container_name, object_name, container_name)
+ params = {'bulk-delete': ''}
+
+ resp, body = self.account_client.create_account_metadata(
+ {}, data=data, params=params)
+
+ # When deleting multiple files using the bulk operation, the response
+ # does not contain 'content-length' header. This is the special case,
+ # therefore the existence of response headers is checked without
+ # custom matcher.
+ self.assertIn('transfer-encoding', resp)
+ self.assertIn('content-type', resp)
+ self.assertIn('x-trans-id', resp)
+ self.assertIn('date', resp)
+
+ # Check only the format of common headers with custom matcher
+ self.assertThat(resp, custom_matchers.AreAllWellFormatted())
+
+ # Check if uploaded contents are completely deleted
+ self._check_contents_deleted(container_name)
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index 43f48ff..8aad058 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -69,6 +69,7 @@
if not CONF.volume_feature_enabled.api_v2:
msg = "Volume API v2 is disabled"
raise cls.skipException(msg)
+ cls.snapshots_client = cls.os.snapshots_v2_client
cls.volumes_client = cls.os.volumes_v2_client
cls.volumes_extension_client = cls.os.volumes_v2_extension_client
cls.availability_zone_client = (
diff --git a/tempest/api/volume/test_snapshot_metadata.py b/tempest/api/volume/test_snapshot_metadata.py
index 94ba095..7040891 100644
--- a/tempest/api/volume/test_snapshot_metadata.py
+++ b/tempest/api/volume/test_snapshot_metadata.py
@@ -17,13 +17,12 @@
from tempest import test
-class SnapshotMetadataTest(base.BaseVolumeV1Test):
- _interface = "json"
+class SnapshotV2MetadataTestJSON(base.BaseVolumeTest):
@classmethod
@test.safe_setup
def setUpClass(cls):
- super(SnapshotMetadataTest, cls).setUpClass()
+ super(SnapshotV2MetadataTestJSON, cls).setUpClass()
cls.client = cls.snapshots_client
# Create a volume
cls.volume = cls.create_volume()
@@ -34,7 +33,7 @@
def tearDown(self):
# Update the metadata to {}
self.client.update_snapshot_metadata(self.snapshot_id, {})
- super(SnapshotMetadataTest, self).tearDown()
+ super(SnapshotV2MetadataTestJSON, self).tearDown()
@test.attr(type='gate')
def test_create_get_delete_snapshot_metadata(self):
@@ -100,5 +99,13 @@
self.assertEqual(expect, body)
-class SnapshotMetadataTestXML(SnapshotMetadataTest):
+class SnapshotV2MetadataTestXML(SnapshotV2MetadataTestJSON):
+ _interface = "xml"
+
+
+class SnapshotV1MetadataTestJSON(SnapshotV2MetadataTestJSON):
+ _api_version = 1
+
+
+class SnapshotV1MetadataTestXML(SnapshotV1MetadataTestJSON):
_interface = "xml"
diff --git a/tempest/api/volume/test_volumes_snapshots.py b/tempest/api/volume/test_volumes_snapshots.py
index 7db1ef1..8390f03 100644
--- a/tempest/api/volume/test_volumes_snapshots.py
+++ b/tempest/api/volume/test_volumes_snapshots.py
@@ -20,21 +20,19 @@
CONF = config.CONF
-class VolumesSnapshotTest(base.BaseVolumeV1Test):
- _interface = "json"
+class VolumesV2SnapshotTestJSON(base.BaseVolumeTest):
@classmethod
@test.safe_setup
def setUpClass(cls):
- super(VolumesSnapshotTest, cls).setUpClass()
+ super(VolumesV2SnapshotTestJSON, cls).setUpClass()
cls.volume_origin = cls.create_volume()
if not CONF.volume_feature_enabled.snapshot:
raise cls.skipException("Cinder volume snapshots are disabled")
- @classmethod
- def tearDownClass(cls):
- super(VolumesSnapshotTest, cls).tearDownClass()
+ cls.name_field = cls.special_fields['name_field']
+ cls.descrip_field = cls.special_fields['descrip_field']
def _detach(self, volume_id):
"""Detach volume."""
@@ -90,8 +88,8 @@
def test_snapshot_create_get_list_update_delete(self):
# Create a snapshot
s_name = data_utils.rand_name('snap')
- snapshot = self.create_snapshot(self.volume_origin['id'],
- display_name=s_name)
+ params = {self.name_field: s_name}
+ snapshot = self.create_snapshot(self.volume_origin['id'], **params)
# Get the snap and check for some of its details
_, snap_get = self.snapshots_client.get_snapshot(snapshot['id'])
@@ -100,26 +98,26 @@
"Referred volume origin mismatch")
# Compare also with the output from the list action
- tracking_data = (snapshot['id'], snapshot['display_name'])
+ tracking_data = (snapshot['id'], snapshot[self.name_field])
_, snaps_list = self.snapshots_client.list_snapshots()
- snaps_data = [(f['id'], f['display_name']) for f in snaps_list]
+ snaps_data = [(f['id'], f[self.name_field]) for f in snaps_list]
self.assertIn(tracking_data, snaps_data)
# Updates snapshot with new values
new_s_name = data_utils.rand_name('new-snap')
new_desc = 'This is the new description of snapshot.'
+ params = {self.name_field: new_s_name,
+ self.descrip_field: new_desc}
_, update_snapshot = \
- self.snapshots_client.update_snapshot(snapshot['id'],
- display_name=new_s_name,
- display_description=new_desc)
+ self.snapshots_client.update_snapshot(snapshot['id'], **params)
# Assert response body for update_snapshot method
- self.assertEqual(new_s_name, update_snapshot['display_name'])
- self.assertEqual(new_desc, update_snapshot['display_description'])
+ self.assertEqual(new_s_name, update_snapshot[self.name_field])
+ self.assertEqual(new_desc, update_snapshot[self.descrip_field])
# Assert response body for get_snapshot method
_, updated_snapshot = \
self.snapshots_client.get_snapshot(snapshot['id'])
- self.assertEqual(new_s_name, updated_snapshot['display_name'])
- self.assertEqual(new_desc, updated_snapshot['display_description'])
+ self.assertEqual(new_s_name, updated_snapshot[self.name_field])
+ self.assertEqual(new_desc, updated_snapshot[self.descrip_field])
# Delete the snapshot
self.snapshots_client.delete_snapshot(snapshot['id'])
@@ -131,11 +129,11 @@
"""list snapshots with params."""
# Create a snapshot
display_name = data_utils.rand_name('snap')
- snapshot = self.create_snapshot(self.volume_origin['id'],
- display_name=display_name)
+ params = {self.name_field: display_name}
+ snapshot = self.create_snapshot(self.volume_origin['id'], **params)
# Verify list snapshots by display_name filter
- params = {'display_name': snapshot['display_name']}
+ params = {self.name_field: snapshot[self.name_field]}
self._list_by_param_values_and_assert(params)
# Verify list snapshots by status filter
@@ -144,7 +142,7 @@
# Verify list snapshots by status and display name filter
params = {'status': 'available',
- 'display_name': snapshot['display_name']}
+ self.name_field: snapshot[self.name_field]}
self._list_by_param_values_and_assert(params)
@test.attr(type='gate')
@@ -152,18 +150,18 @@
"""list snapshot details with params."""
# Create a snapshot
display_name = data_utils.rand_name('snap')
- snapshot = self.create_snapshot(self.volume_origin['id'],
- display_name=display_name)
+ params = {self.name_field: display_name}
+ snapshot = self.create_snapshot(self.volume_origin['id'], **params)
# Verify list snapshot details by display_name filter
- params = {'display_name': snapshot['display_name']}
+ params = {self.name_field: snapshot[self.name_field]}
self._list_by_param_values_and_assert(params, with_detail=True)
# Verify list snapshot details by status filter
params = {'status': 'available'}
self._list_by_param_values_and_assert(params, with_detail=True)
# Verify list snapshot details by status and display name filter
params = {'status': 'available',
- 'display_name': snapshot['display_name']}
+ self.name_field: snapshot[self.name_field]}
self._list_by_param_values_and_assert(params, with_detail=True)
@test.attr(type='gate')
@@ -181,5 +179,13 @@
self.clear_snapshots()
-class VolumesSnapshotTestXML(VolumesSnapshotTest):
+class VolumesV2SnapshotTestXML(VolumesV2SnapshotTestJSON):
+ _interface = "xml"
+
+
+class VolumesV1SnapshotTestJSON(VolumesV2SnapshotTestJSON):
+ _api_version = 1
+
+
+class VolumesV1SnapshotTestXML(VolumesV1SnapshotTestJSON):
_interface = "xml"
diff --git a/tempest/api/volume/test_volumes_snapshots_negative.py b/tempest/api/volume/test_volumes_snapshots_negative.py
index 61aa307..ddecda8 100644
--- a/tempest/api/volume/test_volumes_snapshots_negative.py
+++ b/tempest/api/volume/test_volumes_snapshots_negative.py
@@ -21,12 +21,11 @@
CONF = config.CONF
-class VolumesSnapshotNegativeTest(base.BaseVolumeV1Test):
- _interface = "json"
+class VolumesV2SnapshotNegativeTestJSON(base.BaseVolumeTest):
@classmethod
def setUpClass(cls):
- super(VolumesSnapshotNegativeTest, cls).setUpClass()
+ super(VolumesV2SnapshotNegativeTestJSON, cls).setUpClass()
if not CONF.volume_feature_enabled.snapshot:
raise cls.skipException("Cinder volume snapshots are disabled")
@@ -48,5 +47,13 @@
None, display_name=s_name)
-class VolumesSnapshotNegativeTestXML(VolumesSnapshotNegativeTest):
+class VolumesV2SnapshotNegativeTestXML(VolumesV2SnapshotNegativeTestJSON):
+ _interface = "xml"
+
+
+class VolumesV1SnapshotNegativeTestJSON(VolumesV2SnapshotNegativeTestJSON):
+ _api_version = 1
+
+
+class VolumesV1SnapshotNegativeTestXML(VolumesV1SnapshotNegativeTestJSON):
_interface = "xml"
diff --git a/tempest/api_schema/response/compute/v2/security_group_default_rule.py b/tempest/api_schema/response/compute/v2/security_group_default_rule.py
new file mode 100644
index 0000000..9246ab8
--- /dev/null
+++ b/tempest/api_schema/response/compute/v2/security_group_default_rule.py
@@ -0,0 +1,61 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+common_security_group_default_rule_info = {
+ 'type': 'object',
+ 'properties': {
+ 'from_port': {'type': 'integer'},
+ 'id': {'type': 'integer'},
+ 'ip_protocol': {'type': 'string'},
+ 'ip_range': {
+ 'type': 'object',
+ 'properties': {
+ 'cidr': {'type': 'string'}
+ },
+ 'required': ['cidr'],
+ },
+ 'to_port': {'type': 'integer'},
+ },
+ 'required': ['from_port', 'id', 'ip_protocol', 'ip_range', 'to_port'],
+}
+
+create_get_security_group_default_rule = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'security_group_default_rule':
+ common_security_group_default_rule_info
+ },
+ 'required': ['security_group_default_rule']
+ }
+}
+
+delete_security_group_default_rule = {
+ 'status_code': [204]
+}
+
+list_security_group_default_rules = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'security_group_default_rules': {
+ 'type': 'array',
+ 'items': common_security_group_default_rule_info
+ }
+ },
+ 'required': ['security_group_default_rules']
+ }
+}
diff --git a/tempest/api_schema/response/queuing/__init__.py b/tempest/api_schema/response/messaging/__init__.py
similarity index 100%
rename from tempest/api_schema/response/queuing/__init__.py
rename to tempest/api_schema/response/messaging/__init__.py
diff --git a/tempest/api_schema/response/queuing/v1/__init__.py b/tempest/api_schema/response/messaging/v1/__init__.py
similarity index 100%
rename from tempest/api_schema/response/queuing/v1/__init__.py
rename to tempest/api_schema/response/messaging/v1/__init__.py
diff --git a/tempest/api_schema/response/queuing/v1/queues.py b/tempest/api_schema/response/messaging/v1/queues.py
similarity index 100%
rename from tempest/api_schema/response/queuing/v1/queues.py
rename to tempest/api_schema/response/messaging/v1/queues.py
diff --git a/tempest/clients.py b/tempest/clients.py
index eab496e..89cffba 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -151,6 +151,8 @@
from tempest.services.identity.xml.identity_client import TokenClientXML
from tempest.services.image.v1.json.image_client import ImageClientJSON
from tempest.services.image.v2.json.image_client import ImageClientV2JSON
+from tempest.services.messaging.json.messaging_client import \
+ MessagingClientJSON
from tempest.services.network.json.network_client import NetworkClientJSON
from tempest.services.network.xml.network_client import NetworkClientXML
from tempest.services.object_storage.account_client import AccountClient
@@ -162,7 +164,6 @@
ObjectClientCustomizedHeader
from tempest.services.orchestration.json.orchestration_client import \
OrchestrationClient
-from tempest.services.queuing.json.queuing_client import QueuingClientJSON
from tempest.services.telemetry.json.telemetry_client import \
TelemetryClientJSON
from tempest.services.telemetry.xml.telemetry_client import \
@@ -188,11 +189,15 @@
from tempest.services.volume.v2.json.extensions_client import \
ExtensionsV2ClientJSON as VolumeV2ExtensionClientJSON
from tempest.services.volume.v2.json.qos_client import QosSpecsV2ClientJSON
+from tempest.services.volume.v2.json.snapshots_client import \
+ SnapshotsV2ClientJSON
from tempest.services.volume.v2.json.volumes_client import VolumesV2ClientJSON
from tempest.services.volume.v2.xml.availability_zone_client import \
VolumeV2AvailabilityZoneClientXML
from tempest.services.volume.v2.xml.extensions_client import \
ExtensionsV2ClientXML as VolumeV2ExtensionClientXML
+from tempest.services.volume.v2.xml.snapshots_client import \
+ SnapshotsV2ClientXML
from tempest.services.volume.v2.xml.volumes_client import VolumesV2ClientXML
from tempest.services.volume.xml.admin.volume_hosts_client import \
VolumeHostsClientXML
@@ -245,6 +250,7 @@
self.auth_provider)
self.backups_client = BackupsClientXML(self.auth_provider)
self.snapshots_client = SnapshotsClientXML(self.auth_provider)
+ self.snapshots_v2_client = SnapshotsV2ClientXML(self.auth_provider)
self.volumes_client = VolumesClientXML(self.auth_provider)
self.volumes_v2_client = VolumesV2ClientXML(self.auth_provider)
self.volume_types_client = VolumeTypesClientXML(
@@ -324,6 +330,8 @@
self.auth_provider)
self.backups_client = BackupsClientJSON(self.auth_provider)
self.snapshots_client = SnapshotsClientJSON(self.auth_provider)
+ self.snapshots_v2_client = SnapshotsV2ClientJSON(
+ self.auth_provider)
self.volumes_client = VolumesClientJSON(self.auth_provider)
self.volumes_v2_client = VolumesV2ClientJSON(self.auth_provider)
self.volume_types_client = VolumeTypesClientJSON(
@@ -384,7 +392,7 @@
self.auth_provider)
self.database_versions_client = DatabaseVersionsClientJSON(
self.auth_provider)
- self.queuing_client = QueuingClientJSON(self.auth_provider)
+ self.messaging_client = MessagingClientJSON(self.auth_provider)
if CONF.service_available.ceilometer:
self.telemetry_client = TelemetryClientJSON(
self.auth_provider)
diff --git a/tempest/cmd/javelin.py b/tempest/cmd/javelin.py
index 3f8db3d..27e388d 100755
--- a/tempest/cmd/javelin.py
+++ b/tempest/cmd/javelin.py
@@ -213,9 +213,7 @@
self.check_users()
self.check_objects()
self.check_servers()
- # TODO(sdague): Volumes not yet working, bring it back once the
- # code is self testing.
- # self.check_volumes()
+ self.check_volumes()
self.check_telemetry()
def check_users(self):
@@ -300,15 +298,15 @@
LOG.info("checking volumes")
for volume in self.res['volumes']:
client = client_for_user(volume['owner'])
- found = _get_volume_by_name(client, volume['name'])
+ vol_body = _get_volume_by_name(client, volume['name'])
self.assertIsNotNone(
- found,
+ vol_body,
"Couldn't find expected volume %s" % volume['name'])
# Verify that a volume's attachment retrieved
server_id = _get_server_by_name(client, volume['server'])['id']
- attachment = self.client.get_attachment_from_volume(volume)
- self.assertEqual(volume['id'], attachment['volume_id'])
+ attachment = client.volumes.get_attachment_from_volume(vol_body)
+ self.assertEqual(vol_body['id'], attachment['volume_id'])
self.assertEqual(server_id, attachment['server_id'])
def _confirm_telemetry_sample(self, server, sample):
@@ -501,8 +499,8 @@
def _get_volume_by_name(client, name):
r, body = client.volumes.list_volumes()
- for volume in body['volumes']:
- if name == volume['name']:
+ for volume in body:
+ if name == volume['display_name']:
return volume
return None
@@ -512,26 +510,32 @@
client = client_for_user(volume['owner'])
# only create a volume if the name isn't here
- r, body = client.volumes.list_volumes()
- if any(item['name'] == volume['name'] for item in body):
+ if _get_volume_by_name(client, volume['name']):
+ LOG.info("volume '%s' already exists" % volume['name'])
continue
- client.volumes.create_volume(volume['name'], volume['size'])
+ size = volume['gb']
+ v_name = volume['name']
+ resp, body = client.volumes.create_volume(size=size,
+ display_name=v_name)
+ client.volumes.wait_for_volume_status(body['id'], 'available')
def destroy_volumes(volumes):
for volume in volumes:
client = client_for_user(volume['owner'])
volume_id = _get_volume_by_name(client, volume['name'])['id']
- r, body = client.volumes.delete_volume(volume_id)
+ client.volumes.detach_volume(volume_id)
+ client.volumes.delete_volume(volume_id)
def attach_volumes(volumes):
for volume in volumes:
client = client_for_user(volume['owner'])
-
server_id = _get_server_by_name(client, volume['server'])['id']
- client.volumes.attach_volume(volume['name'], server_id)
+ volume_id = _get_volume_by_name(client, volume['name'])['id']
+ device = volume['device']
+ client.volumes.attach_volume(volume_id, server_id, device)
#######################
@@ -552,10 +556,8 @@
create_objects(RES['objects'])
create_images(RES['images'])
create_servers(RES['servers'])
- # TODO(sdague): volumes definition doesn't work yet, bring it
- # back once we're actually executing the code
- # create_volumes(RES['volumes'])
- # attach_volumes(RES['volumes'])
+ create_volumes(RES['volumes'])
+ attach_volumes(RES['volumes'])
def destroy_resources():
diff --git a/tempest/cmd/resources.yaml b/tempest/cmd/resources.yaml
index 3450e1f..19ee6d5 100644
--- a/tempest/cmd/resources.yaml
+++ b/tempest/cmd/resources.yaml
@@ -36,11 +36,13 @@
- name: assegai
server: peltast
owner: javelin
- size: 1
+ gb: 1
+ device: /dev/vdb
- name: pifpouf
server: hoplite
owner: javelin
- size: 2
+ gb: 2
+ device: /dev/vdb
servers:
- name: peltast
owner: javelin
diff --git a/tempest/cmd/verify_tempest_config.py b/tempest/cmd/verify_tempest_config.py
index cd696a9..5046bff 100755
--- a/tempest/cmd/verify_tempest_config.py
+++ b/tempest/cmd/verify_tempest_config.py
@@ -247,7 +247,7 @@
'data_processing': 'sahara',
'baremetal': 'ironic',
'identity': 'keystone',
- 'queuing': 'zaqar',
+ 'messaging': 'zaqar',
'database': 'trove'
}
# Get catalog list for endpoints to use for validation
diff --git a/tempest/common/rest_client.py b/tempest/common/rest_client.py
index e584cbf..00fe8d2 100644
--- a/tempest/common/rest_client.py
+++ b/tempest/common/rest_client.py
@@ -17,11 +17,11 @@
import collections
import json
import re
-import string
import time
import jsonschema
from lxml import etree
+import six
from tempest.common import http
from tempest.common.utils import misc as misc_utils
@@ -40,6 +40,19 @@
HTTP_SUCCESS = (200, 201, 202, 203, 204, 205, 206)
+# convert a structure into a string safely
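+# (bodies longer than maxlen are truncated and undecodable binary data is
+# replaced with a short marker, so request/response bodies can be logged)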
+def safe_body(body, maxlen=2048):
+ try:
+ text = six.text_type(body)
+ except UnicodeDecodeError:
+ # if this isn't actually text, return a marker saying so
+ return "<BinaryData: removed>"
+ if len(text) > maxlen:
+ return text[:maxlen]
+ else:
+ return text
+
+
class RestClient(object):
TYPE = "json"
@@ -258,6 +271,31 @@
self.LOG.debug('Starting Request (%s): %s %s' %
(caller_name, method, req_url))
+ def _log_request_full(self, method, req_url, resp,
+ secs="", req_headers=None,
+ req_body=None, resp_body=None,
+ caller_name=None, extra=None):
+ if req_headers and 'X-Auth-Token' in req_headers:
+ req_headers['X-Auth-Token'] = '<omitted>'
+ log_fmt = """Request (%s): %s %s %s%s
+ Request - Headers: %s
+ Body: %s
+ Response - Headers: %s
+ Body: %s"""
+
+ self.LOG.debug(
+ log_fmt % (
+ caller_name,
+ resp['status'],
+ method,
+ req_url,
+ secs,
+ str(req_headers),
+ safe_body(req_body),
+ str(resp),
+ safe_body(resp_body)),
+ extra=extra)
+
def _log_request(self, method, req_url, resp,
secs="", req_headers=None,
req_body=None, resp_body=None):
@@ -281,32 +319,10 @@
secs),
extra=extra)
- # We intentionally duplicate the info content because in a parallel
- # world this is important to match
- trace_regex = CONF.debug.trace_requests
- if trace_regex and re.search(trace_regex, caller_name):
- if 'X-Auth-Token' in req_headers:
- req_headers['X-Auth-Token'] = '<omitted>'
- log_fmt = """Request (%s): %s %s %s%s
- Request - Headers: %s
- Body: %s
- Response - Headers: %s
- Body: %s"""
-
- self.LOG.debug(
- log_fmt % (
- caller_name,
- resp['status'],
- method,
- req_url,
- secs,
- str(req_headers),
- filter(lambda x: x in string.printable,
- str(req_body)[:2048]),
- str(resp),
- filter(lambda x: x in string.printable,
- str(resp_body)[:2048])),
- extra=extra)
+ # We also log everything at DEBUG; if you want to filter this
+ # out, don't run at the debug level.
+ self._log_request_full(method, req_url, resp, secs, req_headers,
+ req_body, resp_body, caller_name, extra)
def _parse_resp(self, body):
if self._get_type() is "json":
diff --git a/tempest/config.py b/tempest/config.py
index d3449a7..cea9dec 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -470,13 +470,13 @@
)
]
-queuing_group = cfg.OptGroup(name='queuing',
- title='Queuing Service')
+messaging_group = cfg.OptGroup(name='messaging',
+ title='Messaging Service')
-QueuingGroup = [
+MessagingGroup = [
cfg.StrOpt('catalog_type',
- default='queuing',
- help='Catalog type of the Queuing service.'),
+ default='messaging',
+ help='Catalog type of the Messaging service.'),
cfg.IntOpt('max_queues_per_page',
default=20,
help='The maximum number of queue records per page when '
@@ -1034,7 +1034,7 @@
register_opt_group(cfg.CONF, network_group, NetworkGroup)
register_opt_group(cfg.CONF, network_feature_group,
NetworkFeaturesGroup)
- register_opt_group(cfg.CONF, queuing_group, QueuingGroup)
+ register_opt_group(cfg.CONF, messaging_group, MessagingGroup)
register_opt_group(cfg.CONF, volume_group, VolumeGroup)
register_opt_group(cfg.CONF, volume_feature_group,
VolumeFeaturesGroup)
@@ -1091,7 +1091,7 @@
'object-storage-feature-enabled']
self.database = cfg.CONF.database
self.orchestration = cfg.CONF.orchestration
- self.queuing = cfg.CONF.queuing
+ self.messaging = cfg.CONF.messaging
self.telemetry = cfg.CONF.telemetry
self.dashboard = cfg.CONF.dashboard
self.data_processing = cfg.CONF.data_processing
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 2d7bc24..9933646 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -18,11 +18,7 @@
import os
import subprocess
-from cinderclient import exceptions as cinder_exceptions
-import glanceclient
import netaddr
-from neutronclient.common import exceptions as exc
-from novaclient import exceptions as nova_exceptions
import six
from tempest.api.network import common as net_common
@@ -471,9 +467,7 @@
ping, CONF.compute.ping_timeout, 1)
-# TODO(yfried): change this class name to NetworkScenarioTest once client
-# migration is complete
-class NeutronScenarioTest(ScenarioTest):
+class NetworkScenarioTest(ScenarioTest):
"""Base class for network scenario tests.
This class provide helpers for network scenario tests, using the neutron
API. Helpers from ancestor which use the nova network API are overridden
@@ -486,22 +480,12 @@
@classmethod
def check_preconditions(cls):
- if CONF.service_available.neutron:
- cls.enabled = True
- # verify that neutron_available is telling the truth
- try:
- cls.network_client.list_networks()
- except exc.EndpointNotFound:
- cls.enabled = False
- raise
- else:
- cls.enabled = False
- msg = 'Neutron not available'
- raise cls.skipException(msg)
+ if not CONF.service_available.neutron:
+ raise cls.skipException('Neutron not available')
@classmethod
def setUpClass(cls):
- super(NeutronScenarioTest, cls).setUpClass()
+ super(NetworkScenarioTest, cls).setUpClass()
cls.tenant_id = cls.manager.identity_client.tenant_id
cls.check_preconditions()
@@ -809,9 +793,6 @@
]
msg = "No default security group for tenant %s." % (tenant_id)
self.assertTrue(len(sgs) > 0, msg)
- if len(sgs) > 1:
- msg = "Found %d default security groups" % len(sgs)
- raise exc.NeutronClientNoUniqueMatch(msg=msg)
return net_resources.DeletableSecurityGroup(client=client,
**sgs[0])
@@ -1005,511 +986,6 @@
return network, subnet, router
-class OfficialClientTest(tempest.test.BaseTestCase):
- """
- Official Client test base class for scenario testing.
-
- Official Client tests are tests that have the following characteristics:
-
- * Test basic operations of an API, typically in an order that
- a regular user would perform those operations
- * Test only the correct inputs and action paths -- no fuzz or
- random input data is sent, only valid inputs.
- * Use only the default client tool for calling an API
- """
-
- @classmethod
- def setUpClass(cls):
- super(OfficialClientTest, cls).setUpClass()
- cls.isolated_creds = isolated_creds.IsolatedCreds(
- cls.__name__, tempest_client=False,
- network_resources=cls.network_resources)
-
- cls.manager = clients.OfficialClientManager(
- credentials=cls.credentials())
- cls.compute_client = cls.manager.compute_client
- cls.image_client = cls.manager.image_client
- cls.baremetal_client = cls.manager.baremetal_client
- cls.identity_client = cls.manager.identity_client
- cls.network_client = cls.manager.network_client
- cls.volume_client = cls.manager.volume_client
- cls.object_storage_client = cls.manager.object_storage_client
- cls.orchestration_client = cls.manager.orchestration_client
- cls.data_processing_client = cls.manager.data_processing_client
- cls.ceilometer_client = cls.manager.ceilometer_client
-
- @classmethod
- def _get_credentials(cls, get_creds, ctype):
- if CONF.compute.allow_tenant_isolation:
- creds = get_creds()
- else:
- creds = auth.get_default_credentials(ctype)
- return creds
-
- @classmethod
- def credentials(cls):
- return cls._get_credentials(cls.isolated_creds.get_primary_creds,
- 'user')
-
- @classmethod
- def alt_credentials(cls):
- return cls._get_credentials(cls.isolated_creds.get_alt_creds,
- 'alt_user')
-
- @classmethod
- def admin_credentials(cls):
- return cls._get_credentials(cls.isolated_creds.get_admin_creds,
- 'identity_admin')
-
- def setUp(self):
- super(OfficialClientTest, self).setUp()
- self.cleanup_waits = []
- # NOTE(mtreinish) This is safe to do in setUp instead of setUp class
- # because scenario tests in the same test class should not share
- # resources. If resources were shared between test cases then it
- # should be a single scenario test instead of multiples.
-
- # NOTE(yfried): this list is cleaned at the end of test_methods and
- # not at the end of the class
- self.addCleanup(self._wait_for_cleanups)
-
- @staticmethod
- def not_found_exception(exception):
- """
- @return: True if exception is of NotFound type
- """
- NOT_FOUND_LIST = ['NotFound', 'HTTPNotFound']
- return (exception.__class__.__name__ in NOT_FOUND_LIST
- or
- hasattr(exception, 'status_code') and
- exception.status_code == 404)
-
- def delete_wrapper(self, thing):
- """Ignores NotFound exceptions for delete operations.
-
- @param thing: object with delete() method.
- OpenStack resources are assumed to have a delete() method which
- destroys the resource
- """
-
- try:
- thing.delete()
- except Exception as e:
- # If the resource is already missing, mission accomplished.
- if not self.not_found_exception(e):
- raise
-
- def _wait_for_cleanups(self):
- """To handle async delete actions, a list of waits is added
- which will be iterated over as the last step of clearing the
- cleanup queue. That way all the delete calls are made up front
- and the tests won't succeed unless the deletes are eventually
- successful. This is the same basic approach used in the api tests to
- limit cleanup execution time except here it is multi-resource,
- because of the nature of the scenario tests.
- """
- for wait in self.cleanup_waits:
- self.delete_timeout(**wait)
-
- def addCleanup_with_wait(self, things, thing_id,
- error_status='ERROR',
- exc_type=nova_exceptions.NotFound,
- cleanup_callable=None, cleanup_args=None,
- cleanup_kwargs=None):
- """Adds wait for ansyc resource deletion at the end of cleanups
-
- @param things: type of the resource to delete
- @param thing_id:
- @param error_status: see manager.delete_timeout()
- @param exc_type: see manager.delete_timeout()
- @param cleanup_callable: method to load pass to self.addCleanup with
- the following *cleanup_args, **cleanup_kwargs.
- usually a delete method. if not used, will try to use:
- things.delete(thing_id)
- """
- if cleanup_args is None:
- cleanup_args = []
- if cleanup_kwargs is None:
- cleanup_kwargs = {}
- if cleanup_callable is None:
- LOG.debug("no delete method passed. using {rclass}.delete({id}) as"
- " default".format(rclass=things, id=thing_id))
- self.addCleanup(things.delete, thing_id)
- else:
- self.addCleanup(cleanup_callable, *cleanup_args, **cleanup_kwargs)
- wait_dict = {
- 'things': things,
- 'thing_id': thing_id,
- 'error_status': error_status,
- 'not_found_exception': exc_type,
- }
- self.cleanup_waits.append(wait_dict)
-
- def status_timeout(self, things, thing_id, expected_status,
- error_status='ERROR',
- not_found_exception=nova_exceptions.NotFound):
- """
- Given a thing and an expected status, do a loop, sleeping
- for a configurable amount of time, checking for the
- expected status to show. At any time, if the returned
- status of the thing is ERROR, fail out.
- """
- self._status_timeout(things, thing_id,
- expected_status=expected_status,
- error_status=error_status,
- not_found_exception=not_found_exception)
-
- def delete_timeout(self, things, thing_id,
- error_status='ERROR',
- not_found_exception=nova_exceptions.NotFound):
- """
- Given a thing, do a loop, sleeping
- for a configurable amount of time, checking for the
- deleted status to show. At any time, if the returned
- status of the thing is ERROR, fail out.
- """
- self._status_timeout(things,
- thing_id,
- allow_notfound=True,
- error_status=error_status,
- not_found_exception=not_found_exception)
-
- def _status_timeout(self,
- things,
- thing_id,
- expected_status=None,
- allow_notfound=False,
- error_status='ERROR',
- not_found_exception=nova_exceptions.NotFound):
-
- log_status = expected_status if expected_status else ''
- if allow_notfound:
- log_status += ' or NotFound' if log_status != '' else 'NotFound'
-
- def check_status():
- # python-novaclient has resources available to its client
- # that all implement a get() method taking an identifier
- # for the singular resource to retrieve.
- try:
- thing = things.get(thing_id)
- except not_found_exception:
- if allow_notfound:
- return True
- raise
- except Exception as e:
- if allow_notfound and self.not_found_exception(e):
- return True
- raise
-
- new_status = thing.status
-
- # Some components are reporting error status in lower case
- # so case sensitive comparisons can really mess things
- # up.
- if new_status.lower() == error_status.lower():
- message = ("%s failed to get to expected status (%s). "
- "In %s state.") % (thing, expected_status,
- new_status)
- raise exceptions.BuildErrorException(message,
- server_id=thing_id)
- elif new_status == expected_status and expected_status is not None:
- return True # All good.
- LOG.debug("Waiting for %s to get to %s status. "
- "Currently in %s status",
- thing, log_status, new_status)
- if not tempest.test.call_until_true(
- check_status,
- CONF.compute.build_timeout,
- CONF.compute.build_interval):
- message = ("Timed out waiting for thing %s "
- "to become %s") % (thing_id, log_status)
- raise exceptions.TimeoutException(message)
-
- def _create_loginable_secgroup_rule_nova(self, client=None,
- secgroup_id=None):
- if client is None:
- client = self.compute_client
- if secgroup_id is None:
- sgs = client.security_groups.list()
- for sg in sgs:
- if sg.name == 'default':
- secgroup_id = sg.id
-
- # These rules are intended to permit inbound ssh and icmp
- # traffic from all sources, so no group_id is provided.
- # Setting a group_id would only permit traffic from ports
- # belonging to the same security group.
- rulesets = [
- {
- # ssh
- 'ip_protocol': 'tcp',
- 'from_port': 22,
- 'to_port': 22,
- 'cidr': '0.0.0.0/0',
- },
- {
- # ssh -6
- 'ip_protocol': 'tcp',
- 'from_port': 22,
- 'to_port': 22,
- 'cidr': '::/0',
- },
- {
- # ping
- 'ip_protocol': 'icmp',
- 'from_port': -1,
- 'to_port': -1,
- 'cidr': '0.0.0.0/0',
- },
- {
- # ping6
- 'ip_protocol': 'icmp',
- 'from_port': -1,
- 'to_port': -1,
- 'cidr': '::/0',
- }
- ]
- rules = list()
- for ruleset in rulesets:
- sg_rule = client.security_group_rules.create(secgroup_id,
- **ruleset)
- self.addCleanup(self.delete_wrapper, sg_rule)
- rules.append(sg_rule)
- return rules
-
- def _create_security_group_nova(self, client=None,
- namestart='secgroup-smoke-'):
- if client is None:
- client = self.compute_client
- # Create security group
- sg_name = data_utils.rand_name(namestart)
- sg_desc = sg_name + " description"
- secgroup = client.security_groups.create(sg_name, sg_desc)
- self.assertEqual(secgroup.name, sg_name)
- self.assertEqual(secgroup.description, sg_desc)
- self.addCleanup(self.delete_wrapper, secgroup)
-
- # Add rules to the security group
- self._create_loginable_secgroup_rule_nova(client, secgroup.id)
-
- return secgroup
-
- def rebuild_server(self, server, client=None, image=None,
- preserve_ephemeral=False, wait=True,
- rebuild_kwargs=None):
- if client is None:
- client = self.compute_client
- if image is None:
- image = CONF.compute.image_ref
- rebuild_kwargs = rebuild_kwargs or {}
-
- LOG.debug("Rebuilding server (name: %s, image: %s, preserve eph: %s)",
- server.name, image, preserve_ephemeral)
- server.rebuild(image, preserve_ephemeral=preserve_ephemeral,
- **rebuild_kwargs)
- if wait:
- self.status_timeout(client.servers, server.id, 'ACTIVE')
-
- def create_server(self, client=None, name=None, image=None, flavor=None,
- wait_on_boot=True, wait_on_delete=True,
- create_kwargs=None):
- """Creates VM instance.
-
- @param client: compute client to create the instance
- @param image: image from which to create the instance
- @param wait_on_boot: wait for status ACTIVE before continue
- @param wait_on_delete: force synchronous delete on cleanup
- @param create_kwargs: additional details for instance creation
- @return: client.server object
- """
- if client is None:
- client = self.compute_client
- if name is None:
- name = data_utils.rand_name('scenario-server-')
- if image is None:
- image = CONF.compute.image_ref
- if flavor is None:
- flavor = CONF.compute.flavor_ref
- if create_kwargs is None:
- create_kwargs = {}
-
- fixed_network_name = CONF.compute.fixed_network_name
- if 'nics' not in create_kwargs and fixed_network_name:
- networks = client.networks.list()
- # If several networks found, set the NetID on which to connect the
- # server to avoid the following error "Multiple possible networks
- # found, use a Network ID to be more specific."
- # See Tempest #1250866
- if len(networks) > 1:
- for network in networks:
- if network.label == fixed_network_name:
- create_kwargs['nics'] = [{'net-id': network.id}]
- break
- # If we didn't find the network we were looking for :
- else:
- msg = ("The network on which the NIC of the server must "
- "be connected can not be found : "
- "fixed_network_name=%s. Starting instance without "
- "specifying a network.") % fixed_network_name
- LOG.info(msg)
-
- LOG.debug("Creating a server (name: %s, image: %s, flavor: %s)",
- name, image, flavor)
- server = client.servers.create(name, image, flavor, **create_kwargs)
- self.assertEqual(server.name, name)
- if wait_on_delete:
- self.addCleanup(self.delete_timeout,
- self.compute_client.servers,
- server.id)
- self.addCleanup_with_wait(self.compute_client.servers, server.id,
- cleanup_callable=self.delete_wrapper,
- cleanup_args=[server])
- if wait_on_boot:
- self.status_timeout(client.servers, server.id, 'ACTIVE')
- # The instance retrieved on creation is missing network
- # details, necessitating retrieval after it becomes active to
- # ensure correct details.
- server = client.servers.get(server.id)
- LOG.debug("Created server: %s", server)
- return server
-
- def create_volume(self, client=None, size=1, name=None,
- snapshot_id=None, imageRef=None, volume_type=None,
- wait_on_delete=True):
- if client is None:
- client = self.volume_client
- if name is None:
- name = data_utils.rand_name('scenario-volume-')
- LOG.debug("Creating a volume (size: %s, name: %s)", size, name)
- volume = client.volumes.create(size=size, display_name=name,
- snapshot_id=snapshot_id,
- imageRef=imageRef,
- volume_type=volume_type)
- if wait_on_delete:
- self.addCleanup(self.delete_timeout,
- self.volume_client.volumes,
- volume.id)
- self.addCleanup_with_wait(self.volume_client.volumes, volume.id,
- exc_type=cinder_exceptions.NotFound)
- self.assertEqual(name, volume.display_name)
- self.status_timeout(client.volumes, volume.id, 'available')
- LOG.debug("Created volume: %s", volume)
- return volume
-
- def create_server_snapshot(self, server, compute_client=None,
- image_client=None, name=None):
- if compute_client is None:
- compute_client = self.compute_client
- if image_client is None:
- image_client = self.image_client
- if name is None:
- name = data_utils.rand_name('scenario-snapshot-')
- LOG.debug("Creating a snapshot image for server: %s", server.name)
- image_id = compute_client.servers.create_image(server, name)
- self.addCleanup_with_wait(self.image_client.images, image_id,
- exc_type=glanceclient.exc.HTTPNotFound)
- self.status_timeout(image_client.images, image_id, 'active')
- snapshot_image = image_client.images.get(image_id)
- self.assertEqual(name, snapshot_image.name)
- LOG.debug("Created snapshot image %s for server %s",
- snapshot_image.name, server.name)
- return snapshot_image
-
- def create_keypair(self, client=None, name=None):
- if client is None:
- client = self.compute_client
- if name is None:
- name = data_utils.rand_name('scenario-keypair-')
- keypair = client.keypairs.create(name)
- self.assertEqual(keypair.name, name)
- self.addCleanup(self.delete_wrapper, keypair)
- return keypair
-
- def get_remote_client(self, server_or_ip, username=None, private_key=None):
- if isinstance(server_or_ip, six.string_types):
- ip = server_or_ip
- else:
- network_name_for_ssh = CONF.compute.network_for_ssh
- ip = server_or_ip.networks[network_name_for_ssh][0]
- if username is None:
- username = CONF.scenario.ssh_user
- if private_key is None:
- private_key = self.keypair.private_key
- linux_client = remote_client.RemoteClient(ip, username,
- pkey=private_key)
- try:
- linux_client.validate_authentication()
- except exceptions.SSHTimeout:
- LOG.exception('ssh connection to %s failed' % ip)
- debug.log_net_debug()
- raise
-
- return linux_client
-
- def _log_console_output(self, servers=None):
- if not CONF.compute_feature_enabled.console_output:
- LOG.debug('Console output not supported, cannot log')
- return
- if not servers:
- servers = self.compute_client.servers.list()
- for server in servers:
- LOG.debug('Console output for %s', server.id)
- LOG.debug(server.get_console_output())
-
- def wait_for_volume_status(self, status):
- volume_id = self.volume.id
- self.status_timeout(
- self.volume_client.volumes, volume_id, status)
-
- def _image_create(self, name, fmt, path, properties=None):
- if properties is None:
- properties = {}
- name = data_utils.rand_name('%s-' % name)
- image_file = open(path, 'rb')
- self.addCleanup(image_file.close)
- params = {
- 'name': name,
- 'container_format': fmt,
- 'disk_format': fmt,
- 'is_public': 'False',
- }
- params.update(properties)
- image = self.image_client.images.create(**params)
- self.addCleanup(self.image_client.images.delete, image)
- self.assertEqual("queued", image.status)
- image.update(data=image_file)
- return image.id
-
- def glance_image_create(self):
- img_path = CONF.scenario.img_dir + "/" + CONF.scenario.img_file
- aki_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.aki_img_file
- ari_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.ari_img_file
- ami_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.ami_img_file
- img_container_format = CONF.scenario.img_container_format
- img_disk_format = CONF.scenario.img_disk_format
- LOG.debug("paths: img: %s, container_fomat: %s, disk_format: %s, "
- "ami: %s, ari: %s, aki: %s" %
- (img_path, img_container_format, img_disk_format,
- ami_img_path, ari_img_path, aki_img_path))
- try:
- self.image = self._image_create('scenario-img',
- img_container_format,
- img_path,
- properties={'disk_format':
- img_disk_format})
- except IOError:
- LOG.debug("A qcow2 image was not found. Try to get a uec image.")
- kernel = self._image_create('scenario-aki', 'aki', aki_img_path)
- ramdisk = self._image_create('scenario-ari', 'ari', ari_img_path)
- properties = {
- 'properties': {'kernel_id': kernel, 'ramdisk_id': ramdisk}
- }
- self.image = self._image_create('scenario-ami', 'ami',
- path=ami_img_path,
- properties=properties)
- LOG.debug("image:%s" % self.image)
-
-
# power/provision states as of icehouse
class BaremetalPowerStates(object):
"""Possible power states of an Ironic node."""
@@ -1582,13 +1058,12 @@
def wait_node(self, instance_id):
"""Waits for a node to be associated with instance_id."""
- from ironicclient import exc as ironic_exceptions
def _get_node():
node = None
try:
node = self.get_node(instance_id=instance_id)
- except ironic_exceptions.HTTPNotFound:
+ except exceptions.NotFound:
pass
return node is not None
@@ -1710,557 +1185,6 @@
control_location=control_location)
-class NetworkScenarioTest(OfficialClientTest):
- """
- Base class for network scenario tests
- """
-
- @classmethod
- def check_preconditions(cls):
- if (CONF.service_available.neutron):
- cls.enabled = True
- # verify that neutron_available is telling the truth
- try:
- cls.network_client.list_networks()
- except exc.EndpointNotFound:
- cls.enabled = False
- raise
- else:
- cls.enabled = False
- msg = 'Neutron not available'
- raise cls.skipException(msg)
-
- @classmethod
- def setUpClass(cls):
- super(NetworkScenarioTest, cls).setUpClass()
- cls.tenant_id = cls.manager.identity_client.tenant_id
-
- def _create_network(self, tenant_id, namestart='network-smoke-'):
- name = data_utils.rand_name(namestart)
- body = dict(
- network=dict(
- name=name,
- tenant_id=tenant_id,
- ),
- )
- result = self.network_client.create_network(body=body)
- network = net_common.DeletableNetwork(client=self.network_client,
- **result['network'])
- self.assertEqual(network.name, name)
- self.addCleanup(self.delete_wrapper, network)
- return network
-
- def _list_networks(self, **kwargs):
- nets = self.network_client.list_networks(**kwargs)
- return nets['networks']
-
- def _list_subnets(self, **kwargs):
- subnets = self.network_client.list_subnets(**kwargs)
- return subnets['subnets']
-
- def _list_routers(self, **kwargs):
- routers = self.network_client.list_routers(**kwargs)
- return routers['routers']
-
- def _list_ports(self, **kwargs):
- ports = self.network_client.list_ports(**kwargs)
- return ports['ports']
-
- def _get_tenant_own_network_num(self, tenant_id):
- nets = self._list_networks(tenant_id=tenant_id)
- return len(nets)
-
- def _get_tenant_own_subnet_num(self, tenant_id):
- subnets = self._list_subnets(tenant_id=tenant_id)
- return len(subnets)
-
- def _get_tenant_own_port_num(self, tenant_id):
- ports = self._list_ports(tenant_id=tenant_id)
- return len(ports)
-
- def _create_subnet(self, network, namestart='subnet-smoke-', **kwargs):
- """
- Create a subnet for the given network within the cidr block
- configured for tenant networks.
- """
-
- def cidr_in_use(cidr, tenant_id):
- """
- :return True if subnet with cidr already exist in tenant
- False else
- """
- cidr_in_use = self._list_subnets(tenant_id=tenant_id, cidr=cidr)
- return len(cidr_in_use) != 0
-
- tenant_cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
- result = None
- # Repeatedly attempt subnet creation with sequential cidr
- # blocks until an unallocated block is found.
- for subnet_cidr in tenant_cidr.subnet(
- CONF.network.tenant_network_mask_bits):
- str_cidr = str(subnet_cidr)
- if cidr_in_use(str_cidr, tenant_id=network.tenant_id):
- continue
-
- body = dict(
- subnet=dict(
- name=data_utils.rand_name(namestart),
- ip_version=4,
- network_id=network.id,
- tenant_id=network.tenant_id,
- cidr=str_cidr,
- ),
- )
- body['subnet'].update(kwargs)
- try:
- result = self.network_client.create_subnet(body=body)
- break
- except exc.NeutronClientException as e:
- is_overlapping_cidr = 'overlaps with another subnet' in str(e)
- if not is_overlapping_cidr:
- raise
- self.assertIsNotNone(result, 'Unable to allocate tenant network')
- subnet = net_common.DeletableSubnet(client=self.network_client,
- **result['subnet'])
- self.assertEqual(subnet.cidr, str_cidr)
- self.addCleanup(self.delete_wrapper, subnet)
- return subnet
-
- def _create_port(self, network, namestart='port-quotatest-'):
- name = data_utils.rand_name(namestart)
- body = dict(
- port=dict(name=name,
- network_id=network.id,
- tenant_id=network.tenant_id))
- result = self.network_client.create_port(body=body)
- self.assertIsNotNone(result, 'Unable to allocate port')
- port = net_common.DeletablePort(client=self.network_client,
- **result['port'])
- self.addCleanup(self.delete_wrapper, port)
- return port
-
- def _get_server_port_id(self, server, ip_addr=None):
- ports = self._list_ports(device_id=server.id, fixed_ip=ip_addr)
- self.assertEqual(len(ports), 1,
- "Unable to determine which port to target.")
- return ports[0]['id']
-
- def _get_network_by_name(self, network_name):
- net = self._list_networks(name=network_name)
- return net_common.AttributeDict(net[0])
-
- def _create_floating_ip(self, thing, external_network_id, port_id=None):
- if not port_id:
- port_id = self._get_server_port_id(thing)
- body = dict(
- floatingip=dict(
- floating_network_id=external_network_id,
- port_id=port_id,
- tenant_id=thing.tenant_id,
- )
- )
- result = self.network_client.create_floatingip(body=body)
- floating_ip = net_common.DeletableFloatingIp(
- client=self.network_client,
- **result['floatingip'])
- self.addCleanup(self.delete_wrapper, floating_ip)
- return floating_ip
-
- def _associate_floating_ip(self, floating_ip, server):
- port_id = self._get_server_port_id(server)
- floating_ip.update(port_id=port_id)
- self.assertEqual(port_id, floating_ip.port_id)
- return floating_ip
-
- def _disassociate_floating_ip(self, floating_ip):
- """
- :param floating_ip: type DeletableFloatingIp
- """
- floating_ip.update(port_id=None)
- self.assertIsNone(floating_ip.port_id)
- return floating_ip
-
- def _create_pool(self, lb_method, protocol, subnet_id):
- """Wrapper utility that returns a test pool."""
- name = data_utils.rand_name('pool-')
- body = {
- "pool": {
- "protocol": protocol,
- "name": name,
- "subnet_id": subnet_id,
- "lb_method": lb_method
- }
- }
- resp = self.network_client.create_pool(body=body)
- pool = net_common.DeletablePool(client=self.network_client,
- **resp['pool'])
- self.assertEqual(pool['name'], name)
- self.addCleanup(self.delete_wrapper, pool)
- return pool
-
- def _create_member(self, address, protocol_port, pool_id):
- """Wrapper utility that returns a test member."""
- body = {
- "member": {
- "protocol_port": protocol_port,
- "pool_id": pool_id,
- "address": address
- }
- }
- resp = self.network_client.create_member(body)
- member = net_common.DeletableMember(client=self.network_client,
- **resp['member'])
- self.addCleanup(self.delete_wrapper, member)
- return member
-
- def _create_vip(self, protocol, protocol_port, subnet_id, pool_id):
- """Wrapper utility that returns a test vip."""
- name = data_utils.rand_name('vip-')
- body = {
- "vip": {
- "protocol": protocol,
- "name": name,
- "subnet_id": subnet_id,
- "pool_id": pool_id,
- "protocol_port": protocol_port
- }
- }
- resp = self.network_client.create_vip(body)
- vip = net_common.DeletableVip(client=self.network_client,
- **resp['vip'])
- self.assertEqual(vip['name'], name)
- self.addCleanup(self.delete_wrapper, vip)
- return vip
-
- def _check_vm_connectivity(self, ip_address,
- username=None,
- private_key=None,
- should_connect=True):
- """
- :param ip_address: server to test against
- :param username: server's ssh username
- :param private_key: server's ssh private key to be used
- :param should_connect: True/False indicates a positive/negative test
- positive - attempt ping and ssh
- negative - attempt ping and fail if it succeeds
-
- :raises: AssertionError if the result of the connectivity check does
- not match the value of the should_connect param
- """
- if should_connect:
- msg = "Timed out waiting for %s to become reachable" % ip_address
- else:
- msg = "ip address %s is reachable" % ip_address
- self.assertTrue(self.ping_ip_address(ip_address,
- should_succeed=should_connect),
- msg=msg)
- if should_connect:
- # no need to check ssh for negative connectivity
- self.get_remote_client(ip_address, username, private_key)
-
- def _check_public_network_connectivity(self, ip_address, username,
- private_key, should_connect=True,
- msg=None, servers=None):
- # The target login is assumed to have been configured for
- # key-based authentication by cloud-init.
- LOG.debug('checking network connections to IP %s with user: %s' %
- (ip_address, username))
- try:
- self._check_vm_connectivity(ip_address,
- username,
- private_key,
- should_connect=should_connect)
- except Exception as e:
- ex_msg = 'Public network connectivity check failed'
- if msg:
- ex_msg += ": " + msg
- LOG.exception(ex_msg)
- self._log_console_output(servers)
- # network debug is called as part of ssh init
- if not isinstance(e, exceptions.SSHTimeout):
- debug.log_net_debug()
- raise
-
- def _check_tenant_network_connectivity(self, server,
- username,
- private_key,
- should_connect=True,
- servers_for_debug=None):
- if not CONF.network.tenant_networks_reachable:
- msg = 'Tenant networks not configured to be reachable.'
- LOG.info(msg)
- return
- # The target login is assumed to have been configured for
- # key-based authentication by cloud-init.
- try:
- for net_name, ip_addresses in server.networks.iteritems():
- for ip_address in ip_addresses:
- self._check_vm_connectivity(ip_address,
- username,
- private_key,
- should_connect=should_connect)
- except Exception as e:
- LOG.exception('Tenant network connectivity check failed')
- self._log_console_output(servers_for_debug)
- # network debug is called as part of ssh init
- if not isinstance(e, exceptions.SSHTimeout):
- debug.log_net_debug()
- raise
-
- def _check_remote_connectivity(self, source, dest, should_succeed=True):
- """
- Check whether dest can be pinged over the source ssh connection.
-
- :param source: RemoteClient: an ssh connection from which to ping
- :param dest: an IP address to ping against
- :param should_succeed: boolean: should the ping succeed or not
- :returns: boolean -- True if the ping result matches should_succeed
- within the timeout, False otherwise
- """
- def ping_remote():
- try:
- source.ping_host(dest)
- except exceptions.SSHExecCommandFailed:
- LOG.warn('Failed to ping IP: %s via a ssh connection from: %s.'
- % (dest, source.ssh_client.host))
- return not should_succeed
- return should_succeed
-
- return tempest.test.call_until_true(ping_remote,
- CONF.compute.ping_timeout,
- 1)
-
- def _create_security_group_neutron(self, tenant_id, client=None,
- namestart='secgroup-smoke-'):
- if client is None:
- client = self.network_client
- secgroup = self._create_empty_security_group(namestart=namestart,
- client=client,
- tenant_id=tenant_id)
-
- # Add rules to the security group
- rules = self._create_loginable_secgroup_rule_neutron(secgroup=secgroup)
- for rule in rules:
- self.assertEqual(tenant_id, rule.tenant_id)
- self.assertEqual(secgroup.id, rule.security_group_id)
- return secgroup
-
- def _create_empty_security_group(self, tenant_id, client=None,
- namestart='secgroup-smoke-'):
- """Create a security group without rules.
-
- Default rules will be created:
- - IPv4 egress to any
- - IPv6 egress to any
-
- :param tenant_id: secgroup will be created in this tenant
- :returns: DeletableSecurityGroup -- containing the secgroup created
- """
- if client is None:
- client = self.network_client
- sg_name = data_utils.rand_name(namestart)
- sg_desc = sg_name + " description"
- sg_dict = dict(name=sg_name,
- description=sg_desc)
- sg_dict['tenant_id'] = tenant_id
- body = dict(security_group=sg_dict)
- result = client.create_security_group(body=body)
- secgroup = net_common.DeletableSecurityGroup(
- client=client,
- **result['security_group']
- )
- self.assertEqual(secgroup.name, sg_name)
- self.assertEqual(tenant_id, secgroup.tenant_id)
- self.assertEqual(secgroup.description, sg_desc)
- self.addCleanup(self.delete_wrapper, secgroup)
- return secgroup
-
- def _default_security_group(self, tenant_id, client=None):
- """Get default secgroup for given tenant_id.
-
- :returns: DeletableSecurityGroup -- default secgroup for given tenant
- """
- if client is None:
- client = self.network_client
- sgs = [
- sg for sg in client.list_security_groups().values()[0]
- if sg['tenant_id'] == tenant_id and sg['name'] == 'default'
- ]
- msg = "No default security group for tenant %s." % (tenant_id)
- self.assertTrue(len(sgs) > 0, msg)
- if len(sgs) > 1:
- msg = "Found %d default security groups" % len(sgs)
- raise exc.NeutronClientNoUniqueMatch(msg=msg)
- return net_common.DeletableSecurityGroup(client=client,
- **sgs[0])
-
- def _create_security_group_rule(self, client=None, secgroup=None,
- tenant_id=None, **kwargs):
- """Create a rule from a dictionary of rule parameters.
-
- Create a rule in a secgroup. If secgroup is not defined, the default
- secgroup for tenant_id is used.
-
- :param secgroup: type DeletableSecurityGroup.
- :param secgroup_id: search for secgroup by id
- default -- choose default secgroup for given tenant_id
- :param tenant_id: if secgroup not passed -- the tenant in which to
- search for default secgroup
- :param kwargs: a dictionary containing rule parameters:
- for example, to allow incoming ssh:
- rule = {
- direction: 'ingress'
- protocol:'tcp',
- port_range_min: 22,
- port_range_max: 22
- }
- """
- if client is None:
- client = self.network_client
- if secgroup is None:
- secgroup = self._default_security_group(tenant_id)
-
- ruleset = dict(security_group_id=secgroup.id,
- tenant_id=secgroup.tenant_id,
- )
- ruleset.update(kwargs)
-
- body = dict(security_group_rule=dict(ruleset))
- sg_rule = client.create_security_group_rule(body=body)
- sg_rule = net_common.DeletableSecurityGroupRule(
- client=client,
- **sg_rule['security_group_rule']
- )
- self.addCleanup(self.delete_wrapper, sg_rule)
- self.assertEqual(secgroup.tenant_id, sg_rule.tenant_id)
- self.assertEqual(secgroup.id, sg_rule.security_group_id)
-
- return sg_rule
-
- def _create_loginable_secgroup_rule_neutron(self, client=None,
- secgroup=None):
- """These rules are intended to permit inbound ssh and icmp
- traffic from all sources, so no group_id is provided.
- Setting a group_id would only permit traffic from ports
- belonging to the same security group.
- """
-
- if client is None:
- client = self.network_client
- rules = []
- rulesets = [
- dict(
- # ssh
- protocol='tcp',
- port_range_min=22,
- port_range_max=22,
- ),
- dict(
- # ping
- protocol='icmp',
- )
- ]
- for ruleset in rulesets:
- for r_direction in ['ingress', 'egress']:
- ruleset['direction'] = r_direction
- try:
- sg_rule = self._create_security_group_rule(
- client=client, secgroup=secgroup, **ruleset)
- except exc.NeutronClientException as ex:
- # if the rule already exists - skip it and continue
- if not (ex.status_code is 409 and 'Security group rule'
- ' already exists' in ex.message):
- raise ex
- else:
- self.assertEqual(r_direction, sg_rule.direction)
- rules.append(sg_rule)
-
- return rules
-
- def _ssh_to_server(self, server, private_key):
- ssh_login = CONF.compute.image_ssh_user
- return self.get_remote_client(server,
- username=ssh_login,
- private_key=private_key)
-
- def _show_quota_network(self, tenant_id):
- quota = self.network_client.show_quota(tenant_id)
- return quota['quota']['network']
-
- def _show_quota_subnet(self, tenant_id):
- quota = self.network_client.show_quota(tenant_id)
- return quota['quota']['subnet']
-
- def _show_quota_port(self, tenant_id):
- quota = self.network_client.show_quota(tenant_id)
- return quota['quota']['port']
-
- def _get_router(self, tenant_id):
- """Retrieve a router for the given tenant id.
-
- If a public router has been configured, it will be returned.
-
- If a public router has not been configured, but a public
- network has, a tenant router will be created and returned that
- routes traffic to the public network.
- """
- router_id = CONF.network.public_router_id
- network_id = CONF.network.public_network_id
- if router_id:
- result = self.network_client.show_router(router_id)
- return net_common.AttributeDict(**result['router'])
- elif network_id:
- router = self._create_router(tenant_id)
- router.add_gateway(network_id)
- return router
- else:
- raise Exception("Neither of 'public_router_id' or "
- "'public_network_id' has been defined.")
-
- def _create_router(self, tenant_id, namestart='router-smoke-'):
- name = data_utils.rand_name(namestart)
- body = dict(
- router=dict(
- name=name,
- admin_state_up=True,
- tenant_id=tenant_id,
- ),
- )
- result = self.network_client.create_router(body=body)
- router = net_common.DeletableRouter(client=self.network_client,
- **result['router'])
- self.assertEqual(router.name, name)
- self.addCleanup(self.delete_wrapper, router)
- return router
-
- def create_networks(self, tenant_id=None):
- """Create a network with a subnet connected to a router.
-
- The baremetal driver is a special case since all nodes are
- on the same shared network.
-
- :returns: network, subnet, router
- """
- if CONF.baremetal.driver_enabled:
- # NOTE(Shrews): This exception is for environments where tenant
- # credential isolation is available, but network separation is
- # not (the current baremetal case). Likely can be removed when
- # test account mgmt is reworked:
- # https://blueprints.launchpad.net/tempest/+spec/test-accounts
- network = self._get_network_by_name(
- CONF.compute.fixed_network_name)
- router = None
- subnet = None
- else:
- if tenant_id is None:
- tenant_id = self.tenant_id
- network = self._create_network(tenant_id)
- router = self._get_router(tenant_id)
- subnet = self._create_subnet(network)
- subnet.add_to_router(router.id)
- return network, subnet, router
-
-
class OrchestrationScenarioTest(ScenarioTest):
"""
Base class for orchestration scenario tests
@@ -2324,38 +1248,38 @@
cls.container_client = cls.manager.container_client
cls.object_client = cls.manager.object_client
- def _get_swift_stat(self):
+ def get_swift_stat(self):
"""get swift status for our user account."""
self.account_client.list_account_containers()
LOG.debug('Swift status information obtained successfully')
- def _create_container(self, container_name=None):
+ def create_container(self, container_name=None):
name = container_name or data_utils.rand_name(
'swift-scenario-container')
self.container_client.create_container(name)
# look for the container to assure it is created
- self._list_and_check_container_objects(name)
+ self.list_and_check_container_objects(name)
LOG.debug('Container %s created' % (name))
return name
- def _delete_container(self, container_name):
+ def delete_container(self, container_name):
self.container_client.delete_container(container_name)
LOG.debug('Container %s deleted' % (container_name))
- def _upload_object_to_container(self, container_name, obj_name=None):
+ def upload_object_to_container(self, container_name, obj_name=None):
obj_name = obj_name or data_utils.rand_name('swift-scenario-object')
obj_data = data_utils.arbitrary_string()
self.object_client.create_object(container_name, obj_name, obj_data)
return obj_name, obj_data
- def _delete_object(self, container_name, filename):
+ def delete_object(self, container_name, filename):
self.object_client.delete_object(container_name, filename)
- self._list_and_check_container_objects(container_name,
- not_present_obj=[filename])
+ self.list_and_check_container_objects(container_name,
+ not_present_obj=[filename])
- def _list_and_check_container_objects(self, container_name,
- present_obj=None,
- not_present_obj=None):
+ def list_and_check_container_objects(self, container_name,
+ present_obj=None,
+ not_present_obj=None):
"""
List objects for a given container and assert which are present and
which are not.
@@ -2373,7 +1297,7 @@
for obj in not_present_obj:
self.assertNotIn(obj, object_list)
- def _change_container_acl(self, container_name, acl):
+ def change_container_acl(self, container_name, acl):
metadata_param = {'metadata_prefix': 'x-container-',
'metadata': {'read': acl}}
self.container_client.update_container_metadata(container_name,
@@ -2381,6 +1305,6 @@
resp, _ = self.container_client.list_container_metadata(container_name)
self.assertEqual(resp['x-container-read'], acl)
- def _download_and_verify(self, container_name, obj_name, expected_data):
+ def download_and_verify(self, container_name, obj_name, expected_data):
_, obj = self.object_client.get_object(container_name, obj_name)
self.assertEqual(obj, expected_data)
diff --git a/tempest/scenario/orchestration/test_server_cfn_init.py b/tempest/scenario/orchestration/test_server_cfn_init.py
index dd7e7d4..0ab4311 100644
--- a/tempest/scenario/orchestration/test_server_cfn_init.py
+++ b/tempest/scenario/orchestration/test_server_cfn_init.py
@@ -86,7 +86,8 @@
if not self.ping_ip_address(server_ip):
self._log_console_output(servers=[server])
self.fail(
- "Timed out waiting for %s to become reachable" % server_ip)
+ "(CfnInitScenarioTest:test_server_cfn_init) Timed out waiting "
+ "for %s to become reachable" % server_ip)
try:
self.client.wait_for_resource_status(
diff --git a/tempest/scenario/test_large_ops.py b/tempest/scenario/test_large_ops.py
index a7ea70f..71b8a7f 100644
--- a/tempest/scenario/test_large_ops.py
+++ b/tempest/scenario/test_large_ops.py
@@ -39,6 +39,9 @@
@classmethod
def setUpClass(cls):
+ if CONF.scenario.large_ops_number < 1:
+ raise cls.skipException("large_ops_number not set to multiple "
+ "instances")
cls.set_network_resources()
super(TestLargeOpsScenario, cls).setUpClass()
@@ -75,8 +78,6 @@
self._wait_for_server_status('ACTIVE')
def _large_ops_scenario(self):
- if CONF.scenario.large_ops_number < 1:
- return
self.glance_image_create()
self.nova_boot()
diff --git a/tempest/scenario/test_load_balancer_basic.py b/tempest/scenario/test_load_balancer_basic.py
index 5e83ff9..6ab870e 100644
--- a/tempest/scenario/test_load_balancer_basic.py
+++ b/tempest/scenario/test_load_balancer_basic.py
@@ -28,7 +28,7 @@
config = config.CONF
-class TestLoadBalancerBasic(manager.NeutronScenarioTest):
+class TestLoadBalancerBasic(manager.NetworkScenarioTest):
"""
This test checks basic load balancing.
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index 84e1048..c764b39 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -25,7 +25,7 @@
LOG = logging.getLogger(__name__)
-class TestNetworkAdvancedServerOps(manager.NeutronScenarioTest):
+class TestNetworkAdvancedServerOps(manager.NetworkScenarioTest):
"""
This test case checks VM connectivity after some advanced
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index 10dfb66..904f248 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -34,7 +34,7 @@
['floating_ip', 'server'])
-class TestNetworkBasicOps(manager.NeutronScenarioTest):
+class TestNetworkBasicOps(manager.NetworkScenarioTest):
"""
This smoke test suite assumes that Nova has been configured to
diff --git a/tempest/scenario/test_security_groups_basic_ops.py b/tempest/scenario/test_security_groups_basic_ops.py
index 20505eb..658d336 100644
--- a/tempest/scenario/test_security_groups_basic_ops.py
+++ b/tempest/scenario/test_security_groups_basic_ops.py
@@ -26,7 +26,7 @@
LOG = logging.getLogger(__name__)
-class TestSecurityGroupsBasicOps(manager.NeutronScenarioTest):
+class TestSecurityGroupsBasicOps(manager.NetworkScenarioTest):
"""
This test suite assumes that Nova has been configured to
diff --git a/tempest/scenario/test_swift_basic_ops.py b/tempest/scenario/test_swift_basic_ops.py
index ad74ec4..9e0fee0 100644
--- a/tempest/scenario/test_swift_basic_ops.py
+++ b/tempest/scenario/test_swift_basic_ops.py
@@ -41,13 +41,13 @@
@test.services('object_storage')
def test_swift_basic_ops(self):
- self._get_swift_stat()
- container_name = self._create_container()
- obj_name, obj_data = self._upload_object_to_container(container_name)
- self._list_and_check_container_objects(container_name, [obj_name])
- self._download_and_verify(container_name, obj_name, obj_data)
- self._delete_object(container_name, obj_name)
- self._delete_container(container_name)
+ self.get_swift_stat()
+ container_name = self.create_container()
+ obj_name, obj_data = self.upload_object_to_container(container_name)
+ self.list_and_check_container_objects(container_name, [obj_name])
+ self.download_and_verify(container_name, obj_name, obj_data)
+ self.delete_object(container_name, obj_name)
+ self.delete_container(container_name)
@test.services('object_storage')
def test_swift_acl_anonymous_download(self):
@@ -58,15 +58,15 @@
4. Check if the object can be download by anonymous user
5. Delete the object and container
"""
- container_name = self._create_container()
- obj_name, _ = self._upload_object_to_container(container_name)
+ container_name = self.create_container()
+ obj_name, _ = self.upload_object_to_container(container_name)
obj_url = '%s/%s/%s' % (self.object_client.base_url,
container_name, obj_name)
http_client = http.ClosingHttp()
resp, _ = http_client.request(obj_url, 'GET')
self.assertEqual(resp.status, 401)
- self._change_container_acl(container_name, '.r:*')
+ self.change_container_acl(container_name, '.r:*')
resp, _ = http_client.request(obj_url, 'GET')
self.assertEqual(resp.status, 200)
- self._delete_object(container_name, obj_name)
- self._delete_container(container_name)
+ self.delete_object(container_name, obj_name)
+ self.delete_container(container_name)
diff --git a/tempest/scenario/utils.py b/tempest/scenario/utils.py
index e2adb34..c20f20c 100644
--- a/tempest/scenario/utils.py
+++ b/tempest/scenario/utils.py
@@ -40,33 +40,33 @@
self.non_ssh_image_pattern = \
CONF.input_scenario.non_ssh_image_regex
# Setup clients
- ocm = clients.OfficialClientManager(
- auth.get_default_credentials('user'))
- self.client = ocm.compute_client
+ os = clients.Manager()
+ self.images_client = os.images_client
+ self.flavors_client = os.flavors_client
def ssh_user(self, image_id):
- _image = self.client.images.get(image_id)
+ _, _image = self.images_client.get_image(image_id)
for regex, user in self.ssh_users:
# First match wins
- if re.match(regex, _image.name) is not None:
+ if re.match(regex, _image['name']) is not None:
return user
else:
return self.default_ssh_user
def _is_sshable_image(self, image):
return not re.search(pattern=self.non_ssh_image_pattern,
- string=str(image.name))
+ string=str(image['name']))
def is_sshable_image(self, image_id):
- _image = self.client.images.get(image_id)
+ _, _image = self.images_client.get_image(image_id)
return self._is_sshable_image(_image)
def _is_flavor_enough(self, flavor, image):
- return image.minDisk <= flavor.disk
+ return image['minDisk'] <= flavor['disk']
def is_flavor_enough(self, flavor_id, image_id):
- _image = self.client.images.get(image_id)
- _flavor = self.client.flavors.get(flavor_id)
+ _, _image = self.images_client.get_image(image_id)
+ _, _flavor = self.flavors_client.get_flavor_details(flavor_id)
return self._is_flavor_enough(_flavor, _image)
@@ -81,7 +81,7 @@
load_tests = testscenarios.load_tests_apply_scenarios
- class TestInputScenario(manager.OfficialClientTest):
+ class TestInputScenario(manager.ScenarioTest):
scenario_utils = utils.InputScenarioUtils()
scenario_flavor = scenario_utils.scenario_flavors
@@ -91,17 +91,18 @@
def test_create_server_metadata(self):
name = rand_name('instance')
- _ = self.compute_client.servers.create(name=name,
- flavor=self.flavor_ref,
- image=self.image_ref)
+ self.servers_client.create_server(name=name,
+ flavor_ref=self.flavor_ref,
+ image_ref=self.image_ref)
"""
validchars = "-_.{ascii}{digit}".format(ascii=string.ascii_letters,
digit=string.digits)
def __init__(self):
- ocm = clients.OfficialClientManager(
+ os = clients.Manager(
auth.get_default_credentials('user', fill_in=False))
- self.client = ocm.compute_client
+ self.images_client = os.images_client
+ self.flavors_client = os.flavors_client
self.image_pattern = CONF.input_scenario.image_regex
self.flavor_pattern = CONF.input_scenario.flavor_regex
@@ -118,10 +119,11 @@
if not CONF.service_available.glance:
return []
if not hasattr(self, '_scenario_images'):
- images = self.client.images.list(detailed=False)
+ _, images = self.images_client.list_images()
self._scenario_images = [
- (self._normalize_name(i.name), dict(image_ref=i.id))
- for i in images if re.search(self.image_pattern, str(i.name))
+ (self._normalize_name(i['name']), dict(image_ref=i['id']))
+ for i in images if re.search(self.image_pattern,
+ str(i['name']))
]
return self._scenario_images
@@ -131,10 +133,11 @@
:return: a scenario with name and uuid of flavors
"""
if not hasattr(self, '_scenario_flavors'):
- flavors = self.client.flavors.list(detailed=False)
+ _, flavors = self.flavors_client.list_flavors()
self._scenario_flavors = [
- (self._normalize_name(f.name), dict(flavor_ref=f.id))
- for f in flavors if re.search(self.flavor_pattern, str(f.name))
+ (self._normalize_name(f['name']), dict(flavor_ref=f['id']))
+ for f in flavors if re.search(self.flavor_pattern,
+ str(f['name']))
]
return self._scenario_flavors
diff --git a/tempest/services/compute/json/security_group_default_rules_client.py b/tempest/services/compute/json/security_group_default_rules_client.py
index 6d29837..7743f9c 100644
--- a/tempest/services/compute/json/security_group_default_rules_client.py
+++ b/tempest/services/compute/json/security_group_default_rules_client.py
@@ -15,6 +15,8 @@
import json
+from tempest.api_schema.response.compute.v2 import \
+ security_group_default_rule as schema
from tempest.common import rest_client
from tempest import config
@@ -46,8 +48,9 @@
post_body = json.dumps({'security_group_default_rule': post_body})
url = 'os-security-group-default-rules'
resp, body = self.post(url, post_body)
- self.expected_success(200, resp.status)
body = json.loads(body)
+ self.validate_response(schema.create_get_security_group_default_rule,
+ resp, body)
return resp, body['security_group_default_rule']
def delete_security_group_default_rule(self,
@@ -55,20 +58,23 @@
"""Deletes the provided Security Group default rule."""
resp, body = self.delete('os-security-group-default-rules/%s' % str(
security_group_default_rule_id))
- self.expected_success(204, resp.status)
+ self.validate_response(schema.delete_security_group_default_rule,
+ resp, body)
return resp, body
def list_security_group_default_rules(self):
"""List all Security Group default rules."""
resp, body = self.get('os-security-group-default-rules')
- self.expected_success(200, resp.status)
body = json.loads(body)
+ self.validate_response(schema.list_security_group_default_rules,
+ resp, body)
return resp, body['security_group_default_rules']
def get_security_group_default_rule(self, security_group_default_rule_id):
"""Return the details of provided Security Group default rule."""
resp, body = self.get('os-security-group-default-rules/%s' % str(
security_group_default_rule_id))
- self.expected_success(200, resp.status)
body = json.loads(body)
+ self.validate_response(schema.create_get_security_group_default_rule,
+ resp, body)
return resp, body['security_group_default_rule']
diff --git a/tempest/services/queuing/__init__.py b/tempest/services/messaging/__init__.py
similarity index 100%
rename from tempest/services/queuing/__init__.py
rename to tempest/services/messaging/__init__.py
diff --git a/tempest/services/queuing/json/__init__.py b/tempest/services/messaging/json/__init__.py
similarity index 100%
rename from tempest/services/queuing/json/__init__.py
rename to tempest/services/messaging/json/__init__.py
diff --git a/tempest/services/queuing/json/queuing_client.py b/tempest/services/messaging/json/messaging_client.py
similarity index 95%
rename from tempest/services/queuing/json/queuing_client.py
rename to tempest/services/messaging/json/messaging_client.py
index 14960ad..3e82399 100644
--- a/tempest/services/queuing/json/queuing_client.py
+++ b/tempest/services/messaging/json/messaging_client.py
@@ -16,7 +16,7 @@
import json
import urllib
-from tempest.api_schema.response.queuing.v1 import queues as queues_schema
+from tempest.api_schema.response.messaging.v1 import queues as queues_schema
from tempest.common import rest_client
from tempest.common.utils import data_utils
from tempest import config
@@ -25,11 +25,11 @@
CONF = config.CONF
-class QueuingClientJSON(rest_client.RestClient):
+class MessagingClientJSON(rest_client.RestClient):
def __init__(self, auth_provider):
- super(QueuingClientJSON, self).__init__(auth_provider)
- self.service = CONF.queuing.catalog_type
+ super(MessagingClientJSON, self).__init__(auth_provider)
+ self.service = CONF.messaging.catalog_type
self.version = '1'
self.uri_prefix = 'v{0}'.format(self.version)
diff --git a/tempest/services/object_storage/account_client.py b/tempest/services/object_storage/account_client.py
index eca57c0..4dc588f 100644
--- a/tempest/services/object_storage/account_client.py
+++ b/tempest/services/object_storage/account_client.py
@@ -58,8 +58,6 @@
"""Delete an account."""
url = ''
if params:
- if 'bulk-delete' in params:
- url += 'bulk-delete&'
url = '?%s%s' % (url, urllib.urlencode(params))
resp, body = self.delete(url, headers={}, body=data)
@@ -74,13 +72,19 @@
return resp, body
def create_account_metadata(self, metadata,
- metadata_prefix='X-Account-Meta-'):
+ metadata_prefix='X-Account-Meta-',
+ data=None, params=None):
"""Creates an account metadata entry."""
headers = {}
- for key in metadata:
- headers[metadata_prefix + key] = metadata[key]
+ if metadata:
+ for key in metadata:
+ headers[metadata_prefix + key] = metadata[key]
- resp, body = self.post('', headers=headers, body=None)
+ url = ''
+ if params:
+ url = '?%s%s' % (url, urllib.urlencode(params))
+
+ resp, body = self.post(url, headers=headers, body=data)
return resp, body
def delete_account_metadata(self, metadata,
diff --git a/tempest/services/volume/json/snapshots_client.py b/tempest/services/volume/json/snapshots_client.py
index f50ba2f..1f8065b 100644
--- a/tempest/services/volume/json/snapshots_client.py
+++ b/tempest/services/volume/json/snapshots_client.py
@@ -24,15 +24,16 @@
LOG = logging.getLogger(__name__)
-class SnapshotsClientJSON(rest_client.RestClient):
- """Client class to send CRUD Volume API requests."""
+class BaseSnapshotsClientJSON(rest_client.RestClient):
+ """Base Client class to send CRUD Volume API requests."""
def __init__(self, auth_provider):
- super(SnapshotsClientJSON, self).__init__(auth_provider)
+ super(BaseSnapshotsClientJSON, self).__init__(auth_provider)
self.service = CONF.volume.catalog_type
self.build_interval = CONF.volume.build_interval
self.build_timeout = CONF.volume.build_timeout
+ self.create_resp = 200
def list_snapshots(self, params=None):
"""List all the snapshot."""
@@ -77,7 +78,7 @@
post_body = json.dumps({'snapshot': post_body})
resp, body = self.post('snapshots', post_body)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.expected_success(self.create_resp, resp.status)
return resp, body['snapshot']
def update_snapshot(self, snapshot_id, **kwargs):
@@ -203,3 +204,7 @@
resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body)
self.expected_success(202, resp.status)
return resp, body
+
+
+class SnapshotsClientJSON(BaseSnapshotsClientJSON):
+ """Client class to send CRUD Volume V1 API requests."""
diff --git a/tempest/services/volume/v2/json/snapshots_client.py b/tempest/services/volume/v2/json/snapshots_client.py
new file mode 100644
index 0000000..553176b
--- /dev/null
+++ b/tempest/services/volume/v2/json/snapshots_client.py
@@ -0,0 +1,23 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.services.volume.json import snapshots_client
+
+
+class SnapshotsV2ClientJSON(snapshots_client.BaseSnapshotsClientJSON):
+ """Client class to send CRUD Volume V2 API requests."""
+
+ def __init__(self, auth_provider):
+ super(SnapshotsV2ClientJSON, self).__init__(auth_provider)
+
+ self.api_version = "v2"
+ self.create_resp = 202
diff --git a/tempest/services/volume/v2/xml/snapshots_client.py b/tempest/services/volume/v2/xml/snapshots_client.py
new file mode 100644
index 0000000..b29d86c
--- /dev/null
+++ b/tempest/services/volume/v2/xml/snapshots_client.py
@@ -0,0 +1,23 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.services.volume.xml import snapshots_client
+
+
+class SnapshotsV2ClientXML(snapshots_client.BaseSnapshotsClientXML):
+ """Client class to send CRUD Volume V2 API requests."""
+
+ def __init__(self, auth_provider):
+ super(SnapshotsV2ClientXML, self).__init__(auth_provider)
+
+ self.api_version = "v2"
+ self.create_resp = 202
diff --git a/tempest/services/volume/xml/snapshots_client.py b/tempest/services/volume/xml/snapshots_client.py
index 7636707..ce98eea 100644
--- a/tempest/services/volume/xml/snapshots_client.py
+++ b/tempest/services/volume/xml/snapshots_client.py
@@ -26,16 +26,17 @@
LOG = logging.getLogger(__name__)
-class SnapshotsClientXML(rest_client.RestClient):
- """Client class to send CRUD Volume API requests."""
+class BaseSnapshotsClientXML(rest_client.RestClient):
+ """Base Client class to send CRUD Volume API requests."""
TYPE = "xml"
def __init__(self, auth_provider):
- super(SnapshotsClientXML, self).__init__(auth_provider)
+ super(BaseSnapshotsClientXML, self).__init__(auth_provider)
self.service = CONF.volume.catalog_type
self.build_interval = CONF.volume.build_interval
self.build_timeout = CONF.volume.build_timeout
+ self.create_resp = 200
def list_snapshots(self, params=None):
"""List all snapshot."""
@@ -90,7 +91,7 @@
resp, body = self.post('snapshots',
str(common.Document(snapshot)))
body = common.xml_to_json(etree.fromstring(body))
- self.expected_success(200, resp.status)
+ self.expected_success(self.create_resp, resp.status)
return resp, body
def update_snapshot(self, snapshot_id, **kwargs):
@@ -243,3 +244,7 @@
body = common.xml_to_json(etree.fromstring(body))
self.expected_success(202, resp.status)
return resp, body
+
+
+class SnapshotsClientXML(BaseSnapshotsClientXML):
+ """Client class to send CRUD Volume V1 API requests."""
diff --git a/tempest/thirdparty/boto/test_ec2_instance_run.py b/tempest/thirdparty/boto/test_ec2_instance_run.py
index c0d3f7a..ee904c7 100644
--- a/tempest/thirdparty/boto/test_ec2_instance_run.py
+++ b/tempest/thirdparty/boto/test_ec2_instance_run.py
@@ -200,6 +200,29 @@
instance.terminate()
self.assertInstanceStateWait(instance, '_GONE')
+ def test_run_reboot_terminate_instance(self):
+ # EC2 run, wait until it reaches the running state, then reboot it,
+ # wait until it is running again, and then terminate it
+ image_ami = self.ec2_client.get_image(self.images["ami"]
+ ["image_id"])
+ reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
+ ramdisk_id=self.images["ari"]["image_id"],
+ instance_type=self.instance_type)
+
+ self.assertEqual(1, len(reservation.instances))
+
+ instance = reservation.instances[0]
+ if instance.state != "running":
+ self.assertInstanceStateWait(instance, "running")
+
+ instance.reboot()
+ if instance.state != "running":
+ self.assertInstanceStateWait(instance, "running")
+ LOG.debug("Instance rebooted - state: %s", instance.state)
+
+ instance.terminate()
+ self.assertInstanceStateWait(instance, '_GONE')
+
def test_compute_with_volumes(self):
# EC2 1. integration test (not strict)
image_ami = self.ec2_client.get_image(self.images["ami"]["image_id"])
diff --git a/tempest/thirdparty/boto/test_s3_buckets.py b/tempest/thirdparty/boto/test_s3_buckets.py
index 3a8dc89..1576492 100644
--- a/tempest/thirdparty/boto/test_s3_buckets.py
+++ b/tempest/thirdparty/boto/test_s3_buckets.py
@@ -14,7 +14,6 @@
# under the License.
from tempest.common.utils import data_utils
-from tempest import test
from tempest.thirdparty.boto import test as boto_test
@@ -25,7 +24,6 @@
super(S3BucketsTest, cls).setUpClass()
cls.client = cls.os.s3_client
- @test.skip_because(bug="1076965")
def test_create_and_get_delete_bucket(self):
# S3 Create, get and delete bucket
bucket_name = data_utils.rand_name("s3bucket-")
diff --git a/test-requirements.txt b/test-requirements.txt
index cd8154b..ba70259 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,10 +1,13 @@
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
hacking>=0.9.2,<0.10
# needed for doc build
sphinx>=1.1.2,!=1.2.0,<1.3
python-subunit>=0.0.18
-oslosphinx
+oslosphinx>=2.2.0 # Apache-2.0
mox>=0.5.3
mock>=1.0
coverage>=3.6
-oslotest
-stevedore>=0.14
+oslotest>=1.1.0 # Apache-2.0
+stevedore>=1.0.0 # Apache-2.0
diff --git a/tox.ini b/tox.ini
index 492c4f6..cab59a8 100644
--- a/tox.ini
+++ b/tox.ini
@@ -8,6 +8,8 @@
OS_TEST_PATH=./tempest/test_discover
usedevelop = True
install_command = pip install -U {opts} {packages}
+whitelist_externals = bash
+
[testenv:py26]
setenv = OS_TEST_PATH=./tempest/tests
@@ -17,6 +19,11 @@
setenv = OS_TEST_PATH=./tempest/tests
commands = python setup.py test --slowest --testr-arg='tempest\.tests {posargs}'
+[testenv:py34]
+setenv = OS_TEST_PATH=./tempest/tests
+ PYTHONHASHSEED=0
+commands = python setup.py test --slowest --testr-arg='tempest\.tests {posargs}'
+
[testenv:py27]
setenv = OS_TEST_PATH=./tempest/tests
commands = python setup.py test --slowest --testr-arg='tempest\.tests {posargs}'