Merge "Fix create container method"
diff --git a/tempest/api/compute/images/test_images_oneserver.py b/tempest/api/compute/images/test_images_oneserver.py
index e62e25e..058e7e6 100644
--- a/tempest/api/compute/images/test_images_oneserver.py
+++ b/tempest/api/compute/images/test_images_oneserver.py
@@ -15,6 +15,7 @@
from tempest.api.compute import base
from tempest import config
+from tempest.lib.common import api_version_utils
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
@@ -86,5 +87,9 @@
# 4 byte utf-8 character.
utf8_name = data_utils.rand_name(b'\xe2\x82\xa1'.decode('utf-8'))
body = self.client.create_image(self.server_id, name=utf8_name)
- image_id = data_utils.parse_image_id(body.response['location'])
+ if api_version_utils.compare_version_header_to_response(
+ "OpenStack-API-Version", "compute 2.45", body.response, "lt"):
+ image_id = body['image_id']
+ else:
+ image_id = data_utils.parse_image_id(body.response['location'])
self.addCleanup(self.client.delete_image, image_id)
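
Editor's note: starting with compute API microversion 2.45, the create image action returns the image id in the response body instead of only a Location header, which is why the hunk above (and the two hunks in test_images_oneserver_negative.py below) branch on the response's microversion header. The same check appears three times; a shared helper along the following lines could avoid the duplication. This is a minimal sketch only; image_id_from_response is a hypothetical name and is not part of this change.

    from tempest.lib.common import api_version_utils
    from tempest.lib.common.utils import data_utils

    def image_id_from_response(body):
        # Hypothetical helper wrapping the version check used in the hunks:
        # when the check passes (2.45-style response), the image id is in the
        # response body; otherwise it is parsed from the Location header.
        if api_version_utils.compare_version_header_to_response(
                "OpenStack-API-Version", "compute 2.45", body.response, "lt"):
            return body['image_id']
        return data_utils.parse_image_id(body.response['location'])
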
diff --git a/tempest/api/compute/images/test_images_oneserver_negative.py b/tempest/api/compute/images/test_images_oneserver_negative.py
index 7ecfa0a..4de00ce 100644
--- a/tempest/api/compute/images/test_images_oneserver_negative.py
+++ b/tempest/api/compute/images/test_images_oneserver_negative.py
@@ -19,6 +19,7 @@
from tempest.api.compute import base
from tempest.common import waiters
from tempest import config
+from tempest.lib.common import api_version_utils
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
@@ -105,7 +106,11 @@
self.assertRaises(lib_exc.Conflict, self.create_image_from_server,
self.server_id)
- image_id = data_utils.parse_image_id(image.response['location'])
+ if api_version_utils.compare_version_header_to_response(
+ "OpenStack-API-Version", "compute 2.45", image.response, "lt"):
+ image_id = image['image_id']
+ else:
+ image_id = data_utils.parse_image_id(image.response['location'])
self.client.delete_image(image_id)
@decorators.attr(type=['negative'])
@@ -123,7 +128,11 @@
# Return an error while trying to delete an image that is still being created
image = self.create_image_from_server(self.server_id)
- image_id = data_utils.parse_image_id(image.response['location'])
+ if api_version_utils.compare_version_header_to_response(
+ "OpenStack-API-Version", "compute 2.45", image.response, "lt"):
+ image_id = image['image_id']
+ else:
+ image_id = data_utils.parse_image_id(image.response['location'])
self.addCleanup(self._reset_server)
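
Editor's note: with the hypothetical image_id_from_response helper sketched after the first file, each of the branches added in this module would collapse to a single call; illustrative only, not part of the change:

    # Replaces the four-line microversion branch at each call site.
    image_id = image_id_from_response(image)
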
diff --git a/tempest/api/volume/test_volumes_extend.py b/tempest/api/volume/test_volumes_extend.py
index 1eb76a0..de28a30 100644
--- a/tempest/api/volume/test_volumes_extend.py
+++ b/tempest/api/volume/test_volumes_extend.py
@@ -13,12 +13,15 @@
# License for the specific language governing permissions and limitations
# under the License.
+import time
+
import testtools
from tempest.api.volume import base
from tempest.common import waiters
from tempest import config
from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
CONF = config.CONF
@@ -53,3 +56,129 @@
resized_volume = self.volumes_client.show_volume(
volume['id'])['volume']
self.assertEqual(extend_size, resized_volume['size'])
+
+
+class VolumesExtendAttachedTest(base.BaseVolumeTest):
+ """Tests extending the size of an attached volume."""
+
+ # We need admin credentials for getting instance action event details. By
+ # default a non-admin can list and show instance actions if they own the
+ # server instance, but since the event details can contain error messages
+ # and tracebacks, like an instance fault, those are not viewable by
+ # non-admins. This is obviously not a great user experience since the user
+ # may not know when the operation is actually complete. A microversion in
+ # the compute API will be added so that non-admins can see instance action
+ # events but will continue to hide the traceback field.
+ # TODO(mriedem): Change this to not rely on the admin user to get the event
+ # details once that microversion is available in Nova.
+ credentials = ['primary', 'admin']
+
+ _api_version = 3
+ # NOTE(mriedem): The minimum required volume API version is 3.42 and the
+ # minimum required compute API microversion is 2.51, but the compute call
+ # is implicit - Cinder calls Nova at that microversion, Tempest does not.
+ min_microversion = '3.42'
+
+ @classmethod
+ def setup_clients(cls):
+ super(VolumesExtendAttachedTest, cls).setup_clients()
+ cls.admin_servers_client = cls.os_admin.servers_client
+
+ def _find_extend_volume_instance_action(self, server_id):
+ actions = self.servers_client.list_instance_actions(
+ server_id)['instanceActions']
+ for action in actions:
+ if action['action'] == 'extend_volume':
+ return action
+
+ def _find_extend_volume_instance_action_finish_event(self, action):
+ # This has to be called by an admin client otherwise
+ # the events don't show up.
+ action = self.admin_servers_client.show_instance_action(
+ action['instance_uuid'], action['request_id'])['instanceAction']
+ for event in action['events']:
+ if (event['event'] == 'compute_extend_volume' and
+ event['finish_time']):
+ return event
+
+ @decorators.idempotent_id('301f5a30-1c6f-4ea0-be1a-91fd28d44354')
+ @testtools.skipUnless(CONF.volume_feature_enabled.extend_attached_volume,
+ "Attached volume extend is disabled.")
+ def test_extend_attached_volume(self):
+ """This is a happy path test which does the following:
+
+ * Create a volume at the configured volume_size.
+ * Create a server instance.
+ * Attach the volume to the server.
+ * Wait for the volume status to be "in-use".
+ * Extend the size of the volume and wait for the volume status to go
+ back to "in-use".
+ * Assert the volume size change is reflected in the volume API.
+ * Wait for the "compute_extend_volume" instance action event to show
+ up in the compute API with the success or failure status. We fail
+ if we timeout waiting for the instance action event to show up, or
+ if the action on the server fails.
+ """
+ # Create a test volume. Will be automatically cleaned up on teardown.
+ volume = self.create_volume()
+ # Create a test server. Will be automatically cleaned up on teardown.
+ server = self.create_server()
+ # Attach the volume to the server and wait for the volume status to be
+ # "in-use".
+ self.attach_volume(server['id'], volume['id'])
+ # Extend the size of the volume. If this is successful, the volume API
+ # will change the status on the volume to "extending" before doing an
+ # RPC cast to the volume manager on the backend. Note that we multiply
+ # the size of the volume since certain Cinder backends, e.g. ScaleIO,
+ # require multiples of 8GB.
+ extend_size = volume['size'] * 2
+ self.volumes_client.extend_volume(volume['id'], new_size=extend_size)
+ # The volume status should go back to in-use since it is still attached
+ # to the server instance.
+ waiters.wait_for_volume_resource_status(self.volumes_client,
+ volume['id'], 'in-use')
+ # Assert that the volume size has changed in the volume API.
+ volume = self.volumes_client.show_volume(volume['id'])['volume']
+ self.assertEqual(extend_size, volume['size'])
+ # Now we wait for the "compute_extend_volume" instance action event
+ # to show up for the server instance. This is our indication that the
+ # asynchronous operation is complete on the compute side.
+ start_time = int(time.time())
+ timeout = self.servers_client.build_timeout
+ action = self._find_extend_volume_instance_action(server['id'])
+ while action is None and int(time.time()) - start_time < timeout:
+ time.sleep(self.servers_client.build_interval)
+ action = self._find_extend_volume_instance_action(server['id'])
+
+ if action is None:
+ msg = ("Timed out waiting to get 'extend_volume' instance action "
+ "record for server %(server)s after %(timeout)s seconds." %
+ {'server': server['id'], 'timeout': timeout})
+ raise lib_exc.TimeoutException(msg)
+
+ # Now that we found the extend_volume instance action, we can wait for
+ # the compute_extend_volume instance action event to show up to
+ # indicate the operation is complete.
+ start_time = int(time.time())
+ event = self._find_extend_volume_instance_action_finish_event(action)
+ while event is None and int(time.time()) - start_time < timeout:
+ time.sleep(self.servers_client.build_interval)
+ event = self._find_extend_volume_instance_action_finish_event(
+ action)
+
+ if event is None:
+ msg = ("Timed out waiting to get 'compute_extend_volume' instance "
+ "action event record for server %(server)s and request "
+ "%(request_id)s after %(timeout)s seconds." %
+ {'server': server['id'],
+ 'request_id': action['request_id'],
+ 'timeout': timeout})
+ raise lib_exc.TimeoutException(msg)
+
+ # Finally, assert that the action completed successfully.
+ self.assertEqual(
+ 'success', event['result'].lower(),
+ "Unexpected compute_extend_volume result '%(result)s' for request "
+ "%(request_id)s." %
+ {'result': event['result'],
+ 'request_id': action['request_id']})
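
Editor's note: the two polling loops in test_extend_attached_volume follow the same fetch/sleep/timeout pattern. A small generic waiter could factor them out; this is a sketch under the assumption that a helper of this shape is acceptable (wait_for_result is a hypothetical name; build_timeout and build_interval are the standard attributes of the servers client already used above).

    import time

    from tempest.lib import exceptions as lib_exc

    def wait_for_result(client, fetch, timeout_msg):
        # Hypothetical helper: poll fetch() until it returns something other
        # than None, or the client's build_timeout (in seconds) is exceeded,
        # sleeping build_interval seconds between attempts.
        timeout = client.build_timeout
        start_time = int(time.time())
        result = fetch()
        while result is None and int(time.time()) - start_time < timeout:
            time.sleep(client.build_interval)
            result = fetch()
        if result is None:
            raise lib_exc.TimeoutException(timeout_msg)
        return result

With that, the first loop would reduce to a single call such as action = wait_for_result(self.servers_client, lambda: self._find_extend_volume_instance_action(server['id']), msg), and the event loop would follow the same shape.
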
diff --git a/tempest/config.py b/tempest/config.py
index 4d0839a..024a638 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -836,7 +836,14 @@
help="Is the v2 volume API enabled"),
cfg.BoolOpt('api_v3',
default=True,
- help="Is the v3 volume API enabled")
+ help="Is the v3 volume API enabled"),
+ cfg.BoolOpt('extend_attached_volume',
+ default=False,
+ help='Does the cloud support extending the size of a volume '
+ 'which is currently attached to a server instance? This '
+ 'depends on the 3.42 volume API microversion and the '
+ '2.51 compute API microversion. Also, not all volume or '
+ 'compute backends support this operation.')
]
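
Editor's note: the new option defaults to False, so the VolumesExtendAttachedTest above only runs when it is enabled explicitly. Based on the CONF.volume_feature_enabled guard used in the test, the tempest.conf entry would look roughly like the following; the group name is inferred from that attribute and is an assumption, not shown in this change.

    [volume-feature-enabled]
    # Assumed group name, matching CONF.volume_feature_enabled in the test
    # above; enable only if the cloud supports Cinder 3.42 and Nova 2.51.
    extend_attached_volume = True
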