Merge "Update shrink scenario tests"
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index aa97335..a6523e5 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -1,17 +1,35 @@
-If you would like to contribute to the development of OpenStack, you must
-follow the steps in this page:
+Contributor Guidelines
+======================
- https://docs.openstack.org/infra/manual/developers.html
+The source repository for this project can be found at:
-If you already have a good understanding of how the system works and your
-OpenStack accounts are set up, you can skip to the development workflow
-section of this documentation to learn how changes to OpenStack should be
-submitted for review via the Gerrit tool:
+ https://opendev.org/openstack/manila-tempest-plugin
- https://docs.openstack.org/infra/manual/developers.html#development-workflow
+Pull requests submitted through GitHub are not monitored.
-Pull requests submitted through GitHub will be ignored.
+To start contributing to OpenStack, follow the steps in the contribution guide
+to set up and use Gerrit:
-Bugs should be filed on Launchpad, not GitHub:
+ https://docs.openstack.org/contributors/code-and-documentation/quick-start.html
- https://bugs.launchpad.net/manila-tempest-plugin
+Bugs should be filed against the Manila project on Launchpad:
+
+ https://bugs.launchpad.net/manila
+
+For more specific information about contributing to this repository, see the
+manila contributor guide. That guide has information regarding the team's task
+trackers, communicating with other project developers and contacting the
+core team. It also has plenty of tips and tricks to get started with a
+development environment to contribute to, and test changes within this
+tempest plugin repository:
+
+ https://docs.openstack.org/manila/latest/contributor/contributing.html
+
+Information regarding running tests from this repository is here:
+
+ https://docs.openstack.org/manila/latest/contributor/tempest_tests.html
+
+This project makes extensive use of ``tempest.lib``, the stable library
+interface of the Tempest project. You can read more about it here:
+
+ https://docs.openstack.org/tempest/latest/library.html
diff --git a/HACKING.rst b/HACKING.rst
index cd3c49c..da529fb 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -1,4 +1,6 @@
-openstack Style Commandments
-===============================================
+OpenStack Style Commandments
+============================
-Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/
+Read the OpenStack Style Commandments here:
+
+ https://docs.openstack.org/hacking/latest/
diff --git a/README.rst b/README.rst
index de0ec32..cf2f7fb 100644
--- a/README.rst
+++ b/README.rst
@@ -2,9 +2,10 @@
manila-tempest-plugin
=====================
-Tempest plugin manila-tempest-plugin
+This repository contains a `Tempest`_ `test plugin`_ to verify the
+functionality of the `OpenStack Shared File System Service`_, Manila.
-Tempest plugin to test Manila's features
+See `contributor information`_ to learn more.
* Free software: Apache license
* Documentation: https://docs.openstack.org/manila/latest/
@@ -12,7 +13,7 @@
* Source: https://opendev.org/openstack/manila-tempest-plugin
* Bugs: https://bugs.launchpad.net/manila
-Features
---------
-
-* TODO
+.. _Tempest: https://docs.openstack.org/tempest
+.. _test plugin: https://docs.openstack.org/tempest/latest/plugin.html
+.. _contributor information: CONTRIBUTING.rst
+.. _OpenStack Shared File System Service: https://docs.openstack.org/manila
\ No newline at end of file
diff --git a/manila_tempest_tests/README.rst b/manila_tempest_tests/README.rst
deleted file mode 100644
index 7569d96..0000000
--- a/manila_tempest_tests/README.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-====================
-Tempest Integration
-====================
-
-This directory contains Tempest tests to cover Manila project.
-
diff --git a/manila_tempest_tests/common/constants.py b/manila_tempest_tests/common/constants.py
index 69cd1ff..4c8cbf7 100644
--- a/manila_tempest_tests/common/constants.py
+++ b/manila_tempest_tests/common/constants.py
@@ -71,6 +71,7 @@
# Share groups
MIN_SHARE_GROUP_MICROVERSION = '2.31'
+SHARE_GROUPS_GRADUATION_VERSION = '2.55'
SHARE_GROUP_SIMPLE_KEYS = {
'id', 'name', 'links',
}
diff --git a/manila_tempest_tests/config.py b/manila_tempest_tests/config.py
index e0e8ea3..2205f6b 100644
--- a/manila_tempest_tests/config.py
+++ b/manila_tempest_tests/config.py
@@ -30,7 +30,7 @@
help="The minimum api microversion is configured to be the "
"value of the minimum microversion supported by Manila."),
cfg.StrOpt("max_api_microversion",
- default="2.51",
+ default="2.55",
help="The maximum api microversion is configured to be the "
"value of the latest microversion supported by Manila."),
cfg.StrOpt("region",
@@ -240,6 +240,11 @@
cfg.BoolOpt("run_mount_snapshot_tests",
default=False,
help="Enable or disable mountable snapshot tests."),
+ cfg.BoolOpt("run_create_share_from_snapshot_in_another_pool_or_az_tests",
+ default=False,
+ help="Defines whether to run tests that create share from "
+ "snapshots in another pool or az. Enable this "
+ "option if the used driver supports it."),
cfg.StrOpt("image_with_share_tools",
default="manila-service-image-master",
diff --git a/manila_tempest_tests/services/share/v2/json/shares_client.py b/manila_tempest_tests/services/share/v2/json/shares_client.py
index 1c7095c..5e2f8df 100644
--- a/manila_tempest_tests/services/share/v2/json/shares_client.py
+++ b/manila_tempest_tests/services/share/v2/json/shares_client.py
@@ -1033,8 +1033,9 @@
snapshots=None, gigabytes=None, snapshot_gigabytes=None,
share_networks=None,
share_groups=None, share_group_snapshots=None,
- force=True, share_type=None,
- url=None, version=LATEST_MICROVERSION):
+ force=True, share_type=None, share_replicas=None,
+ replica_gigabytes=None, url=None,
+ version=LATEST_MICROVERSION):
if url is None:
url = self._get_quotas_url(version)
url += '/%s' % tenant_id
@@ -1057,6 +1058,10 @@
put_body["share_groups"] = share_groups
if share_group_snapshots is not None:
put_body["share_group_snapshots"] = share_group_snapshots
+ if share_replicas is not None:
+ put_body["share_replicas"] = share_replicas
+ if replica_gigabytes is not None:
+ put_body["replica_gigabytes"] = replica_gigabytes
put_body = json.dumps({"quota_set": put_body})
resp, body = self.put(url, put_body, version=version)
@@ -1074,6 +1079,8 @@
"""Create a new share group."""
uri = 'share-groups'
post_body = {}
+ headers, extra_headers = utils.get_extra_headers(
+ version, constants.SHARE_GROUPS_GRADUATION_VERSION)
if name:
post_body['name'] = name
if description:
@@ -1091,8 +1098,8 @@
post_body['availability_zone'] = availability_zone
body = json.dumps({'share_group': post_body})
- resp, body = self.post(uri, body, headers=EXPERIMENTAL,
- extra_headers=True, version=version)
+ resp, body = self.post(uri, body, headers=headers,
+ extra_headers=extra_headers, version=version)
self.expected_success(202, resp.status)
return self._parse_resp(body)
@@ -1100,8 +1107,10 @@
def delete_share_group(self, share_group_id, version=LATEST_MICROVERSION):
"""Delete a share group."""
uri = 'share-groups/%s' % share_group_id
- resp, body = self.delete(uri, headers=EXPERIMENTAL,
- extra_headers=True, version=version)
+ headers, extra_headers = utils.get_extra_headers(
+ version, constants.SHARE_GROUPS_GRADUATION_VERSION)
+ resp, body = self.delete(uri, headers=headers,
+ extra_headers=extra_headers, version=version)
self.expected_success(202, resp.status)
return self._parse_resp(body)
@@ -1110,16 +1119,20 @@
"""Get list of share groups w/o filters."""
uri = 'share-groups%s' % ('/detail' if detailed else '')
uri += '?%s' % (parse.urlencode(params) if params else '')
- resp, body = self.get(uri, headers=EXPERIMENTAL, extra_headers=True,
- version=version)
+ headers, extra_headers = utils.get_extra_headers(
+ version, constants.SHARE_GROUPS_GRADUATION_VERSION)
+ resp, body = self.get(uri, headers=headers,
+ extra_headers=extra_headers, version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
def get_share_group(self, share_group_id, version=LATEST_MICROVERSION):
"""Get share group info."""
uri = 'share-groups/%s' % share_group_id
- resp, body = self.get(uri, headers=EXPERIMENTAL, extra_headers=True,
- version=version)
+ headers, extra_headers = utils.get_extra_headers(
+ version, constants.SHARE_GROUPS_GRADUATION_VERSION)
+ resp, body = self.get(uri, headers=headers,
+ extra_headers=extra_headers, version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
@@ -1127,6 +1140,8 @@
version=LATEST_MICROVERSION, **kwargs):
"""Update an existing share group."""
uri = 'share-groups/%s' % share_group_id
+ headers, extra_headers = utils.get_extra_headers(
+ version, constants.SHARE_GROUPS_GRADUATION_VERSION)
post_body = {}
if name:
post_body['name'] = name
@@ -1136,21 +1151,25 @@
post_body.update(kwargs)
body = json.dumps({'share_group': post_body})
- resp, body = self.put(uri, body, headers=EXPERIMENTAL,
- extra_headers=True, version=version)
+ resp, body = self.put(uri, body, headers=headers,
+ extra_headers=extra_headers, version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
def share_group_reset_state(self, share_group_id, status='error',
version=LATEST_MICROVERSION):
+ headers, _junk = utils.get_extra_headers(
+ version, constants.SHARE_GROUPS_GRADUATION_VERSION)
self.reset_state(share_group_id, status=status, s_type='groups',
- headers=EXPERIMENTAL, version=version)
+ headers=headers, version=version)
def share_group_force_delete(self, share_group_id,
version=LATEST_MICROVERSION):
+ headers, _junk = utils.get_extra_headers(
+ version, constants.SHARE_GROUPS_GRADUATION_VERSION)
self.force_delete(share_group_id, s_type='share-groups',
- headers=EXPERIMENTAL, version=version)
+ headers=headers, version=version)
def wait_for_share_group_status(self, share_group_id, status):
"""Waits for a share group to reach a given status."""
@@ -1182,6 +1201,8 @@
version=LATEST_MICROVERSION):
"""Create a new share group type."""
uri = 'share-group-types'
+ headers, extra_headers = utils.get_extra_headers(
+ version, constants.SHARE_GROUPS_GRADUATION_VERSION)
post_body = {}
if isinstance(share_types, (tuple, list)):
share_types = list(share_types)
@@ -1196,8 +1217,8 @@
if group_specs:
post_body['group_specs'] = group_specs
body = json.dumps({'share_group_type': post_body})
- resp, body = self.post(uri, body, headers=EXPERIMENTAL,
- extra_headers=True, version=version)
+ resp, body = self.post(uri, body, headers=headers,
+ extra_headers=extra_headers, version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
@@ -1206,8 +1227,10 @@
"""Get list of share group types."""
uri = 'share-group-types%s' % ('/detail' if detailed else '')
uri += '?%s' % (parse.urlencode(params) if params else '')
- resp, body = self.get(uri, headers=EXPERIMENTAL, extra_headers=True,
- version=version)
+ headers, extra_headers = utils.get_extra_headers(
+ version, constants.SHARE_GROUPS_GRADUATION_VERSION)
+ resp, body = self.get(uri, headers=headers,
+ extra_headers=extra_headers, version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
@@ -1215,16 +1238,20 @@
version=LATEST_MICROVERSION):
"""Get share group type info."""
uri = 'share-group-types/%s' % share_group_type_id
- resp, body = self.get(uri, headers=EXPERIMENTAL, extra_headers=True,
- version=version)
+ headers, extra_headers = utils.get_extra_headers(
+ version, constants.SHARE_GROUPS_GRADUATION_VERSION)
+ resp, body = self.get(uri, headers=headers,
+ extra_headers=extra_headers, version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
def get_default_share_group_type(self, version=LATEST_MICROVERSION):
"""Get default share group type info."""
uri = 'share-group-types/default'
- resp, body = self.get(uri, headers=EXPERIMENTAL, extra_headers=True,
- version=version)
+ headers, extra_headers = utils.get_extra_headers(
+ version, constants.SHARE_GROUPS_GRADUATION_VERSION)
+ resp, body = self.get(uri, headers=headers,
+ extra_headers=extra_headers, version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
@@ -1232,18 +1259,22 @@
version=LATEST_MICROVERSION):
"""Delete an existing share group type."""
uri = 'share-group-types/%s' % share_group_type_id
- resp, body = self.delete(uri, headers=EXPERIMENTAL,
- extra_headers=True, version=version)
+ headers, extra_headers = utils.get_extra_headers(
+ version, constants.SHARE_GROUPS_GRADUATION_VERSION)
+ resp, body = self.delete(uri, headers=headers,
+ extra_headers=extra_headers, version=version)
self.expected_success(204, resp.status)
return self._parse_resp(body)
def add_access_to_share_group_type(self, share_group_type_id, project_id,
version=LATEST_MICROVERSION):
uri = 'share-group-types/%s/action' % share_group_type_id
+ headers, extra_headers = utils.get_extra_headers(
+ version, constants.SHARE_GROUPS_GRADUATION_VERSION)
post_body = {'project': project_id}
post_body = json.dumps({'addProjectAccess': post_body})
- resp, body = self.post(uri, post_body, headers=EXPERIMENTAL,
- extra_headers=True, version=version)
+ resp, body = self.post(uri, post_body, headers=headers,
+ extra_headers=extra_headers, version=version)
self.expected_success(202, resp.status)
return self._parse_resp(body)
@@ -1251,18 +1282,22 @@
project_id,
version=LATEST_MICROVERSION):
uri = 'share-group-types/%s/action' % share_group_type_id
+ headers, extra_headers = utils.get_extra_headers(
+ version, constants.SHARE_GROUPS_GRADUATION_VERSION)
post_body = {'project': project_id}
post_body = json.dumps({'removeProjectAccess': post_body})
- resp, body = self.post(uri, post_body, headers=EXPERIMENTAL,
- extra_headers=True, version=version)
+ resp, body = self.post(uri, post_body, headers=headers,
+ extra_headers=extra_headers, version=version)
self.expected_success(202, resp.status)
return self._parse_resp(body)
def list_access_to_share_group_type(self, share_group_type_id,
version=LATEST_MICROVERSION):
uri = 'share-group-types/%s/access' % share_group_type_id
- resp, body = self.get(uri, headers=EXPERIMENTAL, extra_headers=True,
- version=version)
+ headers, extra_headers = utils.get_extra_headers(
+ version, constants.SHARE_GROUPS_GRADUATION_VERSION)
+ resp, body = self.get(uri, headers=headers,
+ extra_headers=extra_headers, version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
@@ -1272,9 +1307,11 @@
group_specs_dict,
version=LATEST_MICROVERSION):
url = "share-group-types/%s/group-specs" % share_group_type_id
+ headers, extra_headers = utils.get_extra_headers(
+ version, constants.SHARE_GROUPS_GRADUATION_VERSION)
post_body = json.dumps({'group_specs': group_specs_dict})
- resp, body = self.post(url, post_body, headers=EXPERIMENTAL,
- extra_headers=True, version=version)
+ resp, body = self.post(url, post_body, headers=headers,
+ extra_headers=extra_headers, version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
@@ -1282,18 +1319,22 @@
version=LATEST_MICROVERSION):
uri = "group-types/%s/group_specs/%s" % (
share_group_type_id, group_spec_key)
- resp, body = self.get(uri, headers=EXPERIMENTAL, extra_headers=True,
- version=version)
+ headers, extra_headers = utils.get_extra_headers(
+ version, constants.SHARE_GROUPS_GRADUATION_VERSION)
+ resp, body = self.get(uri, headers=headers,
+ extra_headers=extra_headers, version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
def list_share_group_type_specs(self, share_group_type_id, params=None,
version=LATEST_MICROVERSION):
uri = "share-group-types/%s/group_specs" % share_group_type_id
+ headers, extra_headers = utils.get_extra_headers(
+ version, constants.SHARE_GROUPS_GRADUATION_VERSION)
if params is not None:
uri += '?%s' % parse.urlencode(params)
- resp, body = self.get(uri, headers=EXPERIMENTAL, extra_headers=True,
- version=version)
+ resp, body = self.get(uri, headers=headers,
+ extra_headers=extra_headers, version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
@@ -1302,10 +1343,12 @@
version=LATEST_MICROVERSION):
uri = "share-group-types/%s/group-specs/%s" % (
share_group_type_id, group_spec_key)
+ headers, extra_headers = utils.get_extra_headers(
+ version, constants.SHARE_GROUPS_GRADUATION_VERSION)
group_spec = {group_spec_key: group_spec_value}
post_body = json.dumps(group_spec)
- resp, body = self.put(uri, post_body, headers=EXPERIMENTAL,
- extra_headers=True, version=version)
+ resp, body = self.put(uri, post_body, headers=headers,
+ extra_headers=extra_headers, version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
@@ -1319,8 +1362,10 @@
version=LATEST_MICROVERSION):
uri = "share-group-types/%s/group-specs/%s" % (
share_type_id, group_spec_key)
- resp, body = self.delete(uri, headers=EXPERIMENTAL, extra_headers=True,
- version=version)
+ headers, extra_headers = utils.get_extra_headers(
+ version, constants.SHARE_GROUPS_GRADUATION_VERSION)
+ resp, body = self.delete(uri, headers=headers,
+ extra_headers=extra_headers, version=version)
self.expected_success(204, resp.status)
return body
@@ -1331,14 +1376,16 @@
version=LATEST_MICROVERSION):
"""Create a new share group snapshot of an existing share group."""
uri = 'share-group-snapshots'
+ headers, extra_headers = utils.get_extra_headers(
+ version, constants.SHARE_GROUPS_GRADUATION_VERSION)
post_body = {'share_group_id': share_group_id}
if name:
post_body['name'] = name
if description:
post_body['description'] = description
body = json.dumps({'share_group_snapshot': post_body})
- resp, body = self.post(uri, body, headers=EXPERIMENTAL,
- extra_headers=True, version=version)
+ resp, body = self.post(uri, body, headers=headers,
+ extra_headers=extra_headers, version=version)
self.expected_success(202, resp.status)
return self._parse_resp(body)
@@ -1346,8 +1393,10 @@
version=LATEST_MICROVERSION):
"""Delete an existing share group snapshot."""
uri = 'share-group-snapshots/%s' % share_group_snapshot_id
- resp, body = self.delete(uri, headers=EXPERIMENTAL,
- extra_headers=True, version=version)
+ headers, extra_headers = utils.get_extra_headers(
+ version, constants.SHARE_GROUPS_GRADUATION_VERSION)
+ resp, body = self.delete(uri, headers=headers,
+ extra_headers=extra_headers, version=version)
self.expected_success(202, resp.status)
return body
@@ -1356,8 +1405,10 @@
"""Get list of share group snapshots w/o filters."""
uri = 'share-group-snapshots%s' % ('/detail' if detailed else '')
uri += '?%s' % (parse.urlencode(params) if params else '')
- resp, body = self.get(uri, headers=EXPERIMENTAL, extra_headers=True,
- version=version)
+ headers, extra_headers = utils.get_extra_headers(
+ version, constants.SHARE_GROUPS_GRADUATION_VERSION)
+ resp, body = self.get(uri, headers=headers,
+ extra_headers=extra_headers, version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
@@ -1365,8 +1416,10 @@
version=LATEST_MICROVERSION):
"""Get share group snapshot info."""
uri = 'share-group-snapshots/%s' % share_group_snapshot_id
- resp, body = self.get(uri, headers=EXPERIMENTAL, extra_headers=True,
- version=version)
+ headers, extra_headers = utils.get_extra_headers(
+ version, constants.SHARE_GROUPS_GRADUATION_VERSION)
+ resp, body = self.get(uri, headers=headers,
+ extra_headers=extra_headers, version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
@@ -1375,29 +1428,35 @@
version=LATEST_MICROVERSION):
"""Update an existing share group snapshot."""
uri = 'share-group-snapshots/%s' % share_group_snapshot_id
+ headers, extra_headers = utils.get_extra_headers(
+ version, constants.SHARE_GROUPS_GRADUATION_VERSION)
post_body = {}
if name:
post_body['name'] = name
if description:
post_body['description'] = description
body = json.dumps({'share_group_snapshot': post_body})
- resp, body = self.put(uri, body, headers=EXPERIMENTAL,
- extra_headers=True, version=version)
+ resp, body = self.put(uri, body, headers=headers,
+ extra_headers=extra_headers, version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
def share_group_snapshot_reset_state(self, share_group_snapshot_id,
status='error',
version=LATEST_MICROVERSION):
+ headers, _junk = utils.get_extra_headers(
+ version, constants.SHARE_GROUPS_GRADUATION_VERSION)
self.reset_state(
share_group_snapshot_id, status=status,
- s_type='group-snapshots', headers=EXPERIMENTAL, version=version)
+ headers=headers, s_type='group-snapshots', version=version)
def share_group_snapshot_force_delete(self, share_group_snapshot_id,
version=LATEST_MICROVERSION):
+ headers, _junk = utils.get_extra_headers(
+ version, constants.SHARE_GROUPS_GRADUATION_VERSION)
self.force_delete(
share_group_snapshot_id, s_type='share-group-snapshots',
- headers=EXPERIMENTAL, version=version)
+ headers=headers, version=version)
def wait_for_share_group_snapshot_status(self, share_group_snapshot_id,
status):
diff --git a/manila_tempest_tests/tests/api/admin/test_migration.py b/manila_tempest_tests/tests/api/admin/test_migration.py
index 6ceae58..1bc67fb 100644
--- a/manila_tempest_tests/tests/api/admin/test_migration.py
+++ b/manila_tempest_tests/tests/api/admin/test_migration.py
@@ -176,6 +176,11 @@
self.assertIn(r, filtered_rules)
self.assertEqual(len(expected_rules), len(filtered_rules))
+ # In v 2.54 and beyond, we expect key 'progress' in the destination
+ # share data
+ if utils.is_microversion_supported('2.54'):
+ self.assertEqual('100%', share['progress'])
+
# Share not migrated yet
else:
self.assertNotEqual(dest_pool, share['host'])
diff --git a/manila_tempest_tests/tests/api/admin/test_quotas.py b/manila_tempest_tests/tests/api/admin/test_quotas.py
index ab0d665..215b25d 100644
--- a/manila_tempest_tests/tests/api/admin/test_quotas.py
+++ b/manila_tempest_tests/tests/api/admin/test_quotas.py
@@ -26,6 +26,8 @@
CONF = config.CONF
PRE_SHARE_GROUPS_MICROVERSION = "2.39"
SHARE_GROUPS_MICROVERSION = "2.40"
+PRE_SHARE_REPLICA_QUOTAS_MICROVERSION = "2.52"
+SHARE_REPLICA_QUOTAS_MICROVERSION = "2.53"
@ddt.ddt
@@ -54,6 +56,9 @@
if utils.is_microversion_supported(SHARE_GROUPS_MICROVERSION):
self.assertGreater(int(quotas["share_groups"]), -2)
self.assertGreater(int(quotas["share_group_snapshots"]), -2)
+ if utils.share_replica_quotas_are_supported():
+ self.assertGreater(int(quotas["share_replicas"]), -2)
+ self.assertGreater(int(quotas["replica_gigabytes"]), -2)
@tc.attr(base.TAG_POSITIVE, base.TAG_API)
def test_show_quotas(self):
@@ -66,6 +71,9 @@
if utils.is_microversion_supported(SHARE_GROUPS_MICROVERSION):
self.assertGreater(int(quotas["share_groups"]), -2)
self.assertGreater(int(quotas["share_group_snapshots"]), -2)
+ if utils.share_replica_quotas_are_supported():
+ self.assertGreater(int(quotas["share_replicas"]), -2)
+ self.assertGreater(int(quotas["replica_gigabytes"]), -2)
@tc.attr(base.TAG_POSITIVE, base.TAG_API)
def test_show_quotas_for_user(self):
@@ -79,6 +87,9 @@
if utils.is_microversion_supported(SHARE_GROUPS_MICROVERSION):
self.assertGreater(int(quotas["share_groups"]), -2)
self.assertGreater(int(quotas["share_group_snapshots"]), -2)
+ if utils.share_replica_quotas_are_supported():
+ self.assertGreater(int(quotas["share_replicas"]), -2)
+ self.assertGreater(int(quotas["replica_gigabytes"]), -2)
@tc.attr(base.TAG_POSITIVE, base.TAG_API)
@base.skip_if_microversion_not_supported(PRE_SHARE_GROUPS_MICROVERSION)
@@ -99,6 +110,17 @@
for key in ('share_groups', 'share_group_snapshots'):
self.assertNotIn(key, quotas)
+ @tc.attr(base.TAG_POSITIVE, base.TAG_API)
+ @base.skip_if_microversion_not_supported(
+ PRE_SHARE_REPLICA_QUOTAS_MICROVERSION)
+ def test_show_replica_quotas_for_user_using_too_old_microversion(self):
+ quotas = self.shares_v2_client.show_quotas(
+ self.tenant_id, self.user_id,
+ version=PRE_SHARE_REPLICA_QUOTAS_MICROVERSION)
+
+ for key in('share_replicas', 'replica_gigabytes'):
+ self.assertNotIn(key, quotas)
+
@ddt.data(
('id', True),
('name', False),
@@ -107,6 +129,10 @@
@tc.attr(base.TAG_POSITIVE, base.TAG_API)
@base.skip_if_microversion_lt("2.39")
def test_show_share_type_quotas(self, share_type_key, is_st_public):
+ # Check if the used microversion supports 'share_replica' and
+ # 'replica_gigabytes' quotas
+ replica_quotas_supported = utils.share_replica_quotas_are_supported()
+
# Create share type
share_type = self.create_share_type(
data_utils.rand_name("tempest-manila"),
@@ -117,6 +143,12 @@
if 'share_type' in share_type:
share_type = share_type['share_type']
+ keys = ['shares', 'gigabytes', 'snapshots', 'snapshot_gigabytes']
+
+ if replica_quotas_supported:
+ keys.append('share_replicas')
+ keys.append('replica_gigabytes')
+
# Get current project quotas
p_quotas = self.shares_v2_client.show_quotas(self.tenant_id)
@@ -125,7 +157,7 @@
self.tenant_id, share_type=share_type[share_type_key])
# Share type quotas have values equal to project's
- for key in ('shares', 'gigabytes', 'snapshots', 'snapshot_gigabytes'):
+ for key in keys:
self.assertEqual(st_quotas[key], p_quotas[key])
# Verify that we do not have share groups related quotas
@@ -215,6 +247,31 @@
self.tenant_id, self.user_id, **{quota_key: new_quota})
self.assertEqual(new_quota, int(updated[quota_key]))
+ @ddt.data(("share_replicas", False),
+ ("share_replicas", True),
+ ("replica_gigabytes", False),
+ ("replica_gigabytes", True),
+ )
+ @ddt.unpack
+ @tc.attr(base.TAG_POSITIVE, base.TAG_API)
+ @base.skip_if_microversion_not_supported(SHARE_REPLICA_QUOTAS_MICROVERSION)
+ def test_update_user_quota_replica_related(self, quota_key, use_user_id):
+ kwargs = {}
+
+ # Update the kwargs with user_id in case the user_id need to be
+ # specified in the request
+ kwargs.update({'user_id': self.user_id}) if use_user_id else None
+ quotas = self.client.show_quotas(self.tenant_id, **kwargs)
+ new_quota = int(quotas[quota_key]) - 1
+
+ # Add the updated quota into the kwargs
+ kwargs.update({quota_key: new_quota})
+
+ # Set the new quota based on tenant or tenant and user_id
+ updated = self.client.update_quotas(
+ self.tenant_id, **kwargs)
+ self.assertEqual(new_quota, int(updated[quota_key]))
+
@ddt.data(
('id', True),
('name', False),
@@ -223,14 +280,22 @@
@tc.attr(base.TAG_POSITIVE, base.TAG_API)
@base.skip_if_microversion_lt("2.39")
def test_update_share_type_quota(self, share_type_key, is_st_public):
+ # Check if the used microversion supports 'share_replica' and
+ # 'replica_gigabytes' quotas
+ replica_quotas_supported = utils.share_replica_quotas_are_supported()
share_type = self._create_share_type()
# Get current quotas
quotas = self.client.show_quotas(
self.tenant_id, share_type=share_type[share_type_key])
+ quota_keys = ['shares', 'gigabytes', 'snapshots', 'snapshot_gigabytes']
+
+ if replica_quotas_supported:
+ quota_keys.append('share_replicas')
+ quota_keys.append('replica_gigabytes')
# Update quotas
- for q in ('shares', 'gigabytes', 'snapshots', 'snapshot_gigabytes'):
+ for q in quota_keys:
new_quota = int(quotas[q]) - 1
# Set new quota
@@ -242,7 +307,7 @@
current_quotas = self.client.show_quotas(
self.tenant_id, share_type=share_type[share_type_key])
- for q in ('shares', 'gigabytes', 'snapshots', 'snapshot_gigabytes'):
+ for q in quota_keys:
self.assertEqual(int(quotas[q]) - 1, current_quotas[q])
@tc.attr(base.TAG_POSITIVE, base.TAG_API)
@@ -368,6 +433,9 @@
data["share_groups"] = int(custom["share_groups"]) + 2
data["share_group_snapshots"] = (
int(custom["share_group_snapshots"]) + 2)
+ if utils.share_replica_quotas_are_supported():
+ data["share_replicas"] = int(custom["share_replicas"]) + 2
+ data["replica_gigabytes"] = int(custom["replica_gigabytes"]) + 2
# set new quota
updated = self.client.update_quotas(self.tenant_id, **data)
@@ -385,6 +453,11 @@
self.assertEqual(
data["share_group_snapshots"],
int(updated["share_group_snapshots"]))
+ if utils.share_replica_quotas_are_supported():
+ self.assertEqual(
+ data["share_replicas"], int(updated["share_replicas"]))
+ self.assertEqual(
+ data["replica_gigabytes"], int(updated["replica_gigabytes"]))
# Reset customized quotas
self.client.reset_quotas(self.tenant_id)
@@ -406,6 +479,21 @@
self.assertEqual(
int(default["share_group_snapshots"]),
int(reseted["share_group_snapshots"]))
+ if utils.share_replica_quotas_are_supported():
+ self.assertEqual(
+ int(default["share_replicas"]), int(reseted["share_replicas"]))
+ self.assertEqual(
+ int(default["replica_gigabytes"]),
+ int(reseted["replica_gigabytes"]))
+
+ def _get_new_replica_quota_values(self, default_quotas, value_to_set):
+ new_values = {
+ 'share_replicas': int(
+ default_quotas['share_replicas']) + value_to_set,
+ 'replica_gigabytes': int(
+ default_quotas['replica_gigabytes']) + value_to_set
+ }
+ return new_values
@ddt.data(
('id', True),
@@ -416,17 +504,34 @@
@base.skip_if_microversion_lt("2.39")
def test_reset_share_type_quotas(self, share_type_key, is_st_public):
share_type = self._create_share_type()
+ quota_keys = ['shares', 'snapshots', 'gigabytes', 'snapshot_gigabytes']
# get default_quotas
default_quotas = self.client.default_quotas(self.tenant_id)
+ kwargs = {}
+
+ # check if the replica_gigabytes and share_replicas quotas are
+ # supported
+ if utils.share_replica_quotas_are_supported():
+ kwargs.update(self._get_new_replica_quota_values(
+ default_quotas, 5))
+ quota_keys.append('share_replicas')
+ quota_keys.append('replica_gigabytes')
+
# set new quota for project
updated_p_quota = self.client.update_quotas(
self.tenant_id,
shares=int(default_quotas['shares']) + 5,
snapshots=int(default_quotas['snapshots']) + 5,
gigabytes=int(default_quotas['gigabytes']) + 5,
- snapshot_gigabytes=int(default_quotas['snapshot_gigabytes']) + 5)
+ snapshot_gigabytes=int(default_quotas['snapshot_gigabytes']) + 5,
+ **kwargs
+ )
+
+ if utils.share_replica_quotas_are_supported():
+ kwargs.update(self._get_new_replica_quota_values(
+ default_quotas, 3))
# set new quota for project
self.client.update_quotas(
@@ -435,7 +540,9 @@
shares=int(default_quotas['shares']) + 3,
snapshots=int(default_quotas['snapshots']) + 3,
gigabytes=int(default_quotas['gigabytes']) + 3,
- snapshot_gigabytes=int(default_quotas['snapshot_gigabytes']) + 3)
+ snapshot_gigabytes=int(default_quotas['snapshot_gigabytes']) + 3,
+ **kwargs
+ )
# reset share type quotas
self.client.reset_quotas(
@@ -445,7 +552,7 @@
current_p_quota = self.client.show_quotas(self.tenant_id)
current_st_quota = self.client.show_quotas(
self.tenant_id, share_type=share_type[share_type_key])
- for key in ('shares', 'snapshots', 'gigabytes', 'snapshot_gigabytes'):
+ for key in quota_keys:
self.assertEqual(updated_p_quota[key], current_p_quota[key])
# Default share type quotas are current project quotas
@@ -561,6 +668,30 @@
self.assertEqual(-1, quotas.get('share_group_snapshots'))
+ @ddt.data("share_replicas", "replica_gigabytes")
+ @tc.attr(base.TAG_POSITIVE, base.TAG_API)
+ @utils.skip_if_microversion_not_supported(
+ SHARE_REPLICA_QUOTAS_MICROVERSION)
+ def test_unlimited_quota_for_replica_quotas(self, quota_key):
+ kwargs = {quota_key: -1}
+ self.client.update_quotas(self.tenant_id, **kwargs)
+
+ quotas = self.client.show_quotas(self.tenant_id)
+
+ self.assertEqual(-1, quotas.get(quota_key))
+
+ @ddt.data("share_replicas", "replica_gigabytes")
+ @tc.attr(base.TAG_POSITIVE, base.TAG_API)
+ @utils.skip_if_microversion_not_supported(
+ SHARE_REPLICA_QUOTAS_MICROVERSION)
+ def test_unlimited_user_quota_for_replica_quotas(self, quota_key):
+ kwargs = {quota_key: -1}
+ self.client.update_quotas(self.tenant_id, self.user_id, **kwargs)
+
+ quotas = self.client.show_quotas(self.tenant_id, self.user_id)
+
+ self.assertEqual(-1, quotas.get(quota_key))
+
@ddt.data(11, -1)
@tc.attr(base.TAG_POSITIVE, base.TAG_API)
def test_update_user_quotas_bigger_than_project_quota(self, user_quota):
diff --git a/manila_tempest_tests/tests/api/admin/test_quotas_negative.py b/manila_tempest_tests/tests/api/admin/test_quotas_negative.py
index 299d54d..ae5e227 100644
--- a/manila_tempest_tests/tests/api/admin/test_quotas_negative.py
+++ b/manila_tempest_tests/tests/api/admin/test_quotas_negative.py
@@ -14,6 +14,7 @@
# under the License.
import ddt
+import manila_tempest_tests.tests.api.test_replication_negative as rep_neg_test
from tempest import config
from tempest.lib import exceptions as lib_exc
import testtools
@@ -25,6 +26,8 @@
CONF = config.CONF
PRE_SHARE_GROUPS_MICROVERSION = "2.39"
SHARE_GROUPS_MICROVERSION = "2.40"
+PRE_SHARE_REPLICA_QUOTAS_MICROVERSION = "2.52"
+SHARE_REPLICA_QUOTAS_MICROVERSION = "2.53"
@ddt.ddt
@@ -88,6 +91,21 @@
lib_exc.BadRequest,
client.update_quotas, client.tenant_id, **kwargs)
+ @ddt.data(
+ {"share_replicas": -2},
+ {"replica_gigabytes": -2},
+ )
+ @tc.attr(base.TAG_NEGATIVE, base.TAG_API)
+ @utils.skip_if_microversion_not_supported(
+ SHARE_REPLICA_QUOTAS_MICROVERSION)
+ def test_update_replica_quotas_wrong_data(self, kwargs):
+ # -1 is an acceptable value, since it means unlimited
+
+ client = self.get_client_with_isolated_creds(client_version='2')
+ self.assertRaises(
+ lib_exc.BadRequest,
+ client.update_quotas, client.tenant_id, **kwargs)
+
@tc.attr(base.TAG_NEGATIVE, base.TAG_API)
def test_create_share_with_size_bigger_than_quota(self):
quotas = self.shares_client.show_quotas(
@@ -196,6 +214,26 @@
force=False,
share_networks=bigger_value)
+ @ddt.data("share_replicas", "replica_gigabytes")
+ @tc.attr(base.TAG_NEGATIVE, base.TAG_API)
+ @utils.skip_if_microversion_not_supported(
+ SHARE_REPLICA_QUOTAS_MICROVERSION)
+ def test_try_set_user_quota_replicas_bigger_than_tenant_quota(self, key):
+ client = self.get_client_with_isolated_creds(client_version='2')
+
+ # get current quotas for tenant
+ tenant_quotas = client.show_quotas(client.tenant_id)
+
+ # try to set a user quota for replicas bigger than the tenant quota
+ bigger_value = int(tenant_quotas[key]) + 2
+ kwargs = {key: bigger_value}
+ self.assertRaises(lib_exc.BadRequest,
+ client.update_quotas,
+ client.tenant_id,
+ client.user_id,
+ force=False,
+ **kwargs)
+
@ddt.data(
('quota-sets', '2.0', 'show_quotas'),
('quota-sets', '2.0', 'default_quotas'),
@@ -302,6 +340,24 @@
client.tenant_id,
**kwargs)
+ @ddt.data("share_replicas", "replica_gigabytes")
+ @tc.attr(base.TAG_NEGATIVE, base.TAG_API)
+ @base.skip_if_microversion_not_supported(SHARE_REPLICA_QUOTAS_MICROVERSION)
+ def test_share_replica_quotas_using_too_old_microversion(self, quota_key):
+ client = self.get_client_with_isolated_creds(client_version='2')
+ tenant_quotas = client.show_quotas(
+ client.tenant_id, version=SHARE_REPLICA_QUOTAS_MICROVERSION)
+ kwargs = {
+ "version": PRE_SHARE_REPLICA_QUOTAS_MICROVERSION,
+ quota_key: tenant_quotas[quota_key],
+ }
+
+ self.assertRaises(
+ lib_exc.BadRequest,
+ client.update_quotas,
+ client.tenant_id,
+ **kwargs)
+
@ddt.data('show', 'reset', 'update')
@tc.attr(base.TAG_NEGATIVE, base.TAG_API)
@base.skip_if_microversion_lt("2.38")
@@ -366,3 +422,77 @@
self.assertRaises(lib_exc.OverLimit,
self.create_share,
share_type_id=self.share_type_id)
+
+
+@ddt.ddt
+class ReplicaQuotasNegativeTest(rep_neg_test.ReplicationNegativeBase):
+
+ @classmethod
+ def skip_checks(cls):
+ super(ReplicaQuotasNegativeTest, cls).skip_checks()
+ if not CONF.share.run_quota_tests:
+ msg = "Quota tests are disabled."
+ raise cls.skipException(msg)
+
+ utils.check_skip_if_microversion_lt(SHARE_REPLICA_QUOTAS_MICROVERSION)
+
+ def _modify_quotas_for_test(self, quota_key, new_limit):
+ kwargs = {quota_key: new_limit}
+
+ # Get the original quota values
+ original_quota = self.admin_client.show_quotas(self.tenant_id)
+
+ # Update the current quotas
+ self.admin_client.update_quotas(self.tenant_id, **kwargs)
+
+ # Save the previous value
+ old_quota_values = {quota_key: original_quota[quota_key]}
+
+ # Get the updated quotas and add a cleanup
+ updated_quota = self.admin_client.show_quotas(self.tenant_id)
+ self.addCleanup(self.admin_client.update_quotas,
+ self.tenant_id,
+ **old_quota_values)
+
+ # Make sure that the new value was properly set
+ self.assertEqual(new_limit, updated_quota[quota_key])
+
+ @tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
+ @ddt.data(('share_replicas', 2), ('replica_gigabytes', None))
+ @ddt.unpack
+ def test_create_replica_over_replica_limit(self, quota_key, new_limit):
+ # Define the quota values to be updated
+ new_limit = (int(self.share1['size'] * 2)
+ if quota_key == 'replica_gigabytes' else new_limit)
+
+ # Create an inactive share replica
+ self.create_share_replica(
+ self.share1["id"], self.replica_zone, cleanup_in_class=False)
+
+ # Modify the quota limit for this test
+ self._modify_quotas_for_test(quota_key, new_limit)
+
+ # Make sure that the request to create a third one will fail
+ self.assertRaises(lib_exc.OverLimit,
+ self.create_share_replica,
+ self.share1['id'],
+ availability_zone=self.replica_zone)
+
+ @testtools.skipUnless(
+ CONF.share.run_extend_tests,
+ "Share extend tests are disabled.")
+ @tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
+ def test_extend_replica_over_limit(self):
+ # Define the quota values to be updated
+ quota_key = 'replica_gigabytes'
+
+ # Modify the quota limit for this test
+ self._modify_quotas_for_test(quota_key, new_limit=self.share1['size'])
+
+ new_size = self.share1['size'] + 1
+
+ # Make sure that the request to extend the share will fail
+ self.assertRaises(lib_exc.OverLimit,
+ self.shares_v2_client.extend_share,
+ self.share1['id'],
+ new_size)
diff --git a/manila_tempest_tests/tests/api/admin/test_share_group_types.py b/manila_tempest_tests/tests/api/admin/test_share_group_types.py
index 6474f59..d05c079 100644
--- a/manila_tempest_tests/tests/api/admin/test_share_group_types.py
+++ b/manila_tempest_tests/tests/api/admin/test_share_group_types.py
@@ -14,6 +14,7 @@
# under the License.
import ddt
+import itertools
from tempest import config
from tempest.lib.common.utils import data_utils
from testtools import testcase as tc
@@ -54,8 +55,13 @@
cls.share_type2 = share_type['share_type']
@tc.attr(base.TAG_POSITIVE, base.TAG_API)
- @ddt.data('id', 'name')
- def test_create_get_delete_share_group_type_min(self, st_key):
+ @ddt.data(
+ *itertools.product(('id', 'name'), set(
+ [LATEST_MICROVERSION, constants.MIN_SHARE_GROUP_MICROVERSION,
+ constants.SHARE_GROUPS_GRADUATION_VERSION])))
+ @ddt.unpack
+ def test_create_get_delete_share_group_type(self, st_key, version):
+ self.skip_if_microversion_not_supported(version)
name = data_utils.rand_name("tempest-manila")
# Create share group type
@@ -63,7 +69,7 @@
name=name,
share_types=self.share_type[st_key],
cleanup_in_class=False,
- version=constants.MIN_SHARE_GROUP_MICROVERSION)
+ version=version)
self.assertEqual(
[self.share_type['id']],
@@ -71,7 +77,8 @@
'Share type not applied correctly.')
# Read share group type
- sg_type_r = self.shares_v2_client.get_share_group_type(sg_type_c['id'])
+ sg_type_r = self.shares_v2_client.get_share_group_type(
+ sg_type_c['id'], version=version)
keys = set(sg_type_r.keys())
self.assertTrue(
constants.SHARE_GROUP_TYPE_REQUIRED_KEYS.issubset(keys),
@@ -82,7 +89,7 @@
# Delete share group type
self.shares_v2_client.delete_share_group_type(
- sg_type_r['id'], version=constants.MIN_SHARE_GROUP_MICROVERSION)
+ sg_type_r['id'], version=version)
self.shares_v2_client.wait_for_resource_deletion(
share_group_type_id=sg_type_r['id'])
@@ -131,7 +138,11 @@
self.assertDictMatch(group_specs, sg_type['group_specs'])
@tc.attr(base.TAG_POSITIVE, base.TAG_API)
- def test_update_single_share_group_type_spec_min(self):
+ @ddt.data(
+ *set([constants.MIN_SHARE_GROUP_MICROVERSION,
+ constants.SHARE_GROUPS_GRADUATION_VERSION, LATEST_MICROVERSION]))
+ def test_update_single_share_group_type_spec(self, version):
+ self.skip_if_microversion_not_supported(version)
name = data_utils.rand_name("tempest-manila")
group_specs = {'key1': 'value1', 'key2': 'value2'}
@@ -140,14 +151,14 @@
share_types=self.share_type['id'],
group_specs=group_specs,
cleanup_in_class=False,
- version=constants.MIN_SHARE_GROUP_MICROVERSION)
+ version=version)
self.assertDictMatch(group_specs, sg_type['group_specs'])
group_specs = {'key1': 'value1', 'key2': 'value2'}
self.shares_v2_client.update_share_group_type_spec(
- sg_type['id'], 'key1', 'value3')
+ sg_type['id'], 'key1', 'value3', version=version)
sg_type = self.shares_v2_client.get_share_group_type(sg_type['id'])
self.assertIn('key1', sg_type['group_specs'])
@@ -180,7 +191,11 @@
self.assertEqual(v, sg_type['group_specs'][k])
@tc.attr(base.TAG_POSITIVE, base.TAG_API)
- def test_delete_single_share_group_type_spec_min(self):
+ @ddt.data(
+ *set([constants.MIN_SHARE_GROUP_MICROVERSION,
+ constants.SHARE_GROUPS_GRADUATION_VERSION, LATEST_MICROVERSION]))
+ def test_delete_single_share_group_type_spec_min(self, version):
+ self.skip_if_microversion_not_supported(version)
name = data_utils.rand_name("tempest-manila")
group_specs = {'key1': 'value1', 'key2': 'value2'}
@@ -189,7 +204,7 @@
share_types=self.share_type['id'],
group_specs=group_specs,
cleanup_in_class=False,
- version=constants.MIN_SHARE_GROUP_MICROVERSION)
+ version=version)
self.assertDictMatch(group_specs, sg_type['group_specs'])
@@ -197,14 +212,18 @@
group_specs.pop(key_to_delete)
self.shares_v2_client.delete_share_group_type_spec(
- sg_type['id'], key_to_delete)
+ sg_type['id'], key_to_delete, version=version)
sg_type = self.shares_v2_client.get_share_group_type(
- sg_type['id'])
+ sg_type['id'], version=version)
self.assertDictMatch(group_specs, sg_type['group_specs'])
@tc.attr(base.TAG_POSITIVE, base.TAG_API)
- def test_private_share_group_type_access(self):
+ @ddt.data(
+ *set([constants.MIN_SHARE_GROUP_MICROVERSION,
+ constants.SHARE_GROUPS_GRADUATION_VERSION, LATEST_MICROVERSION]))
+ def test_private_share_group_type_access(self, version):
+ self.skip_if_microversion_not_supported(version)
name = data_utils.rand_name("tempest-manila")
group_specs = {"key1": "value1", "key2": "value2"}
project_id = self.shares_v2_client.tenant_id
@@ -215,41 +234,48 @@
share_types=[self.share_type['id']],
is_public=False,
group_specs=group_specs,
+ version=version
)
self.assertEqual(name, sgt_create['name'])
sgt_id = sgt_create["id"]
# It should not be listed without access
- sgt_list = self.shares_v2_client.list_share_group_types()
+ sgt_list = self.shares_v2_client.list_share_group_types(
+ version=version)
self.assertFalse(any(sgt_id == sgt["id"] for sgt in sgt_list))
# List projects that have access for share group type - none expected
- access = self.shares_v2_client.list_access_to_share_group_type(sgt_id)
+ access = self.shares_v2_client.list_access_to_share_group_type(
+ sgt_id, version=version)
self.assertEmpty(access)
# Add project access to share group type
access = self.shares_v2_client.add_access_to_share_group_type(
- sgt_id, project_id)
+ sgt_id, project_id, version=version)
# Now it should be listed
- sgt_list = self.shares_v2_client.list_share_group_types()
+ sgt_list = self.shares_v2_client.list_share_group_types(
+ version=version)
self.assertTrue(any(sgt_id == sgt["id"] for sgt in sgt_list))
# List projects that have access for share group type - one expected
- access = self.shares_v2_client.list_access_to_share_group_type(sgt_id)
+ access = self.shares_v2_client.list_access_to_share_group_type(
+ sgt_id, version=version)
expected = [{'share_group_type_id': sgt_id, 'project_id': project_id}]
self.assertEqual(expected, access)
# Remove project access from share group type
access = self.shares_v2_client.remove_access_from_share_group_type(
- sgt_id, project_id)
+ sgt_id, project_id, version=version)
# It should not be listed without access
- sgt_list = self.shares_v2_client.list_share_group_types()
+ sgt_list = self.shares_v2_client.list_share_group_types(
+ version=version)
self.assertFalse(any(sgt_id == sgt["id"] for sgt in sgt_list))
# List projects that have access for share group type - none expected
- access = self.shares_v2_client.list_access_to_share_group_type(sgt_id)
+ access = self.shares_v2_client.list_access_to_share_group_type(
+ sgt_id, version=version)
self.assertEmpty(access)
@tc.attr(base.TAG_POSITIVE, base.TAG_API)
diff --git a/manila_tempest_tests/tests/api/admin/test_share_groups.py b/manila_tempest_tests/tests/api/admin/test_share_groups.py
index 39925ba..c7b3a42 100644
--- a/manila_tempest_tests/tests/api/admin/test_share_groups.py
+++ b/manila_tempest_tests/tests/api/admin/test_share_groups.py
@@ -13,6 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import ddt
from tempest import config
from tempest.lib.common.utils import data_utils
import testtools
@@ -23,8 +24,10 @@
from manila_tempest_tests import utils
CONF = config.CONF
+LATEST_MICROVERSION = CONF.share.max_api_microversion
+@ddt.ddt
class ShareGroupsTest(base.BaseSharesAdminTest):
@classmethod
@@ -56,12 +59,16 @@
cls.sg_type_id = cls.sg_type['id']
@tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
- def test_create_share_group_with_single_share_type_min(self):
+ @ddt.data(
+ *set([constants.MIN_SHARE_GROUP_MICROVERSION,
+ constants.SHARE_GROUPS_GRADUATION_VERSION, LATEST_MICROVERSION]))
+ def test_create_share_group_with_single_share_type_min(self, version):
+ self.skip_if_microversion_not_supported(version)
share_group = self.create_share_group(
share_group_type_id=self.sg_type_id,
cleanup_in_class=False,
share_type_ids=[self.share_type_id],
- version=constants.MIN_SHARE_GROUP_MICROVERSION)
+ version=version)
keys = set(share_group.keys())
self.assertTrue(
@@ -124,14 +131,21 @@
@testtools.skipUnless(
CONF.share.default_share_type_name, "Only if defaults are defined.")
@tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
- def test_default_share_group_type_applied(self):
- default_type = self.shares_v2_client.get_default_share_group_type()
+ @ddt.data(
+ *set([constants.MIN_SHARE_GROUP_MICROVERSION,
+ constants.SHARE_GROUPS_GRADUATION_VERSION, LATEST_MICROVERSION]))
+ def test_default_share_group_type_applied(self, version):
+ self.skip_if_microversion_not_supported(version)
+
+ default_type = self.shares_v2_client.get_default_share_group_type(
+ version=version
+ )
default_share_types = default_type['share_types']
share_group = self.create_share_group(
cleanup_in_class=False,
share_type_ids=default_share_types,
- version=constants.MIN_SHARE_GROUP_MICROVERSION)
+ version=version)
keys = set(share_group.keys())
self.assertTrue(
diff --git a/manila_tempest_tests/tests/api/admin/test_share_instances.py b/manila_tempest_tests/tests/api/admin/test_share_instances.py
index 89f0124..4474ffd 100644
--- a/manila_tempest_tests/tests/api/admin/test_share_instances.py
+++ b/manila_tempest_tests/tests/api/admin/test_share_instances.py
@@ -63,9 +63,11 @@
self.assertIn(self.share['id'], share_ids, msg)
@tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
- @ddt.data('2.3', '2.9', '2.10', '2.30')
+ @ddt.data('2.3', '2.9', '2.10', '2.30', '2.54')
def test_get_share_instance(self, version):
"""Test that we get the proper keys back for the instance."""
+ self.skip_if_microversion_not_supported(version)
+
share_instances = self.shares_v2_client.get_instances_of_share(
self.share['id'], version=version,
)
@@ -87,6 +89,8 @@
expected_keys.append("share_type_id")
if utils.is_microversion_ge(version, '2.30'):
expected_keys.append("cast_rules_to_readonly")
+ if utils.is_microversion_ge(version, '2.54'):
+ expected_keys.append("progress")
expected_keys = sorted(expected_keys)
actual_keys = sorted(si.keys())
self.assertEqual(expected_keys, actual_keys,
diff --git a/manila_tempest_tests/tests/api/admin/test_user_messages.py b/manila_tempest_tests/tests/api/admin/test_user_messages.py
index 8a11b1c..83c5e37 100644
--- a/manila_tempest_tests/tests/api/admin/test_user_messages.py
+++ b/manila_tempest_tests/tests/api/admin/test_user_messages.py
@@ -10,6 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import datetime
+
from oslo_utils import timeutils
from oslo_utils import uuidutils
from tempest import config
@@ -21,6 +23,7 @@
CONF = config.CONF
MICROVERSION = '2.37'
+QUERY_BY_TIMESTAMP_MICROVERSION = '2.52'
MESSAGE_KEYS = (
'created_at',
'action_id',
@@ -111,3 +114,55 @@
self.shares_v2_client.delete_message(self.message['id'])
self.shares_v2_client.wait_for_resource_deletion(
message_id=self.message['id'])
+
+ @decorators.attr(type=[base.TAG_POSITIVE, base.TAG_API])
+ @base.skip_if_microversion_not_supported(QUERY_BY_TIMESTAMP_MICROVERSION)
+ def test_list_messages_with_since_and_before_filters(self):
+ new_message = self.create_user_message()
+ created_at_1 = timeutils.parse_strtime(self.message['created_at'])
+ created_at_2 = timeutils.parse_strtime(new_message['created_at'])
+ time_1 = created_at_1 - datetime.timedelta(seconds=1)
+ time_2 = created_at_2 - datetime.timedelta(seconds=1)
+
+ params1 = {'created_since': str(time_1)}
+ # should return all user messages created by this test including
+ # self.message
+ messages = self.shares_v2_client.list_messages(params=params1)
+ ids = [x['id'] for x in messages]
+ self.assertGreaterEqual(len(ids), 2)
+ self.assertIn(self.message['id'], ids)
+ self.assertIn(new_message['id'], ids)
+ for message in messages:
+ time_diff_with_created_since = timeutils.delta_seconds(
+ time_1, timeutils.parse_strtime(message['created_at']))
+ self.assertGreaterEqual(time_diff_with_created_since, 0)
+
+ params2 = {'created_since': str(time_1),
+ 'created_before': str(time_2)}
+ # should not return new_message; the returned list should contain
+ # at least one message and include self.message
+ messages = self.shares_v2_client.list_messages(params=params2)
+ self.assertIsInstance(messages, list)
+ ids = [x['id'] for x in messages]
+ self.assertGreaterEqual(len(ids), 1)
+ self.assertIn(self.message['id'], ids)
+ self.assertNotIn(new_message['id'], ids)
+ for message in messages:
+ time_diff_with_created_since = timeutils.delta_seconds(
+ time_1, timeutils.parse_strtime(message['created_at']))
+ time_diff_with_created_before = timeutils.delta_seconds(
+ time_2, timeutils.parse_strtime(message['created_at']))
+ self.assertGreaterEqual(time_diff_with_created_since, 0)
+ self.assertGreaterEqual(0, time_diff_with_created_before)
+
+ params3 = {'created_before': str(time_2)}
+ # should not include new_message, but should include self.message
+ messages = self.shares_v2_client.list_messages(params=params3)
+ ids = [x['id'] for x in messages]
+ self.assertGreaterEqual(len(ids), 1)
+ self.assertNotIn(new_message['id'], ids)
+ self.assertIn(self.message['id'], ids)
+ for message in messages:
+ time_diff_with_created_before = timeutils.delta_seconds(
+ time_2, timeutils.parse_strtime(message['created_at']))
+ self.assertGreaterEqual(0, time_diff_with_created_before)
diff --git a/manila_tempest_tests/tests/api/admin/test_user_messages_negative.py b/manila_tempest_tests/tests/api/admin/test_user_messages_negative.py
index c9debdd..cf592e4 100644
--- a/manila_tempest_tests/tests/api/admin/test_user_messages_negative.py
+++ b/manila_tempest_tests/tests/api/admin/test_user_messages_negative.py
@@ -21,6 +21,7 @@
CONF = config.CONF
MICROVERSION = '2.37'
+QUERY_BY_TIMESTAMP_MICROVERSION = '2.52'
class UserMessageNegativeTest(base.BaseSharesAdminTest):
@@ -61,3 +62,13 @@
self.assertRaises(lib_exc.NotFound,
self.shares_v2_client.delete_message,
six.text_type(uuidutils.generate_uuid()))
+
+ @decorators.attr(type=[base.TAG_NEGATIVE, base.TAG_API])
+ @base.skip_if_microversion_not_supported(QUERY_BY_TIMESTAMP_MICROVERSION)
+ def test_list_messages_with_invalid_time_format(self):
+ params_key = ['created_since', 'created_before']
+ for key in params_key:
+ params = {key: 'invalid_time'}
+ self.assertRaises(lib_exc.BadRequest,
+ self.shares_v2_client.list_messages,
+ params=params)
diff --git a/manila_tempest_tests/tests/api/base.py b/manila_tempest_tests/tests/api/base.py
index 22b1d72..2ed43c7 100644
--- a/manila_tempest_tests/tests/api/base.py
+++ b/manila_tempest_tests/tests/api/base.py
@@ -609,8 +609,7 @@
if kwargs.get('source_share_group_snapshot_id'):
new_share_group_shares = client.list_shares(
detailed=True,
- params={'share_group_id': share_group['id']},
- experimental=True)
+ params={'share_group_id': share_group['id']})
for share in new_share_group_shares:
resource = {"type": "share",
@@ -712,13 +711,14 @@
service['binary'] == 'manila-share' and
service['state'] == 'up' and
re.search(backends, service['host'])]
- return zones
+ return list(set(zones))
@classmethod
def get_pools_matching_share_type(cls, share_type, client=None):
client = client or cls.admin_shares_v2_client
if utils.is_microversion_supported('2.23'):
return client.list_pools(
+ detail=True,
search_opts={'share_type': share_type['id']})['pools']
pools = client.list_pools(detail=True)['pools']
diff --git a/manila_tempest_tests/tests/api/test_quotas.py b/manila_tempest_tests/tests/api/test_quotas.py
index 83c776f..32152e0 100644
--- a/manila_tempest_tests/tests/api/test_quotas.py
+++ b/manila_tempest_tests/tests/api/test_quotas.py
@@ -15,12 +15,15 @@
import ddt
import itertools
+from manila_tempest_tests import utils
from tempest import config
from testtools import testcase as tc
from manila_tempest_tests.tests.api import base
CONF = config.CONF
+PRE_SHARE_REPLICAS_MICROVERSION = "2.52"
+SHARE_REPLICAS_MICROVERSION = "2.53"
@ddt.ddt
@@ -39,50 +42,75 @@
@ddt.data('shares_client', 'shares_v2_client')
def test_default_quotas(self, client_name):
quotas = getattr(self, client_name).default_quotas(self.tenant_id)
+ uses_v2_client = client_name == 'shares_v2_client'
self.assertGreater(int(quotas["gigabytes"]), -2)
self.assertGreater(int(quotas["snapshot_gigabytes"]), -2)
self.assertGreater(int(quotas["shares"]), -2)
self.assertGreater(int(quotas["snapshots"]), -2)
self.assertGreater(int(quotas["share_networks"]), -2)
+ if utils.share_replica_quotas_are_supported() and uses_v2_client:
+ self.assertGreater(int(quotas["share_replicas"]), -2)
+ self.assertGreater(int(quotas["replica_gigabytes"]), -2)
@tc.attr(base.TAG_POSITIVE, base.TAG_API)
@ddt.data('shares_client', 'shares_v2_client')
def test_show_quotas(self, client_name):
quotas = getattr(self, client_name).show_quotas(self.tenant_id)
+ uses_v2_client = client_name == 'shares_v2_client'
self.assertGreater(int(quotas["gigabytes"]), -2)
self.assertGreater(int(quotas["snapshot_gigabytes"]), -2)
self.assertGreater(int(quotas["shares"]), -2)
self.assertGreater(int(quotas["snapshots"]), -2)
self.assertGreater(int(quotas["share_networks"]), -2)
+ if utils.share_replica_quotas_are_supported() and uses_v2_client:
+ self.assertGreater(int(quotas["share_replicas"]), -2)
+ self.assertGreater(int(quotas["replica_gigabytes"]), -2)
@tc.attr(base.TAG_POSITIVE, base.TAG_API)
@ddt.data('shares_client', 'shares_v2_client')
def test_show_quotas_for_user(self, client_name):
quotas = getattr(self, client_name).show_quotas(
self.tenant_id, self.user_id)
+ uses_v2_client = client_name == 'shares_v2_client'
self.assertGreater(int(quotas["gigabytes"]), -2)
self.assertGreater(int(quotas["snapshot_gigabytes"]), -2)
self.assertGreater(int(quotas["shares"]), -2)
self.assertGreater(int(quotas["snapshots"]), -2)
self.assertGreater(int(quotas["share_networks"]), -2)
+ if utils.share_replica_quotas_are_supported() and uses_v2_client:
+ self.assertGreater(int(quotas["share_replicas"]), -2)
+ self.assertGreater(int(quotas["replica_gigabytes"]), -2)
@ddt.data(
- *itertools.product(set(("2.25", CONF.share.max_api_microversion)),
- (True, False))
+ *itertools.product(set(
+ ["2.25", "2.53", CONF.share.max_api_microversion]), (True, False))
)
@ddt.unpack
@tc.attr(base.TAG_POSITIVE, base.TAG_API)
- @base.skip_if_microversion_not_supported("2.25")
def test_show_quotas_detail(self, microversion, with_user):
+ self.skip_if_microversion_not_supported(microversion)
quota_args = {"tenant_id": self.tenant_id, "version": microversion, }
+ keys = ['gigabytes', 'snapshot_gigabytes', 'shares',
+ 'snapshots', 'share_networks']
+ if utils.is_microversion_ge(microversion, SHARE_REPLICAS_MICROVERSION):
+ keys.append('share_replicas')
+ keys.append('replica_gigabytes')
if with_user:
quota_args.update({"user_id": self.user_id})
quotas = self.shares_v2_client.detail_quotas(**quota_args)
quota_keys = list(quotas.keys())
- for outer in ('gigabytes', 'snapshot_gigabytes', 'shares',
- 'snapshots', 'share_networks'):
+ for outer in keys:
self.assertIn(outer, quota_keys)
outer_keys = list(quotas[outer].keys())
for inner in ('in_use', 'limit', 'reserved'):
self.assertIn(inner, outer_keys)
self.assertGreater(int(quotas[outer][inner]), -2)
+
+ @tc.attr(base.TAG_POSITIVE, base.TAG_API)
+ @base.skip_if_microversion_not_supported(PRE_SHARE_REPLICAS_MICROVERSION)
+ def test_quota_detail_2_52_no_share_replica_quotas(self):
+ quota_args = {"tenant_id": self.tenant_id,
+ "version": PRE_SHARE_REPLICAS_MICROVERSION}
+ quotas = self.shares_v2_client.detail_quotas(**quota_args)
+ self.assertNotIn('share_replicas', quotas.keys())
+ self.assertNotIn('replica_gigabytes', quotas.keys())
diff --git a/manila_tempest_tests/tests/api/test_replication_negative.py b/manila_tempest_tests/tests/api/test_replication_negative.py
index a71449e..3215641 100644
--- a/manila_tempest_tests/tests/api/test_replication_negative.py
+++ b/manila_tempest_tests/tests/api/test_replication_negative.py
@@ -28,11 +28,10 @@
_MIN_SUPPORTED_MICROVERSION = '2.11'
-class ReplicationNegativeTest(base.BaseSharesMixedTest):
-
+class ReplicationNegativeBase(base.BaseSharesMixedTest):
@classmethod
def skip_checks(cls):
- super(ReplicationNegativeTest, cls).skip_checks()
+ super(ReplicationNegativeBase, cls).skip_checks()
if not CONF.share.run_replication_tests:
raise cls.skipException('Replication tests are disabled.')
@@ -40,7 +39,7 @@
@classmethod
def resource_setup(cls):
- super(ReplicationNegativeTest, cls).resource_setup()
+ super(ReplicationNegativeBase, cls).resource_setup()
cls.admin_client = cls.admin_shares_v2_client
cls.replication_type = CONF.share.backend_replication_type
cls.multitenancy_enabled = (
@@ -80,6 +79,9 @@
instance_id = share_instances[0]["id"]
return share, instance_id
+
+class ReplicationNegativeTest(ReplicationNegativeBase):
+
def _is_replication_type_promotable(self):
if (self.replication_type
not in constants.REPLICATION_PROMOTION_CHOICES):
diff --git a/manila_tempest_tests/tests/api/test_rules.py b/manila_tempest_tests/tests/api/test_rules.py
index dc63429..7ad57a9 100644
--- a/manila_tempest_tests/tests/api/test_rules.py
+++ b/manila_tempest_tests/tests/api/test_rules.py
@@ -81,12 +81,16 @@
protocol = "nfs"
@classmethod
- def resource_setup(cls):
- super(ShareIpRulesForNFSTest, cls).resource_setup()
+ def skip_checks(cls):
+ super(ShareIpRulesForNFSTest, cls).skip_checks()
if (cls.protocol not in CONF.share.enable_protocols or
cls.protocol not in CONF.share.enable_ip_rules_for_protocols):
msg = "IP rule tests for %s protocol are disabled" % cls.protocol
raise cls.skipException(msg)
+
+ @classmethod
+ def resource_setup(cls):
+ super(ShareIpRulesForNFSTest, cls).resource_setup()
# create share type
cls.share_type = cls._create_share_type()
cls.share_type_id = cls.share_type['id']
@@ -230,13 +234,18 @@
protocol = "nfs"
@classmethod
- def resource_setup(cls):
- super(ShareUserRulesForNFSTest, cls).resource_setup()
+ def skip_checks(cls):
+ super(ShareUserRulesForNFSTest, cls).skip_checks()
if (cls.protocol not in CONF.share.enable_protocols or
cls.protocol not in
CONF.share.enable_user_rules_for_protocols):
msg = "USER rule tests for %s protocol are disabled" % cls.protocol
raise cls.skipException(msg)
+
+ @classmethod
+ def resource_setup(cls):
+ super(ShareUserRulesForNFSTest, cls).resource_setup()
+
# create share type
cls.share_type = cls._create_share_type()
cls.share_type_id = cls.share_type['id']
@@ -320,13 +329,17 @@
protocol = "glusterfs"
@classmethod
- def resource_setup(cls):
- super(ShareCertRulesForGLUSTERFSTest, cls).resource_setup()
+ def skip_checks(cls):
+ super(ShareCertRulesForGLUSTERFSTest, cls).skip_checks()
if (cls.protocol not in CONF.share.enable_protocols or
cls.protocol not in
CONF.share.enable_cert_rules_for_protocols):
msg = "Cert rule tests for %s protocol are disabled" % cls.protocol
raise cls.skipException(msg)
+
+ @classmethod
+ def resource_setup(cls):
+ super(ShareCertRulesForGLUSTERFSTest, cls).resource_setup()
# create share type
cls.share_type = cls._create_share_type()
cls.share_type_id = cls.share_type['id']
@@ -436,14 +449,18 @@
protocol = "cephfs"
@classmethod
- def resource_setup(cls):
- super(ShareCephxRulesForCephFSTest, cls).resource_setup()
+ def skip_checks(cls):
+ super(ShareCephxRulesForCephFSTest, cls).skip_checks()
if (cls.protocol not in CONF.share.enable_protocols or
cls.protocol not in
CONF.share.enable_cephx_rules_for_protocols):
msg = ("Cephx rule tests for %s protocol are disabled." %
cls.protocol)
raise cls.skipException(msg)
+
+ @classmethod
+ def resource_setup(cls):
+ super(ShareCephxRulesForCephFSTest, cls).resource_setup()
# create share type
cls.share_type = cls._create_share_type()
cls.share_type_id = cls.share_type['id']
@@ -483,8 +500,8 @@
class ShareRulesTest(base.BaseSharesMixedTest):
@classmethod
- def resource_setup(cls):
- super(ShareRulesTest, cls).resource_setup()
+ def skip_checks(cls):
+ super(ShareRulesTest, cls).skip_checks()
if not (any(p in CONF.share.enable_ip_rules_for_protocols
for p in cls.protocols) or
any(p in CONF.share.enable_user_rules_for_protocols
@@ -495,6 +512,10 @@
for p in cls.protocols)):
cls.message = "Rule tests are disabled"
raise cls.skipException(cls.message)
+
+ @classmethod
+ def resource_setup(cls):
+ super(ShareRulesTest, cls).resource_setup()
if CONF.share.enable_ip_rules_for_protocols:
cls.protocol = CONF.share.enable_ip_rules_for_protocols[0]
cls.access_type = "ip"
diff --git a/manila_tempest_tests/tests/api/test_share_group_actions.py b/manila_tempest_tests/tests/api/test_share_group_actions.py
index 169d767..c1f4e6f 100644
--- a/manila_tempest_tests/tests/api/test_share_group_actions.py
+++ b/manila_tempest_tests/tests/api/test_share_group_actions.py
@@ -24,6 +24,7 @@
from manila_tempest_tests import utils
CONF = config.CONF
+LATEST_MICROVERSION = CONF.share.max_api_microversion
@ddt.ddt
@@ -81,7 +82,6 @@
'size': size,
'share_type_id': cls.share_type_id,
'share_group_id': sg_id,
- 'experimental': True,
}} for size, sg_id in ((cls.share_size, cls.share_group['id']),
(cls.share_size2, cls.share_group['id']),
(cls.share_size, cls.share_group2['id']))
@@ -104,13 +104,15 @@
)
@tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
- def test_get_share_group_min_supported_sg_microversion(self):
+ @ddt.data(
+ *set([constants.MIN_SHARE_GROUP_MICROVERSION,
+ constants.SHARE_GROUPS_GRADUATION_VERSION, LATEST_MICROVERSION]))
+ def test_get_share_group(self, version):
+ self.skip_if_microversion_not_supported(version)
# Get share group
share_group = self.shares_v2_client.get_share_group(
- self.share_group['id'],
- version=constants.MIN_SHARE_GROUP_MICROVERSION,
- )
+ self.share_group['id'], version=version)
# Verify keys
actual_keys = set(share_group.keys())
@@ -132,8 +134,7 @@
# Get share
share = self.shares_v2_client.get_share(
self.shares[0]['id'],
- version=constants.MIN_SHARE_GROUP_MICROVERSION,
- experimental=True)
+ version=constants.MIN_SHARE_GROUP_MICROVERSION)
# Verify keys
expected_keys = {
@@ -155,11 +156,15 @@
self.assertEqual(self.share_group["id"], share["share_group_id"])
@tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
- def test_list_share_groups_min(self):
+ @ddt.data(
+ *set([constants.MIN_SHARE_GROUP_MICROVERSION,
+ constants.SHARE_GROUPS_GRADUATION_VERSION, LATEST_MICROVERSION]))
+ def test_list_share_groups(self, version):
+ self.skip_if_microversion_not_supported(version)
# List share groups
share_groups = self.shares_v2_client.list_share_groups(
- version=constants.MIN_SHARE_GROUP_MICROVERSION)
+ version=version)
# Verify keys
self.assertGreater(len(share_groups), 0)
@@ -181,8 +186,11 @@
self.assertEqual(1, len(gen), msg)
@tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
- @ddt.data(constants.MIN_SHARE_GROUP_MICROVERSION, '2.36')
+ @ddt.data(
+ *set([constants.MIN_SHARE_GROUP_MICROVERSION, '2.36',
+ constants.SHARE_GROUPS_GRADUATION_VERSION, LATEST_MICROVERSION]))
def test_list_share_groups_with_detail_min(self, version):
+ self.skip_if_microversion_not_supported(version)
params = None
if utils.is_microversion_ge(version, '2.36'):
params = {'name~': 'tempest', 'description~': 'tempest'}
@@ -218,7 +226,6 @@
detailed=True,
params={'share_group_id': self.share_group['id']},
version=constants.MIN_SHARE_GROUP_MICROVERSION,
- experimental=True,
)
share_ids = [share['id'] for share in shares]
@@ -237,11 +244,16 @@
self.shares[0]['id'], share_ids))
@tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
- def test_get_share_group_snapshot_min(self):
+ @ddt.data(
+ *set([constants.MIN_SHARE_GROUP_MICROVERSION,
+ constants.SHARE_GROUPS_GRADUATION_VERSION, LATEST_MICROVERSION]))
+ def test_get_share_group_snapshot(self, version):
+ self.skip_if_microversion_not_supported(version)
+
# Get share group snapshot
sg_snapshot = self.shares_v2_client.get_share_group_snapshot(
self.sg_snapshot['id'],
- version=constants.MIN_SHARE_GROUP_MICROVERSION,
+ version=version,
)
# Verify keys
@@ -286,24 +298,29 @@
self.assertEqual(share['size'], member['size'])
@tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
- def test_create_share_group_from_populated_share_group_snapshot_min(self):
+ @ddt.data(
+ *set([constants.MIN_SHARE_GROUP_MICROVERSION,
+ constants.SHARE_GROUPS_GRADUATION_VERSION, LATEST_MICROVERSION]))
+ def test_create_share_group_from_populated_share_group_snapshot(self,
+ version):
+ self.skip_if_microversion_not_supported(version)
sg_snapshot = self.shares_v2_client.get_share_group_snapshot(
self.sg_snapshot['id'],
- version=constants.MIN_SHARE_GROUP_MICROVERSION,
+ version=version,
)
snapshot_members = sg_snapshot['members']
new_share_group = self.create_share_group(
cleanup_in_class=False,
source_share_group_snapshot_id=self.sg_snapshot['id'],
- version=constants.MIN_SHARE_GROUP_MICROVERSION,
+ version=version,
share_group_type_id=self.share_group_type_id,
)
new_share_group = self.shares_v2_client.get_share_group(
new_share_group['id'],
- version=constants.MIN_SHARE_GROUP_MICROVERSION,
+ version=version,
)
# Verify that share_network information matches source share group
@@ -314,8 +331,7 @@
new_shares = self.shares_v2_client.list_shares(
params={'share_group_id': new_share_group['id']},
detailed=True,
- version=constants.MIN_SHARE_GROUP_MICROVERSION,
- experimental=True,
+ version=version,
)
# Verify each new share is available
@@ -343,6 +359,7 @@
share['share_network_id'])
+@ddt.ddt
class ShareGroupRenameTest(base.BaseSharesMixedTest):
@classmethod
@@ -376,13 +393,25 @@
share_type_ids=[cls.share_type_id]
)
+ def _rollback_share_group_update(self, version):
+ self.shares_v2_client.update_share_group(
+ self.share_group["id"],
+ name=self.share_group_name,
+ description=self.share_group_desc,
+ version=version,
+ )
+
@tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
- def test_update_share_group_min(self):
+ @ddt.data(
+ *set([constants.MIN_SHARE_GROUP_MICROVERSION,
+ constants.SHARE_GROUPS_GRADUATION_VERSION, LATEST_MICROVERSION]))
+ def test_update_share_group(self, version):
+ self.skip_if_microversion_not_supported(version)
# Get share_group
share_group = self.shares_v2_client.get_share_group(
self.share_group['id'],
- version=constants.MIN_SHARE_GROUP_MICROVERSION
+ version=version
)
self.assertEqual(self.share_group_name, share_group["name"])
self.assertEqual(self.share_group_desc, share_group["description"])
@@ -394,7 +423,7 @@
share_group["id"],
name=new_name,
description=new_desc,
- version=constants.MIN_SHARE_GROUP_MICROVERSION,
+ version=version,
)
self.assertEqual(new_name, updated["name"])
self.assertEqual(new_desc, updated["description"])
@@ -402,13 +431,22 @@
# Get share_group
share_group = self.shares_v2_client.get_share_group(
self.share_group['id'],
- version=constants.MIN_SHARE_GROUP_MICROVERSION,
+ version=version,
)
self.assertEqual(new_name, share_group["name"])
self.assertEqual(new_desc, share_group["description"])
+ # Rollback the update since this is a ddt and the class resources are
+ # going to be reused
+ self._rollback_share_group_update(version)
+
@tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
- def test_create_update_read_share_group_with_unicode_min(self):
+ @ddt.data(
+ *set([constants.MIN_SHARE_GROUP_MICROVERSION,
+ constants.SHARE_GROUPS_GRADUATION_VERSION, LATEST_MICROVERSION]))
+ def test_create_update_read_share_group_with_unicode(self, version):
+ self.skip_if_microversion_not_supported(version)
+
value1 = u'ಠ_ಠ'
value2 = u'ಠ_ರೃ'
@@ -417,7 +455,7 @@
cleanup_in_class=False,
name=value1,
description=value1,
- version=constants.MIN_SHARE_GROUP_MICROVERSION,
+ version=version,
share_group_type_id=self.share_group_type_id,
share_type_ids=[self.share_type_id]
)
@@ -429,13 +467,17 @@
share_group["id"],
name=value2,
description=value2,
- version=constants.MIN_SHARE_GROUP_MICROVERSION,
+ version=version,
)
self.assertEqual(value2, updated["name"])
self.assertEqual(value2, updated["description"])
# Get share group
share_group = self.shares_v2_client.get_share_group(
- share_group['id'], version=constants.MIN_SHARE_GROUP_MICROVERSION)
+ share_group['id'], version=version)
self.assertEqual(value2, share_group["name"])
self.assertEqual(value2, share_group["description"])
+
+ # Rollback the update since this is a ddt and the class resources are
+ # going to be reused
+ self._rollback_share_group_update(version)
diff --git a/manila_tempest_tests/tests/api/test_share_groups.py b/manila_tempest_tests/tests/api/test_share_groups.py
index 330c40e..5b2ae9f 100644
--- a/manila_tempest_tests/tests/api/test_share_groups.py
+++ b/manila_tempest_tests/tests/api/test_share_groups.py
@@ -72,8 +72,7 @@
share_type_id=self.share_type_id,
share_group_id=share_group['id'],
cleanup_in_class=False,
- version=constants.MIN_SHARE_GROUP_MICROVERSION,
- experimental=True)
+ version=constants.MIN_SHARE_GROUP_MICROVERSION)
# Delete
params = {"share_group_id": share_group['id']}
@@ -163,7 +162,7 @@
new_shares = self.shares_v2_client.list_shares(
params={'share_group_id': new_share_group['id']},
- version=constants.MIN_SHARE_GROUP_MICROVERSION, experimental=True)
+ version=constants.MIN_SHARE_GROUP_MICROVERSION)
self.assertEmpty(
new_shares, 'Expected 0 new shares, got %s' % len(new_shares))
@@ -236,7 +235,6 @@
'share_group_id': share_group['id'],
'version': '2.33',
'cleanup_in_class': False,
- 'experimental': True,
}
if where_specify_az == 'sg_and_share':
s_kwargs['availability_zone'] = azs[0]
diff --git a/manila_tempest_tests/tests/api/test_share_groups_negative.py b/manila_tempest_tests/tests/api/test_share_groups_negative.py
index 60e7b18..a0a6b4b 100644
--- a/manila_tempest_tests/tests/api/test_share_groups_negative.py
+++ b/manila_tempest_tests/tests/api/test_share_groups_negative.py
@@ -67,7 +67,6 @@
size=cls.share_size,
share_type_id=cls.share_type_id,
share_group_id=cls.share_group['id'],
- experimental=True,
)
# Create a share group snapshot of the share group
cls.sg_snap_name = data_utils.rand_name("tempest-sg-snap-name")
diff --git a/manila_tempest_tests/tests/api/test_shares.py b/manila_tempest_tests/tests/api/test_shares.py
index 786648e..49af8e2 100644
--- a/manila_tempest_tests/tests/api/test_shares.py
+++ b/manila_tempest_tests/tests/api/test_shares.py
@@ -104,6 +104,12 @@
detailed_elements.add('create_share_from_snapshot_support')
self.assertTrue(detailed_elements.issubset(share.keys()), msg)
+ # In v 2.54 and beyond, we expect key 'progress' in the share data
+ # returned by the share create API.
+ if utils.is_microversion_supported('2.54'):
+ detailed_elements.add('progress')
+ self.assertTrue(detailed_elements.issubset(share.keys()), msg)
+
# Delete share
self.shares_v2_client.delete_share(share['id'])
self.shares_v2_client.wait_for_resource_deletion(share_id=share['id'])
diff --git a/manila_tempest_tests/tests/api/test_shares_from_snapshot_across_pools.py b/manila_tempest_tests/tests/api/test_shares_from_snapshot_across_pools.py
new file mode 100644
index 0000000..ce7f46d
--- /dev/null
+++ b/manila_tempest_tests/tests/api/test_shares_from_snapshot_across_pools.py
@@ -0,0 +1,167 @@
+# Copyright 2020 NetApp Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from collections import defaultdict
+
+from tempest import config
+from testtools import testcase as tc
+
+from manila_tempest_tests.tests.api import base
+from manila_tempest_tests import utils
+
+CONF = config.CONF
+
+
+class SharesFromSnapshotAcrossPools(base.BaseSharesMixedTest):
+ """Test class for share creation from a snapshot across pools."""
+
+ @classmethod
+ def resource_setup(cls):
+ super(SharesFromSnapshotAcrossPools, cls).resource_setup()
+ # create share_type
+ extra_specs = {"create_share_from_snapshot_support": True,
+ "snapshot_support": True}
+ cls.share_type = cls._create_share_type(extra_specs)
+ cls.share_type_id = cls.share_type['id']
+ cls.admin_client = cls.admin_shares_v2_client
+ cls.pools = cls.get_pools_matching_share_type(cls.share_type,
+ client=cls.admin_client)
+ if len(cls.pools) < 2:
+ msg = ("Could not find the necessary pools. At least two "
+ "compatibles pools are needed to run the tests to create "
+ "share from snapshot across pools.")
+ raise cls.skipException(msg)
+
+ # Availability zones grouped by 'replication_domain'
+ cls.rep_domain_azs = defaultdict(set)
+ for pool in cls.pools:
+ backend = pool['name'].split("#")[0]
+ rep_domain = pool['capabilities'].get('replication_domain')
+
+ if rep_domain is not None:
+ # Update pools with the availability zone
+ pool['availability_zone'] = (
+ cls.get_availability_zones(backends=[backend])[0])
+ cls.rep_domain_azs[rep_domain].add(pool['availability_zone'])
+
+ @classmethod
+ def skip_checks(cls):
+ super(SharesFromSnapshotAcrossPools, cls).skip_checks()
+ if not CONF.share.capability_create_share_from_snapshot_support:
+ raise cls.skipException(
+ 'Create share from snapshot tests are disabled.')
+ if (not CONF.share
+ .run_create_share_from_snapshot_in_another_pool_or_az_tests):
+ raise cls.skipException(
+ 'Create share from snapshot in another pool or az tests are '
+ 'disabled.')
+ utils.check_skip_if_microversion_lt("2.54")
+
+ @tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
+ def test_create_share_from_snapshot_across_pools_within_backend(self):
+ backends = [pool['backend'] for pool in self.pools]
+ duplicated_backend_names = [x for n, x in enumerate(backends)
+ if x in backends[:n]]
+ if not duplicated_backend_names:
+ msg = ("Could not find the necessary pools. At least two pools in"
+ " the same backend are needed to run the tests to create"
+ " share from snapshot in another pool in the same backend.")
+ raise self.skipException(msg)
+
+ # This filter will return the pool_names of the first duplicated
+ # backend
+ pool_names = [x['pool'] for x in filter(
+ lambda x: x['backend'] == duplicated_backend_names[0], self.pools)]
+
+ # Creating share type setting up the pool_name and backend_name
+ extra_specs = {"pool_name": pool_names[0]}
+ self.admin_client.update_share_type_extra_specs(
+ self.share_type['id'], extra_specs)
+ share_type_a_get = self.admin_client.get_share_type(
+ self.share_type['id'])
+
+ self.addCleanup(
+ self.admin_shares_v2_client.delete_share_type_extra_spec,
+ self.share_type['id'], 'pool_name')
+
+ # Create source share
+ share_a = self.create_share(
+ share_type_id=share_type_a_get["share_type"]["id"])
+
+ # Retrieving the share using admin client because the share's host
+ # field is necessary to do the assert
+ share_get_a = self.admin_client.get_share(share_a["id"])
+
+ # Create snapshot from source share
+ snap = self.create_snapshot_wait_for_active(share_get_a["id"])
+
+ # There's really no other way of deterministically ensuring a snapshot
+ # can be cloned in a different pool, because the scheduler will ensure
+ # it finds the best pool with knowledge that makes sense at that point
+ # in time. Force the creation in another pool using the same share type
+ self.admin_client.update_share_type_extra_spec(
+ self.share_type['id'], "pool_name", pool_names[1])
+
+ # Create share from snapshot in another pool
+ share_b = self.create_share(snapshot_id=snap["id"])
+
+ # Retrieving the share using admin client because the share's host
+ # field is necessary to do the assert
+ share_get_b = self.admin_client.get_share(share_b['id'])
+
+ # Verify share created from snapshot
+ msg = ("Expected snapshot_id %s as "
+ "source of share %s" % (snap["id"], share_get_b["snapshot_id"]))
+ self.assertEqual(share_get_b["snapshot_id"], snap["id"], msg)
+
+ # Verify different pools
+ pool_name_a = share_get_a["host"].split("#")[1]
+ pool_name_b = share_get_b["host"].split("#")[1]
+ msg = ("The snapshot clone share was created on the same pool as the"
+ " source share %s" % pool_name_a)
+ self.assertNotEqual(pool_name_a, pool_name_b, msg)
+
+ @tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
+ def test_share_from_snapshot_across_azs(self):
+ azs = next(self.rep_domain_azs[rep] for rep in self.rep_domain_azs if
+ len(self.rep_domain_azs[rep]) > 1)
+ if azs is None:
+ msg = ("Could not find the necessary azs. At least two azs "
+ "are needed to run the test to create share from snapshot "
+ "across azs.")
+ raise self.skipException(msg)
+ azs = list(azs)
+ share_a = self.create_share(share_type_id=self.share_type_id,
+ is_public=True,
+ availability_zone=azs[0])
+
+ # Create snapshot
+ snap = self.create_snapshot_wait_for_active(share_a["id"])
+
+ # Create share from snapshot
+ share_b = self.create_share(availability_zone=azs[1],
+ snapshot_id=snap["id"])
+
+ # Verify share created from snapshot
+ msg = ("Expected snapshot_id %s as "
+ "source of share: %s" % (snap["id"], share_b["snapshot_id"]))
+ self.assertEqual(share_b["snapshot_id"], snap["id"], msg)
+
+ # Verify different azs
+ msg = ("The snapshot clone share was created on the same AZ as the"
+ " source share %s" % share_a["availability_zone"])
+ self.assertNotEqual(share_b["availability_zone"],
+ share_a["availability_zone"],
+ msg)
diff --git a/manila_tempest_tests/tests/scenario/manager.py b/manila_tempest_tests/tests/scenario/manager.py
index e767b02..dede47a 100644
--- a/manila_tempest_tests/tests/scenario/manager.py
+++ b/manila_tempest_tests/tests/scenario/manager.py
@@ -330,31 +330,19 @@
return image['id']
def glance_image_create(self):
- img_path = CONF.scenario.img_dir + "/" + CONF.scenario.img_file
- aki_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.aki_img_file
- ari_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.ari_img_file
- ami_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.ami_img_file
+ img_path = CONF.scenario.img_file
img_container_format = CONF.scenario.img_container_format
img_disk_format = CONF.scenario.img_disk_format
img_properties = CONF.scenario.img_properties
LOG.debug("paths: img: %s, container_format: %s, disk_format: %s, "
- "properties: %s, ami: %s, ari: %s, aki: %s",
+ "properties: %s",
img_path, img_container_format, img_disk_format,
- img_properties, ami_img_path, ari_img_path, aki_img_path)
- try:
- image = self._image_create('scenario-img',
- img_container_format,
- img_path,
- disk_format=img_disk_format,
- properties=img_properties)
- except IOError:
- LOG.debug("A qcow2 image was not found. Try to get a uec image.")
- kernel = self._image_create('scenario-aki', 'aki', aki_img_path)
- ramdisk = self._image_create('scenario-ari', 'ari', ari_img_path)
- properties = {'kernel_id': kernel, 'ramdisk_id': ramdisk}
- image = self._image_create('scenario-ami', 'ami',
- path=ami_img_path,
- properties=properties)
+ img_properties)
+ image = self._image_create('scenario-img',
+ img_container_format,
+ img_path,
+ disk_format=img_disk_format,
+ properties=img_properties)
LOG.debug("image:%s", image)
return image
@@ -693,28 +681,36 @@
return subnet
def _get_server_port_id_and_ip4(self, server, ip_addr=None):
- ports = self.os_admin.ports_client.list_ports(
- device_id=server['id'], fixed_ip=ip_addr)['ports']
+ if ip_addr:
+ ports = self.os_admin.ports_client.list_ports(
+ device_id=server['id'],
+ fixed_ips='ip_address=%s' % ip_addr)['ports']
+ else:
+ ports = self.os_admin.ports_client.list_ports(
+ device_id=server['id'])['ports']
# A port can have more than one IP address in some cases.
# If the network is dual-stack (IPv4 + IPv6), this port is associated
# with 2 subnets
- p_status = ['ACTIVE']
- # NOTE(vsaienko) With Ironic, instances live on separate hardware
- # servers. Neutron does not bind ports for Ironic instances, as a
- # result the port remains in the DOWN state.
- # TODO(vsaienko) remove once bug: #1599836 is resolved.
- if getattr(CONF.service_available, 'ironic', False):
- p_status.append('DOWN')
+
+ def _is_active(port):
+ # NOTE(vsaienko) With Ironic, instances live on separate hardware
+ # servers. Neutron does not bind ports for Ironic instances, as a
+ # result the port remains in the DOWN state. This has been fixed
+ # with the introduction of the networking-baremetal plugin but
+ # it's not mandatory (and is not used on all stable branches).
+ return (port['status'] == 'ACTIVE' or
+ port.get('binding:vnic_type') == 'baremetal')
+
port_map = [(p["id"], fxip["ip_address"])
for p in ports
for fxip in p["fixed_ips"]
- if netutils.is_valid_ipv4(fxip["ip_address"])
- and p['status'] in p_status]
+ if (netutils.is_valid_ipv4(fxip["ip_address"]) and
+ _is_active(p))]
inactive = [p for p in ports if p['status'] != 'ACTIVE']
if inactive:
LOG.warning("Instance has ports that are not ACTIVE: %s", inactive)
- self.assertNotEqual(0, len(port_map),
+ self.assertNotEmpty(port_map,
"No IPv4 addresses found in: %s" % ports)
self.assertEqual(len(port_map), 1,
"Found multiple IPv4 addresses: %s. "
diff --git a/manila_tempest_tests/tests/scenario/manager_share.py b/manila_tempest_tests/tests/scenario/manager_share.py
index c4cf027..eaffa35 100644
--- a/manila_tempest_tests/tests/scenario/manager_share.py
+++ b/manila_tempest_tests/tests/scenario/manager_share.py
@@ -26,7 +26,6 @@
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
-from tempest.lib.common.utils import test_utils
from tempest.lib import exceptions
from tempfile import mkstemp
@@ -61,18 +60,14 @@
super(ShareScenarioTest, cls).skip_checks()
if not CONF.service_available.manila:
raise cls.skipException("Manila support is required")
+ if cls.ip_version == 6 and not CONF.share.run_ipv6_tests:
+ raise cls.skipException("IPv6 tests are disabled")
+ if cls.protocol not in CONF.share.enable_protocols:
+ message = "%s tests are disabled" % cls.protocol
+ raise cls.skipException(message)
def setUp(self):
base.verify_test_has_appropriate_tags(self)
- if self.ipv6_enabled and not CONF.share.run_ipv6_tests:
- raise self.skipException("IPv6 tests are disabled")
- if self.protocol not in CONF.share.enable_protocols:
- message = "%s tests are disabled" % self.protocol
- raise self.skipException(message)
- if self.protocol not in CONF.share.enable_ip_rules_for_protocols:
- message = ("%s tests for access rules other than IP are disabled" %
- self.protocol)
- raise self.skipException(message)
super(ShareScenarioTest, self).setUp()
self.image_id = None
@@ -166,10 +161,9 @@
server_ip = self._get_ipv6_server_ip(instance)
if not server_ip:
# Obtain a floating IP
- floating_ip = (
- self.compute_floating_ips_client.create_floating_ip()
- ['floating_ip'])
+ floating_ip = self.create_floating_ip(instance)
self.floating_ips[instance['id']] = floating_ip
+ server_ip = floating_ip['floating_ip_address']
if self.storage_network:
storage_net_nic = instance['addresses'].get(
@@ -178,15 +172,10 @@
self.storage_network_nic_ips[instance['id']] = (
storage_net_nic[0]['addr']
)
-
- self.addCleanup(
- test_utils.call_and_ignore_notfound_exc,
- self.compute_floating_ips_client.delete_floating_ip,
- floating_ip['id'])
# Attach a floating IP
self.compute_floating_ips_client.associate_floating_ip_to_server(
- floating_ip['ip'], instance['id'])
- server_ip = floating_ip['ip']
+ floating_ip['floating_ip_address'], instance['id'])
+
self.assertIsNotNone(server_ip)
# Check ssh
remote_client = self.get_remote_client(
@@ -341,10 +330,12 @@
if self.ipv6_enabled and not self.storage_network:
server_ip = self._get_ipv6_server_ip(instance)
else:
- server_ip = (CONF.share.override_ip_for_nfs_access or
- self.storage_network_nic_ips.get(
- instance['id']) or
- self.floating_ips[instance['id']]['ip'])
+ server_ip = (
+ CONF.share.override_ip_for_nfs_access
+ or self.storage_network_nic_ips.get(instance['id'])
+ or self.floating_ips[instance['id']]['floating_ip_address']
+ )
+
self.assertIsNotNone(server_ip)
return self.allow_access_ip(
share['id'], ip=server_ip,
@@ -381,6 +372,13 @@
locations = [x['path'] for x in exports]
return locations
+ def _get_snapshot_export_locations(self, snapshot):
+ exports = (self.shares_v2_client.
+ list_snapshot_export_locations(snapshot['id']))
+ locations = [x['path'] for x in exports]
+
+ return locations
+
def _get_ipv6_server_ip(self, instance):
ipv6_addrs = []
for network_name, nic_list in instance['addresses'].items():
@@ -583,3 +581,51 @@
LOG.info('Creating Glance image using the downloaded image file')
return self._image_create('centos', 'bare', imagepath, 'qcow2')
+
+ def get_share_export_location_for_mount(self, share):
+ exports = self.get_user_export_locations(
+ share=share,
+ error_on_invalid_ip_version=True)
+ return exports[0]
+
+ def get_user_export_locations(self, share=None, snapshot=None,
+ error_on_invalid_ip_version=False):
+ locations = None
+ if share:
+ locations = self.get_share_export_locations(share)
+ elif snapshot:
+ locations = self._get_snapshot_export_locations(snapshot)
+
+ self.assertNotEmpty(locations)
+ locations = self._get_export_locations_according_to_ip_version(
+ locations, error_on_invalid_ip_version)
+ self.assertNotEmpty(locations)
+
+ return locations
+
+ def _get_export_locations_according_to_ip_version(
+ self, all_locations, error_on_invalid_ip_version):
+ locations = [
+ x for x in all_locations
+ if self.get_ip_and_version_from_export_location(
+ x)[1] == self.ip_version]
+
+ if len(locations) == 0 and not error_on_invalid_ip_version:
+ message = ("Configured backend does not support "
+ "ip_version %s" % self.ip_version)
+ raise self.skipException(message)
+ return locations
+
+ def get_ip_and_version_from_export_location(self, export):
+ export = export.replace('[', '').replace(']', '')
+ if self.protocol == 'nfs' and ':/' in export:
+ ip = export.split(':/')[0]
+ version = 6 if ip.count(':') > 1 else 4
+ elif self.protocol == 'cifs' and export.startswith(r'\\'):
+ ip = export.split('\\')[2]
+ version = 6 if (ip.count(':') > 1 or
+ ip.endswith('ipv6-literal.net')) else 4
+ else:
+ message = ("Protocol %s is not supported" % self.protocol)
+ raise self.skipException(message)
+ return ip, version
diff --git a/manila_tempest_tests/tests/scenario/test_share_basic_ops.py b/manila_tempest_tests/tests/scenario/test_share_basic_ops.py
index 3ead5df..8a55b2e 100644
--- a/manila_tempest_tests/tests/scenario/test_share_basic_ops.py
+++ b/manila_tempest_tests/tests/scenario/test_share_basic_ops.py
@@ -44,19 +44,13 @@
* Terminate the instance
"""
- def get_ip_and_version_from_export_location(self, export):
- export = export.replace('[', '').replace(']', '')
- if self.protocol == 'nfs' and ':/' in export:
- ip = export.split(':/')[0]
- version = 6 if ip.count(':') > 1 else 4
- elif self.protocol == 'cifs' and export.startswith(r'\\'):
- ip = export.split('\\')[2]
- version = 6 if (ip.count(':') > 1 or
- ip.endswith('ipv6-literal.net')) else 4
- else:
- message = ("Protocol %s is not supported" % self.protocol)
- raise self.skipException(message)
- return ip, version
+ @classmethod
+ def skip_checks(cls):
+ super(ShareBasicOpsBase, cls).skip_checks()
+ if cls.protocol not in CONF.share.enable_ip_rules_for_protocols:
+ message = ("%s tests for access rules other than IP are disabled" %
+ cls.protocol)
+ raise cls.skipException(message)
def _ping_host_from_export_location(self, export, remote_client):
ip, version = self.get_ip_and_version_from_export_location(export)
@@ -65,46 +59,11 @@
else:
remote_client.exec_command("ping -c 5 %s" % ip)
- def _get_export_locations_according_to_ip_version(
- self, all_locations, error_on_invalid_ip_version):
- locations = [
- x for x in all_locations
- if self.get_ip_and_version_from_export_location(
- x)[1] == self.ip_version]
-
- if len(locations) == 0 and not error_on_invalid_ip_version:
- message = ("Configured backend does not support "
- "ip_version %s" % self.ip_version)
- raise self.skipException(message)
- return locations
-
- def _get_user_export_locations(self, share=None, snapshot=None,
- error_on_invalid_ip_version=False):
- locations = None
- if share:
- locations = self.get_share_export_locations(share)
- elif snapshot:
- locations = self._get_snapshot_export_locations(snapshot)
-
- self.assertNotEmpty(locations)
- locations = self._get_export_locations_according_to_ip_version(
- locations, error_on_invalid_ip_version)
- self.assertNotEmpty(locations)
-
- return locations
-
- def _get_snapshot_export_locations(self, snapshot):
- exports = (self.shares_v2_client.
- list_snapshot_export_locations(snapshot['id']))
- locations = [x['path'] for x in exports]
-
- return locations
-
@tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
def test_mount_share_one_vm(self):
instance = self.boot_instance(wait_until="BUILD")
self.create_share()
- locations = self._get_user_export_locations(self.share)
+ locations = self.get_user_export_locations(self.share)
instance = self.wait_for_active_instance(instance["id"])
remote_client = self.init_remote_client(instance)
self.provide_access_to_auxiliary_instance(instance)
@@ -116,16 +75,11 @@
@tc.attr(base.TAG_NEGATIVE, base.TAG_BACKEND)
def test_write_with_ro_access(self):
'''Test if an instance with ro access can write on the share.'''
- if self.protocol.upper() == 'CIFS':
- msg = ("Skipped for CIFS protocol because RO access is not "
- "supported for shares by IP.")
- raise self.skipException(msg)
-
test_data = "Some test data to write"
instance = self.boot_instance(wait_until="BUILD")
self.create_share()
- location = self._get_user_export_locations(self.share)[0]
+ location = self.get_user_export_locations(self.share)[0]
instance = self.wait_for_active_instance(instance["id"])
remote_client_inst = self.init_remote_client(instance)
@@ -153,7 +107,7 @@
instance1 = self.boot_instance(wait_until="BUILD")
instance2 = self.boot_instance(wait_until="BUILD")
self.create_share()
- location = self._get_user_export_locations(self.share)[0]
+ location = self.get_user_export_locations(self.share)[0]
instance1 = self.wait_for_active_instance(instance1["id"])
instance2 = self.wait_for_active_instance(instance2["id"])
@@ -210,7 +164,7 @@
instance = self.boot_instance(wait_until="BUILD")
self.create_share()
- exports = self._get_user_export_locations(self.share)
+ exports = self.get_user_export_locations(self.share)
instance = self.wait_for_active_instance(instance["id"])
self.share = self.shares_admin_v2_client.get_share(self.share['id'])
@@ -266,7 +220,7 @@
self.share = self.migration_complete(self.share['id'], dest_pool)
- new_exports = self._get_user_export_locations(
+ new_exports = self.get_user_export_locations(
self.share, error_on_invalid_ip_version=True)
self.assertEqual(dest_pool, self.share['host'])
@@ -289,10 +243,6 @@
@testtools.skipUnless(
CONF.share.run_snapshot_tests, "Snapshot tests are disabled.")
def test_write_data_to_share_created_from_snapshot(self):
- if self.protocol.upper() == 'CIFS':
- msg = "Skipped for CIFS protocol because of bug/1649573"
- raise self.skipException(msg)
-
# 1 - Create UVM, ok, created
instance = self.boot_instance(wait_until="BUILD")
@@ -308,7 +258,7 @@
self.provide_access_to_auxiliary_instance(instance, parent_share)
# 5 - Try mount S1 to UVM, ok, mounted
- user_export_location = self._get_user_export_locations(parent_share)[0]
+ user_export_location = self.get_user_export_locations(parent_share)[0]
parent_share_dir = "/mnt/parent"
remote_client.exec_command("sudo mkdir -p %s" % parent_share_dir)
@@ -330,7 +280,7 @@
# 10 - Try mount S2 - fail, access denied. We test that child share
# did not get access rules from parent share.
- user_export_location = self._get_user_export_locations(child_share)[0]
+ user_export_location = self.get_user_export_locations(child_share)[0]
child_share_dir = "/mnt/child"
remote_client.exec_command("sudo mkdir -p %s" % child_share_dir)
@@ -377,10 +327,6 @@
@testtools.skipUnless(CONF.share.run_snapshot_tests,
"Snapshot tests are disabled.")
def test_read_mountable_snapshot(self):
- if self.protocol.upper() == 'CIFS':
- msg = "Skipped for CIFS protocol because of bug/1649573"
- raise self.skipException(msg)
-
# 1 - Create UVM, ok, created
instance = self.boot_instance(wait_until="BUILD")
@@ -396,7 +342,7 @@
self.provide_access_to_auxiliary_instance(instance, parent_share)
# 5 - Try mount S1 to UVM, ok, mounted
- user_export_location = self._get_user_export_locations(parent_share)[0]
+ user_export_location = self.get_user_export_locations(parent_share)[0]
parent_share_dir = "/mnt/parent"
snapshot_dir = "/mnt/snapshot_dir"
remote_client.exec_command("sudo mkdir -p %s" % parent_share_dir)
@@ -419,7 +365,7 @@
self.provide_access_to_auxiliary_instance(instance, snapshot=snapshot)
# 10 - Mount SS1
- user_export_location = self._get_user_export_locations(
+ user_export_location = self.get_user_export_locations(
snapshot=snapshot)[0]
self.mount_share(user_export_location, remote_client, snapshot_dir)
self.addCleanup(self.unmount_share, remote_client, snapshot_dir)
@@ -463,6 +409,22 @@
"sudo mount.cifs \"%s\" %s -o guest" % (location, target_dir)
)
+ @tc.attr(base.TAG_NEGATIVE, base.TAG_BACKEND)
+ def test_write_with_ro_access(self):
+ msg = ("Skipped for CIFS protocol because RO access is not "
+ "supported for shares by IP.")
+ raise self.skipException(msg)
+
+ @tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
+ def test_read_mountable_snapshot(self):
+ msg = "Skipped for CIFS protocol because of bug/1649573"
+ raise self.skipException(msg)
+
+ @tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
+ def test_write_data_to_share_created_from_snapshot(self):
+ msg = "Skipped for CIFS protocol because of bug/1649573"
+ raise self.skipException(msg)
+
class TestShareBasicOpsNFSIPv6(TestShareBasicOpsNFS):
ip_version = 6
diff --git a/manila_tempest_tests/tests/scenario/test_share_extend.py b/manila_tempest_tests/tests/scenario/test_share_extend.py
index 446ec25..7827b75 100644
--- a/manila_tempest_tests/tests/scenario/test_share_extend.py
+++ b/manila_tempest_tests/tests/scenario/test_share_extend.py
@@ -45,6 +45,14 @@
* Terminate the instance
"""
+ @classmethod
+ def skip_checks(cls):
+ super(ShareExtendBase, cls).skip_checks()
+ if cls.protocol not in CONF.share.enable_ip_rules_for_protocols:
+ message = ("%s tests for access rules other than IP are disabled" %
+ cls.protocol)
+ raise cls.skipException(message)
+
@tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
def test_create_extend_and_write(self):
default_share_size = CONF.share.share_size
@@ -63,10 +71,9 @@
LOG.debug('Step 4 - grant access')
self.provide_access_to_auxiliary_instance(instance, share=share)
- locations = self.get_share_export_locations(share)
-
LOG.debug('Step 5 - mount')
- self.mount_share(locations[0], remote_client)
+ location = self.get_share_export_location_for_mount(share)
+ self.mount_share(location, remote_client)
total_blocks = (units.Ki * default_share_size) / 64
three_quarter_blocks = (total_blocks / 4) * 3
@@ -99,7 +106,7 @@
self.assertEqual(extended_share_size, int(share["size"]))
LOG.debug('Step 8 - writing more data, should succeed')
- self.write_data_with_remount(locations[0], remote_client, '/mnt/t3',
+ self.write_data_with_remount(location, remote_client, '/mnt/t3',
'64M', over_one_quarter_blocks)
ls_result = remote_client.exec_command("sudo ls -lAh /mnt/")
LOG.debug(ls_result)
diff --git a/manila_tempest_tests/tests/scenario/test_share_manage_unmanage.py b/manila_tempest_tests/tests/scenario/test_share_manage_unmanage.py
index 8e93335..d58378c 100644
--- a/manila_tempest_tests/tests/scenario/test_share_manage_unmanage.py
+++ b/manila_tempest_tests/tests/scenario/test_share_manage_unmanage.py
@@ -51,6 +51,14 @@
* Terminate the instance
"""
+ @classmethod
+ def skip_checks(cls):
+ super(ShareManageUnmanageBase, cls).skip_checks()
+ if cls.protocol not in CONF.share.enable_ip_rules_for_protocols:
+ message = ("%s tests for access rules other than IP are disabled" %
+ cls.protocol)
+ raise cls.skipException(message)
+
@tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
@testtools.skipUnless(
CONF.share.run_manage_unmanage_tests,
diff --git a/manila_tempest_tests/tests/scenario/test_share_shrink.py b/manila_tempest_tests/tests/scenario/test_share_shrink.py
index 4d232a8..e8ffdcb 100644
--- a/manila_tempest_tests/tests/scenario/test_share_shrink.py
+++ b/manila_tempest_tests/tests/scenario/test_share_shrink.py
@@ -46,6 +46,14 @@
* Terminate the instance
"""
+ @classmethod
+ def skip_checks(cls):
+ super(ShareShrinkBase, cls).skip_checks()
+ if cls.protocol not in CONF.share.enable_ip_rules_for_protocols:
+ message = ("%s tests for access rules other than IP are disabled" %
+ cls.protocol)
+ raise cls.skipException(message)
+
@tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
@testtools.skipUnless(
CONF.share.run_shrink_tests, 'Shrink share tests are disabled.')
@@ -66,10 +74,9 @@
LOG.debug('Step 4 - grant access')
self.provide_access_to_auxiliary_instance(instance)
- locations = self.get_share_export_locations(share)
-
LOG.debug('Step 5 - mount')
- self.mount_share(locations[0], remote_client)
+ location = self.get_share_export_location_for_mount(share)
+ self.mount_share(location, remote_client)
total_blocks = (1024 * default_share_size) / 64
blocks = total_blocks + 4
diff --git a/manila_tempest_tests/utils.py b/manila_tempest_tests/utils.py
index 2ea623d..e842a33 100644
--- a/manila_tempest_tests/utils.py
+++ b/manila_tempest_tests/utils.py
@@ -23,6 +23,8 @@
CONF = config.CONF
SHARE_NETWORK_SUBNETS_MICROVERSION = '2.51'
+SHARE_REPLICA_QUOTAS_MICROVERSION = "2.53"
+EXPERIMENTAL = {'X-OpenStack-Manila-API-Experimental': 'True'}
def get_microversion_as_tuple(microversion_str):
@@ -202,7 +204,20 @@
return is_microversion_supported(SHARE_NETWORK_SUBNETS_MICROVERSION)
+def share_replica_quotas_are_supported():
+ return is_microversion_supported(SHARE_REPLICA_QUOTAS_MICROVERSION)
+
+
def share_network_get_default_subnet(share_network):
return next((
subnet for subnet in share_network.get('share_network_subnets', [])
if subnet['availability_zone'] is None), None)
+
+
+def get_extra_headers(request_version, graduation_version):
+ headers = None
+ extra_headers = False
+ if is_microversion_lt(request_version, graduation_version):
+ headers = EXPERIMENTAL
+ extra_headers = True
+ return headers, extra_headers
diff --git a/releasenotes/notes/bug-1848278-a37290750e6ac248.yaml b/releasenotes/notes/bug-1848278-a37290750e6ac248.yaml
new file mode 100644
index 0000000..f0ce109
--- /dev/null
+++ b/releasenotes/notes/bug-1848278-a37290750e6ac248.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ Instead of using the first export location, use the first export location
+   matching the address family (IPv4 or IPv6) of the address to be used
+   for the mount.
diff --git a/setup.cfg b/setup.cfg
index 1bbfef0..9da84c3 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -14,36 +14,14 @@
Operating System :: POSIX :: Linux
Programming Language :: Python
Programming Language :: Python :: 3
- Programming Language :: Python :: 3.3
Programming Language :: Python :: 3.5
+ Programming Language :: Python :: 3.6
+ Programming Language :: Python :: 3.7
[files]
packages =
manila_tempest_tests
-[build_sphinx]
-all-files = 1
-warning-is-error = 1
-source-dir = doc/source
-build-dir = doc/build
-
-[upload_sphinx]
-upload-dir = doc/build/html
-
-[compile_catalog]
-directory = manila_tempest_tests/locale
-domain = manila_tempest_tests
-
-[update_catalog]
-domain = manila_tempest_tests
-output_dir = manila_tempest_tests/locale
-input_file = manila_tempest_tests/locale/manila-tempest-plugin.pot
-
-[extract_messages]
-keywords = _ gettext ngettext l_ lazy_gettext
-mapping_file = babel.cfg
-output_file = manila_tempest_tests/locale/manila-tempest-plugin.pot
-
[entry_points]
tempest.test_plugins =
manila_tests = manila_tempest_tests.plugin:ManilaTempestPlugin
diff --git a/test-requirements.txt b/test-requirements.txt
index 6a6ab89..e3eead6 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -10,6 +10,6 @@
oslotest>=3.2.0 # Apache-2.0
stestr>=1.0.0 # Apache-2.0
testtools>=2.2.0 # MIT
-openstackdocstheme>=1.18.1 # Apache-2.0
+openstackdocstheme>=1.31.2 # Apache-2.0
# releasenotes
reno>=2.5.0 # Apache-2.0
diff --git a/tox.ini b/tox.ini
index 59764db..a185fab 100644
--- a/tox.ini
+++ b/tox.ini
@@ -6,15 +6,17 @@
[testenv]
basepython = python3
usedevelop = True
-install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} {opts} {packages}
setenv =
VIRTUAL_ENV={envdir}
PYTHONWARNINGS=default::DeprecationWarning
OS_STDOUT_CAPTURE=1
OS_STDERR_CAPTURE=1
OS_TEST_TIMEOUT=60
-deps = -r{toxinidir}/requirements.txt
- -r{toxinidir}/test-requirements.txt
+deps =
+ -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
+ -r{toxinidir}/requirements.txt
+ -r{toxinidir}/test-requirements.txt
+
# commands = python setup.py test --slowest --testr-args='{posargs}'
[testenv:pep8]
@@ -24,7 +26,7 @@
commands = {posargs}
[testenv:docs]
-commands = python setup.py build_sphinx
+commands = sphinx-build -W -b html doc/source doc/build/html
[testenv:releasenotes]
commands =