Merge "Added test for Limit formatting routes when adding resources"
diff --git a/.gitignore b/.gitignore
index 59b35f5..9ab095e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -57,3 +57,6 @@
# Files created by releasenotes build
releasenotes/build
+
+# PyCharm IDE
+.idea/
diff --git a/manila_tempest_tests/common/constants.py b/manila_tempest_tests/common/constants.py
index 25cf4ee..c56712e 100644
--- a/manila_tempest_tests/common/constants.py
+++ b/manila_tempest_tests/common/constants.py
@@ -83,3 +83,5 @@
SHARE_GROUP_TYPE_REQUIRED_KEYS = {
'id', 'name', 'share_types', 'is_public', 'group_specs',
}
+
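+# Share access rule metadata requires at least this API microversion.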
+MIN_SHARE_ACCESS_METADATA_MICROVERSION = '2.45'
diff --git a/manila_tempest_tests/config.py b/manila_tempest_tests/config.py
index 4e6c99b..c24a50e 100644
--- a/manila_tempest_tests/config.py
+++ b/manila_tempest_tests/config.py
@@ -30,7 +30,7 @@
help="The minimum api microversion is configured to be the "
"value of the minimum microversion supported by Manila."),
cfg.StrOpt("max_api_microversion",
- default="2.42",
+ default="2.46",
help="The maximum api microversion is configured to be the "
"value of the latest microversion supported by Manila."),
cfg.StrOpt("region",
diff --git a/manila_tempest_tests/services/share/v2/json/shares_client.py b/manila_tempest_tests/services/share/v2/json/shares_client.py
index 115aab3..522d467 100644
--- a/manila_tempest_tests/services/share/v2/json/shares_client.py
+++ b/manila_tempest_tests/services/share/v2/json/shares_client.py
@@ -731,7 +731,8 @@
def create_access_rule(self, share_id, access_type="ip",
access_to="0.0.0.0", access_level=None,
- version=LATEST_MICROVERSION, action_name=None):
+ version=LATEST_MICROVERSION, metadata=None,
+ action_name=None):
post_body = {
self._get_access_action_name(version, 'os-allow_access'): {
"access_type": access_type,
@@ -739,6 +740,8 @@
"access_level": access_level,
}
}
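+        # Access rule metadata is only accepted by the API starting with
+        # microversion 2.45, where the action name is always 'allow_access'.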
+ if metadata is not None:
+ post_body['allow_access']['metadata'] = metadata
body = json.dumps(post_body)
resp, body = self.post(
"shares/%s/action" % share_id, body, version=version,
@@ -747,10 +750,34 @@
return self._parse_resp(body)
def list_access_rules(self, share_id, version=LATEST_MICROVERSION,
- action_name=None):
- body = {self._get_access_action_name(version, 'os-access_list'): None}
- resp, body = self.post(
- "shares/%s/action" % share_id, json.dumps(body), version=version)
+ metadata=None, action_name=None):
+ if utils.is_microversion_lt(version, "2.45"):
+ body = {
+ self._get_access_action_name(version, 'os-access_list'): None
+ }
+ resp, body = self.post(
+ "shares/%s/action" % share_id, json.dumps(body),
+ version=version)
+ self.expected_success(200, resp.status)
+ else:
+ return self.list_access_rules_with_new_API(
+ share_id, metadata=metadata, version=version,
+ action_name=action_name)
+ return self._parse_resp(body)
+
+ def list_access_rules_with_new_API(self, share_id, metadata=None,
+ version=LATEST_MICROVERSION,
+ action_name=None):
+ metadata = metadata or {}
+ query_string = ''
+
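+        # Drop metadata entries with empty values and encode the rest as
+        # extra query parameters for server-side filtering.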
+ params = sorted(
+ [(k, v) for (k, v) in list(metadata.items()) if v])
+ if params:
+ query_string = "&%s" % urlparse.urlencode(params)
+
+ url = 'share-access-rules?share_id=%s' % share_id + query_string
+ resp, body = self.get(url, version=version)
self.expected_success(200, resp.status)
return self._parse_resp(body)
@@ -767,6 +794,28 @@
self.expected_success(202, resp.status)
return body
+ def get_access(self, access_id, version=LATEST_MICROVERSION):
+ resp, body = self.get("share-access-rules/%s" % access_id,
+ version=version)
+ self.expected_success(200, resp.status)
+ return self._parse_resp(body)
+
+ def update_access_metadata(self, access_id, metadata,
+ version=LATEST_MICROVERSION):
+ url = 'share-access-rules/%s/metadata' % access_id
+ body = {"metadata": metadata}
+ resp, body = self.put(url, json.dumps(body), version=version)
+ self.expected_success(200, resp.status)
+ return self._parse_resp(body)
+
+ def delete_access_metadata(self, access_id, key,
+ version=LATEST_MICROVERSION):
+ url = "share-access-rules/%s/metadata/%s" % (access_id, key)
+ resp, body = self.delete(url, version=version)
+ self.expected_success(200, resp.status)
+ return body
+
+
###############
def list_availability_zones(self, url='availability-zones',
@@ -1734,3 +1783,54 @@
' the required time (%s s).' %
(resource_id, self.build_timeout))
raise exceptions.TimeoutException(message)
+
+###############
+
+ def create_security_service(self, ss_type="ldap",
+ version=LATEST_MICROVERSION, **kwargs):
+ """Creates Security Service.
+
+ :param ss_type: ldap, kerberos, active_directory
+ :param version: microversion string
+        :param kwargs: name, description, dns_ip, server, ou, domain, user,
+            password
+ """
+ post_body = {"type": ss_type}
+ post_body.update(kwargs)
+ body = json.dumps({"security_service": post_body})
+ resp, body = self.post("security-services", body, version=version)
+ self.expected_success(200, resp.status)
+ return self._parse_resp(body)
+
+ def update_security_service(self, ss_id, version=LATEST_MICROVERSION,
+ **kwargs):
+ """Updates Security Service.
+
+ :param ss_id: id of security-service entity
+ :param version: microversion string
+        :param kwargs: dns_ip, server, ou, domain, user, password, name,
+            description
+            For a security service in 'active' status only the 'name' and
+            'description' fields can be changed.
+ """
+ body = json.dumps({"security_service": kwargs})
+ resp, body = self.put("security-services/%s" % ss_id, body,
+ version=version)
+ self.expected_success(200, resp.status)
+ return self._parse_resp(body)
+
+ def get_security_service(self, ss_id, version=LATEST_MICROVERSION):
+ resp, body = self.get("security-services/%s" % ss_id, version=version)
+ self.expected_success(200, resp.status)
+ return self._parse_resp(body)
+
+ def list_security_services(self, detailed=False, params=None,
+ version=LATEST_MICROVERSION):
+ uri = "security-services"
+ if detailed:
+ uri += '/detail'
+ if params:
+ uri += "?%s" % urlparse.urlencode(params)
+ resp, body = self.get(uri, version=version)
+ self.expected_success(200, resp.status)
+ return self._parse_resp(body)
diff --git a/manila_tempest_tests/tests/api/admin/test_quotas.py b/manila_tempest_tests/tests/api/admin/test_quotas.py
index 02d0a62..05a548f 100644
--- a/manila_tempest_tests/tests/api/admin/test_quotas.py
+++ b/manila_tempest_tests/tests/api/admin/test_quotas.py
@@ -564,7 +564,7 @@
self.assertEqual(-1, quotas.get('share_group_snapshots'))
@ddt.data(11, -1)
- @tc.attr(base.TAG_NEGATIVE, base.TAG_API)
+ @tc.attr(base.TAG_POSITIVE, base.TAG_API)
def test_update_user_quotas_bigger_than_project_quota(self, user_quota):
self.client.update_quotas(self.tenant_id, shares=10)
self.client.update_quotas(
@@ -572,7 +572,7 @@
shares=user_quota)
@ddt.data(11, -1)
- @tc.attr(base.TAG_NEGATIVE, base.TAG_API)
+ @tc.attr(base.TAG_POSITIVE, base.TAG_API)
@base.skip_if_microversion_lt("2.39")
def test_update_share_type_quotas_bigger_than_project_quota(self, st_q):
share_type = self._create_share_type()
@@ -582,7 +582,7 @@
self.tenant_id, share_type=share_type['name'], force=True,
shares=st_q)
- @tc.attr(base.TAG_NEGATIVE, base.TAG_API)
+ @tc.attr(base.TAG_POSITIVE, base.TAG_API)
@base.skip_if_microversion_lt("2.39")
def test_set_share_type_quota_bigger_than_users_quota(self):
share_type = self._create_share_type()
diff --git a/manila_tempest_tests/tests/api/admin/test_share_group_types.py b/manila_tempest_tests/tests/api/admin/test_share_group_types.py
index c0c9a97..c08448b 100644
--- a/manila_tempest_tests/tests/api/admin/test_share_group_types.py
+++ b/manila_tempest_tests/tests/api/admin/test_share_group_types.py
@@ -21,9 +21,12 @@
from manila_tempest_tests.common import constants
from manila_tempest_tests.tests.api import base
+from manila_tempest_tests import utils
CONF = config.CONF
+LATEST_MICROVERSION = CONF.share.max_api_microversion
+
@testtools.skipUnless(
CONF.share.run_share_group_tests, 'Share Group tests disabled.')
@@ -243,3 +246,42 @@
# List projects that have access for share group type - none expected
access = self.shares_v2_client.list_access_to_share_group_type(sgt_id)
self.assertEmpty(access)
+
+ @tc.attr(base.TAG_POSITIVE, base.TAG_API)
+ @ddt.data(*set(('2.45', '2.46', LATEST_MICROVERSION)))
+ def test_share_group_type_create_show_list_with_is_default_key(self,
+ version):
+ self.skip_if_microversion_not_supported(version)
+ name = data_utils.rand_name("tempest-manila")
+
+ # Create share group type
+ sg_type_c = self.create_share_group_type(
+ name=name,
+ share_types=self.share_type['id'],
+ cleanup_in_class=False,
+ version=version)
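+        # The 'is_default' field is only exposed from API microversion 2.46.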
+ if utils.is_microversion_ge(version, '2.46'):
+ self.assertIn('is_default', sg_type_c)
+ self.assertIs(False, sg_type_c['is_default'])
+ else:
+ self.assertNotIn('is_default', sg_type_c)
+
+ # List share group type
+ sg_type_list = self.shares_v2_client.list_share_group_types(
+ version=version)
+ for sg_type_get in sg_type_list:
+ if utils.is_microversion_ge(version, '2.46'):
+ self.assertIn('is_default', sg_type_get)
+                self.assertIn(sg_type_get['is_default'], (True, False))
+ else:
+ self.assertNotIn('is_default', sg_type_get)
+
+ # Show share group type
+ sg_type_id = sg_type_c['id']
+ sg_type_show = self.shares_v2_client.get_share_group_type(
+ sg_type_id, version=version)
+ if utils.is_microversion_ge(version, '2.46'):
+ self.assertIn('is_default', sg_type_show)
+ self.assertIs(False, sg_type_show['is_default'])
+ else:
+ self.assertNotIn('is_default', sg_type_show)
diff --git a/manila_tempest_tests/tests/api/admin/test_share_types.py b/manila_tempest_tests/tests/api/admin/test_share_types.py
index dfa9fe7..f06c70d 100644
--- a/manila_tempest_tests/tests/api/admin/test_share_types.py
+++ b/manila_tempest_tests/tests/api/admin/test_share_types.py
@@ -24,6 +24,8 @@
CONF = config.CONF
+LATEST_MICROVERSION = CONF.share.max_api_microversion
+
@ddt.ddt
class ShareTypesAdminTest(base.BaseSharesAdminTest):
@@ -205,3 +207,43 @@
# List projects that have access for share type - none expected
access = self.shares_v2_client.list_access_to_share_type(st_id)
self.assertEmpty(access)
+
+ @tc.attr(base.TAG_POSITIVE, base.TAG_API)
+ @ddt.data(*set(('2.45', '2.46', LATEST_MICROVERSION)))
+ def test_share_type_create_show_list_with_is_default_key(self, version):
+ self.skip_if_microversion_not_supported(version)
+ name = data_utils.rand_name("tempest-manila")
+ extra_specs = self.add_extra_specs_to_dict()
+
+ # Create share type
+ st_create = self.create_share_type(
+ name, extra_specs=extra_specs, version=version)['share_type']
+
+ if utils.is_microversion_ge(version, '2.46'):
+ self.assertIn('is_default', st_create)
+ self.assertIs(False, st_create['is_default'])
+ else:
+ self.assertNotIn('is_default', st_create)
+
+ # list share types
+ st_list = self.shares_v2_client.list_share_types(version=version)
+ for st_get in st_list['share_types']:
+ if utils.is_microversion_ge(version, '2.46'):
+ self.assertIn('is_default', st_get)
+ if st_create['id'] == st_get['id']:
+ self.assertIs(False, st_get['is_default'])
+ else:
+                    self.assertIn(st_get['is_default'], (True, False))
+ else:
+ self.assertNotIn('is_default', st_get)
+
+ # show share types
+ st_id = st_create['id']
+ st_show = self.shares_v2_client.get_share_type(
+ st_id, version=version)['share_type']
+
+ if utils.is_microversion_ge(version, '2.46'):
+ self.assertIn('is_default', st_show)
+ self.assertIs(False, st_show['is_default'])
+ else:
+ self.assertNotIn('is_default', st_show)
diff --git a/manila_tempest_tests/tests/api/admin/test_snapshot_manage.py b/manila_tempest_tests/tests/api/admin/test_snapshot_manage.py
index a93f737..8798212 100644
--- a/manila_tempest_tests/tests/api/admin/test_snapshot_manage.py
+++ b/manila_tempest_tests/tests/api/admin/test_snapshot_manage.py
@@ -136,7 +136,7 @@
2.16.
"""
# Skip in case specified version is not supported
- utils.skip_if_microversion_not_supported(version)
+ self.skip_if_microversion_not_supported(version)
snap_name = data_utils.rand_name("tempest-snapshot-name")
snap_desc = data_utils.rand_name("tempest-snapshot-description")
diff --git a/manila_tempest_tests/tests/api/base.py b/manila_tempest_tests/tests/api/base.py
index 401c089..759b723 100644
--- a/manila_tempest_tests/tests/api/base.py
+++ b/manila_tempest_tests/tests/api/base.py
@@ -954,7 +954,7 @@
return data
@classmethod
- def generate_security_service_data(self):
+ def generate_security_service_data(self, set_ou=False):
data = {
"name": data_utils.rand_name("ss-name"),
"description": data_utils.rand_name("ss-desc"),
@@ -964,6 +964,9 @@
"user": data_utils.rand_name("ss-user"),
"password": data_utils.rand_name("ss-password"),
}
+ if set_ou:
+ data["ou"] = data_utils.rand_name("ss-ou")
+
return data
# Useful assertions
diff --git a/manila_tempest_tests/tests/api/test_access_rules_metadata.py b/manila_tempest_tests/tests/api/test_access_rules_metadata.py
new file mode 100644
index 0000000..d3f25d7
--- /dev/null
+++ b/manila_tempest_tests/tests/api/test_access_rules_metadata.py
@@ -0,0 +1,133 @@
+# Copyright 2018 Huawei Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import ddt
+from tempest import config
+from testtools import testcase as tc
+
+from manila_tempest_tests.common import constants
+from manila_tempest_tests.tests.api import base
+from manila_tempest_tests import utils
+
+CONF = config.CONF
+
+
+@base.skip_if_microversion_lt(
+ constants.MIN_SHARE_ACCESS_METADATA_MICROVERSION)
+@ddt.ddt
+class AccessRulesMetadataTest(base.BaseSharesTest):
+
+ @classmethod
+ def resource_setup(cls):
+ super(AccessRulesMetadataTest, cls).resource_setup()
+        # Share access rule metadata does not depend on the access type,
+        # access protocol or access_to value, so any single combination
+        # supported by the driver is enough for these tests.
+ if not (any(p in CONF.share.enable_ip_rules_for_protocols
+ for p in cls.protocols) or
+ any(p in CONF.share.enable_user_rules_for_protocols
+ for p in cls.protocols) or
+ any(p in CONF.share.enable_cert_rules_for_protocols
+ for p in cls.protocols) or
+ any(p in CONF.share.enable_cephx_rules_for_protocols
+ for p in cls.protocols)):
+ cls.message = "Rule tests are disabled"
+ raise cls.skipException(cls.message)
+ if CONF.share.enable_ip_rules_for_protocols:
+ cls.protocol = CONF.share.enable_ip_rules_for_protocols[0]
+ cls.access_type = "ip"
+ elif CONF.share.enable_user_rules_for_protocols:
+ cls.protocol = CONF.share.enable_user_rules_for_protocols[0]
+ cls.access_type = "user"
+ elif CONF.share.enable_cert_rules_for_protocols:
+ cls.protocol = CONF.share.enable_cert_rules_for_protocols[0]
+ cls.access_type = "cert"
+ elif CONF.share.enable_cephx_rules_for_protocols:
+ cls.protocol = CONF.share.enable_cephx_rules_for_protocols[0]
+ cls.access_type = "cephx"
+ cls.shares_v2_client.share_protocol = cls.protocol
+ int_range = range(20, 50)
+ cls.access_to = {
+            # A list of unique values is required to be able to create many
+            # access rules for one share using different API microversions.
+ 'ip': set([utils.rand_ipv6_ip() for i in int_range]),
+            # The following users are fake, so access rules that use them
+            # are expected to fail; they are only used for API testing.
+ 'user': ['foo_user_%d' % i for i in int_range],
+ 'cert': ['tenant_%d.example.com' % i for i in int_range],
+ 'cephx': ['eve%d' % i for i in int_range],
+ }
+ cls.share = cls.create_share()
+ cls.md1 = {"key1": "value1", "key2": "value2"}
+ cls.access = cls.shares_v2_client.create_access_rule(
+ cls.share["id"], cls.access_type,
+ cls.access_to[cls.access_type].pop(), 'rw', metadata=cls.md1)
+
+ @tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
+ def test_set_get_delete_access_metadata(self):
+ data = {"key1": "v" * 255, "k" * 255: "value2"}
+ # set metadata
+ access = self.shares_v2_client.create_access_rule(
+ self.share["id"], self.access_type,
+ self.access_to[self.access_type].pop(), 'rw', metadata=data)
+
+ # read metadata
+ get_access = self.shares_v2_client.get_access(access["id"])
+
+ # verify metadata
+ self.assertEqual(data, get_access['metadata'])
+
+ # delete metadata
+ for key in data.keys():
+ self.shares_v2_client.delete_access_metadata(access["id"], key)
+
+ # verify deletion of metadata
+ access_without_md = self.shares_v2_client.get_access(access["id"])
+ self.assertEqual({}, access_without_md['metadata'])
+ self.shares_v2_client.delete_access_rule(self.share["id"],
+ access["id"])
+ self.shares_v2_client.wait_for_resource_deletion(
+ rule_id=access["id"], share_id=self.share["id"])
+
+ @tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
+ def test_update_metadata_by_key(self):
+ md2 = {"key7": "value7", "key2": "value6_new"}
+
+ # update metadata
+ self.shares_v2_client.update_access_metadata(
+ access_id=self.access['id'], metadata=md2)
+ # get metadata
+ get_access = self.shares_v2_client.get_access(self.access['id'])
+
+ # verify metadata
+ self.md1.update(md2)
+ self.assertEqual(self.md1, get_access['metadata'])
+
+ @tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
+ def test_list_access_filter_by_metadata(self):
+ data = {"key3": "v3", "key4": "value4"}
+ # set metadata
+ access = self.shares_v2_client.create_access_rule(
+ self.share["id"], self.access_type,
+ self.access_to[self.access_type].pop(), 'rw', metadata=data)
+
+        # list access rules filtered by metadata
+ list_access = self.shares_v2_client.list_access_rules(
+ share_id=self.share["id"], metadata={'metadata': data})
+
+ # verify metadata
+ self.assertEqual(1, len(list_access))
+ self.assertEqual(access['metadata'], list_access[0]['metadata'])
+ self.assertEqual(access['id'], list_access[0]['id'])
diff --git a/manila_tempest_tests/tests/api/test_access_rules_metadata_negative.py b/manila_tempest_tests/tests/api/test_access_rules_metadata_negative.py
new file mode 100644
index 0000000..692159b
--- /dev/null
+++ b/manila_tempest_tests/tests/api/test_access_rules_metadata_negative.py
@@ -0,0 +1,81 @@
+# Copyright 2018 Huawei Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import ddt
+from tempest import config
+from tempest.lib import exceptions as lib_exc
+from testtools import testcase as tc
+
+from manila_tempest_tests.common import constants
+from manila_tempest_tests.tests.api import base
+from manila_tempest_tests import utils
+
+CONF = config.CONF
+
+
+@base.skip_if_microversion_lt(
+ constants.MIN_SHARE_ACCESS_METADATA_MICROVERSION)
+@ddt.ddt
+class AccessesMetadataNegativeTest(base.BaseSharesTest):
+
+ @classmethod
+ def resource_setup(cls):
+ super(AccessesMetadataNegativeTest, cls).resource_setup()
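+        # Skip unless at least one access rule type is enabled for the
+        # protocols under test.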
+ if not (any(p in CONF.share.enable_ip_rules_for_protocols
+ for p in cls.protocols) or
+ any(p in CONF.share.enable_user_rules_for_protocols
+ for p in cls.protocols) or
+ any(p in CONF.share.enable_cert_rules_for_protocols
+ for p in cls.protocols) or
+ any(p in CONF.share.enable_cephx_rules_for_protocols
+ for p in cls.protocols)):
+ cls.message = "Rule tests are disabled"
+ raise cls.skipException(cls.message)
+ if CONF.share.enable_ip_rules_for_protocols:
+ cls.protocol = CONF.share.enable_ip_rules_for_protocols[0]
+ cls.access_type = "ip"
+ cls.access_to = utils.rand_ip()
+ elif CONF.share.enable_user_rules_for_protocols:
+ cls.protocol = CONF.share.enable_user_rules_for_protocols[0]
+ cls.access_type = "user"
+ cls.access_to = CONF.share.username_for_user_rules
+ elif CONF.share.enable_cert_rules_for_protocols:
+ cls.protocol = CONF.share.enable_cert_rules_for_protocols[0]
+ cls.access_type = "cert"
+ cls.access_to = "client3.com"
+ elif CONF.share.enable_cephx_rules_for_protocols:
+ cls.protocol = CONF.share.enable_cephx_rules_for_protocols[0]
+ cls.access_type = "cephx"
+ cls.access_to = "eve"
+ cls.shares_v2_client.share_protocol = cls.protocol
+ cls.share = cls.create_share()
+ cls.access = cls.shares_v2_client.create_access_rule(
+ cls.share["id"], cls.access_type, cls.access_to,
+ 'rw', metadata={u"key1": u"value1"})
+
+ @tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
+ @ddt.data({'data': {"": "value"}}, {'data': {"k" * 256: "value"}},
+ {'data': {"key": "x" * 1024}})
+ @ddt.unpack
+ def test_try_upd_access_metadata_error(self, data):
+ self.assertRaises(lib_exc.BadRequest,
+ self.shares_v2_client.update_access_metadata,
+ self.access["id"], data)
+
+ @tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
+ def test_try_delete_unexisting_access_metadata(self):
+ self.assertRaises(lib_exc.NotFound,
+ self.shares_v2_client.delete_access_metadata,
+ self.access["id"], "wrong_key")
diff --git a/manila_tempest_tests/tests/api/test_rules.py b/manila_tempest_tests/tests/api/test_rules.py
index 0b44a5c..d16eaf1 100644
--- a/manila_tempest_tests/tests/api/test_rules.py
+++ b/manila_tempest_tests/tests/api/test_rules.py
@@ -488,14 +488,19 @@
cls.share = cls.create_share()
@tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
- @ddt.data(*set(['1.0', '2.9', '2.27', '2.28', LATEST_MICROVERSION]))
+ @ddt.data(*set(
+ ['1.0', '2.9', '2.27', '2.28', '2.45', LATEST_MICROVERSION]))
def test_list_access_rules(self, version):
+ self.skip_if_microversion_not_supported(version)
if (utils.is_microversion_lt(version, '2.13') and
CONF.share.enable_cephx_rules_for_protocols):
msg = ("API version %s does not support cephx access type, need "
"version >= 2.13." % version)
raise self.skipException(msg)
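+        # Access rule metadata can only be set from API microversion 2.45.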
+ metadata = None
+ if utils.is_microversion_ge(version, '2.45'):
+ metadata = {'key1': 'v1', 'key2': 'v2'}
# create rule
if utils.is_microversion_eq(version, '1.0'):
rule = self.shares_client.create_access_rule(
@@ -503,7 +508,7 @@
else:
rule = self.shares_v2_client.create_access_rule(
self.share["id"], self.access_type, self.access_to,
- version=version)
+ metadata=metadata, version=version)
# verify added rule keys since 2.33 when create rule
if utils.is_microversion_ge(version, '2.33'):
@@ -543,6 +548,8 @@
keys += ("access_key", )
if utils.is_microversion_ge(version, '2.33'):
keys += ("created_at", "updated_at", )
+ if utils.is_microversion_ge(version, '2.45'):
+ keys += ("metadata",)
for key in keys:
[self.assertIn(key, r.keys()) for r in rules]
for key in ('deleted', 'deleted_at', 'instance_mappings'):
@@ -625,7 +632,11 @@
self.assertRaises(lib_exc.NotFound,
self.shares_client.list_access_rules,
share['id'])
- else:
+ elif utils.is_microversion_lt(version, '2.45'):
self.assertRaises(lib_exc.NotFound,
self.shares_v2_client.list_access_rules,
share['id'], version)
+ else:
+ self.assertRaises(lib_exc.BadRequest,
+ self.shares_v2_client.list_access_rules,
+ share['id'], version)
diff --git a/manila_tempest_tests/tests/api/test_security_services.py b/manila_tempest_tests/tests/api/test_security_services.py
index 30cf6a9..a0e50dc 100644
--- a/manila_tempest_tests/tests/api/test_security_services.py
+++ b/manila_tempest_tests/tests/api/test_security_services.py
@@ -13,6 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import ddt
from oslo_log import log
import six
from tempest import config
@@ -20,11 +21,14 @@
from testtools import testcase as tc
from manila_tempest_tests.tests.api import base
+from manila_tempest_tests import utils
CONF = config.CONF
+LATEST_MICROVERSION = CONF.share.max_api_microversion
LOG = log.getLogger(__name__)
+@ddt.ddt
class SecurityServiceListMixin(object):
@tc.attr(base.TAG_POSITIVE, base.TAG_API)
@@ -39,8 +43,16 @@
[self.assertIn(key, s_s.keys()) for s_s in listed for key in keys]
@tc.attr(base.TAG_POSITIVE, base.TAG_API)
- def test_list_security_services_with_detail(self):
- listed = self.shares_client.list_security_services(detailed=True)
+ @ddt.data(*set(['1.0', '2.42', '2.44', LATEST_MICROVERSION]))
+ def test_list_security_services_with_detail(self, version):
+ self.skip_if_microversion_not_supported(version)
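+        # The 'ou' field of security services is only returned from API
+        # microversion 2.44 onwards.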
+        with_ou = utils.is_microversion_ge(version, '2.44')
+ if utils.is_microversion_ge(version, '2.0'):
+ listed = self.shares_v2_client.list_security_services(
+ detailed=True, version=version)
+ else:
+ listed = self.shares_client.list_security_services(detailed=True)
+
self.assertTrue(any(self.ss_ldap['id'] == ss['id'] for ss in listed))
self.assertTrue(any(self.ss_kerberos['id'] == ss['id']
for ss in listed))
@@ -53,6 +65,9 @@
]
[self.assertIn(key, s_s.keys()) for s_s in listed for key in keys]
+ for ss in listed:
+            self.assertEqual(with_ou, 'ou' in ss)
+
@tc.attr(base.TAG_POSITIVE, base.TAG_API)
@testtools.skipIf(
not CONF.share.multitenancy_enabled, "Only for multitenancy.")
@@ -98,6 +113,7 @@
in search_opts.items()))
+@ddt.ddt
class SecurityServicesTest(base.BaseSharesTest,
SecurityServiceListMixin):
def setUp(self):
@@ -110,6 +126,8 @@
'user': 'fake_user',
'password': 'pass',
}
+ if utils.is_microversion_ge(CONF.share.max_api_microversion, '2.44'):
+ ss_ldap_data['ou'] = 'OU=fake_unit_1'
ss_kerberos_data = {
'name': 'ss_kerberos',
'dns_ip': '2.2.2.2',
@@ -118,6 +136,8 @@
'user': 'test_user',
'password': 'word',
}
+ if utils.is_microversion_ge(CONF.share.max_api_microversion, '2.44'):
+ ss_kerberos_data['ou'] = 'OU=fake_unit_2'
self.ss_ldap = self.create_security_service('ldap', **ss_ldap_data)
self.ss_kerberos = self.create_security_service(
'kerberos', **ss_kerberos_data)
@@ -133,13 +153,25 @@
self.shares_client.delete_security_service(ss["id"])
@tc.attr(base.TAG_POSITIVE, base.TAG_API)
- def test_get_security_service(self):
- data = self.generate_security_service_data()
- ss = self.create_security_service(**data)
- self.assertDictContainsSubset(data, ss)
+ @ddt.data(*set(['1.0', '2.43', '2.44', LATEST_MICROVERSION]))
+ def test_get_security_service(self, version):
+ self.skip_if_microversion_not_supported(version)
+        with_ou = utils.is_microversion_ge(version, '2.44')
+ data = self.generate_security_service_data(set_ou=with_ou)
- get = self.shares_client.get_security_service(ss["id"])
+ if utils.is_microversion_ge(version, '2.0'):
+ ss = self.create_security_service(
+ client=self.shares_v2_client, version=version, **data)
+ get = self.shares_v2_client.get_security_service(
+ ss["id"], version=version)
+ else:
+ ss = self.create_security_service(**data)
+ get = self.shares_client.get_security_service(ss["id"])
+
+ self.assertDictContainsSubset(data, ss)
+ self.assertEqual(with_ou, 'ou' in ss)
self.assertDictContainsSubset(data, get)
+ self.assertEqual(with_ou, 'ou' in get)
@tc.attr(base.TAG_POSITIVE, base.TAG_API)
def test_update_security_service(self):
@@ -155,6 +187,16 @@
self.assertDictContainsSubset(upd_data, updated)
self.assertDictContainsSubset(upd_data, get)
+ if utils.is_microversion_ge(CONF.share.max_api_microversion, '2.44'):
+ # update again with ou
+ upd_data_ou = self.generate_security_service_data(set_ou=True)
+ updated_ou = self.shares_v2_client.update_security_service(
+ ss["id"], **upd_data_ou)
+
+ get_ou = self.shares_v2_client.get_security_service(ss["id"])
+ self.assertDictContainsSubset(upd_data_ou, updated_ou)
+ self.assertDictContainsSubset(upd_data_ou, get_ou)
+
@tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
@testtools.skipIf(
not CONF.share.multitenancy_enabled, "Only for multitenancy.")
diff --git a/manila_tempest_tests/tests/api/test_shares_actions.py b/manila_tempest_tests/tests/api/test_shares_actions.py
index 7d28d83..ae83c85 100644
--- a/manila_tempest_tests/tests/api/test_shares_actions.py
+++ b/manila_tempest_tests/tests/api/test_shares_actions.py
@@ -440,7 +440,7 @@
if version is None:
snapshot = self.shares_client.get_snapshot(self.snap["id"])
else:
- utils.skip_if_microversion_not_supported(version)
+ self.skip_if_microversion_not_supported(version)
snapshot = self.shares_v2_client.get_snapshot(
self.snap["id"], version=version)
@@ -515,7 +515,7 @@
if version is None:
snaps = self.shares_client.list_snapshots_with_detail()
else:
- utils.skip_if_microversion_not_supported(version)
+ self.skip_if_microversion_not_supported(version)
snaps = self.shares_v2_client.list_snapshots_with_detail(
version=version, params=params)
diff --git a/manila_tempest_tests/tests/scenario/manager.py b/manila_tempest_tests/tests/scenario/manager.py
index 73a6693..0aa1b9e 100644
--- a/manila_tempest_tests/tests/scenario/manager.py
+++ b/manila_tempest_tests/tests/scenario/manager.py
@@ -18,7 +18,6 @@
import netaddr
from oslo_log import log
-from oslo_serialization import jsonutils as json
from oslo_utils import netutils
import six
@@ -81,13 +80,6 @@
cls.security_group_rules_client = (
cls.os_primary.security_group_rules_client)
- if CONF.volume_feature_enabled.api_v2:
- cls.volumes_client = cls.os_primary.volumes_v2_client
- cls.snapshots_client = cls.os_primary.snapshots_v2_client
- else:
- cls.volumes_client = cls.os_primary.volumes_client
- cls.snapshots_client = cls.os_primary.snapshots_client
-
# ## Test functions library
#
# The create_[resource] functions only return body and discard the
@@ -214,61 +206,6 @@
server = clients.servers_client.show_server(body['id'])['server']
return server
- def create_volume(self, size=None, name=None, snapshot_id=None,
- imageRef=None, volume_type=None):
- if size is None:
- size = CONF.volume.volume_size
- if imageRef:
- image = self.compute_images_client.show_image(imageRef)['image']
- min_disk = image.get('minDisk')
- size = max(size, min_disk)
- if name is None:
- name = data_utils.rand_name(self.__class__.__name__ + "-volume")
- kwargs = {'display_name': name,
- 'snapshot_id': snapshot_id,
- 'imageRef': imageRef,
- 'volume_type': volume_type,
- 'size': size}
- volume = self.volumes_client.create_volume(**kwargs)['volume']
-
- self.addCleanup(self.volumes_client.wait_for_resource_deletion,
- volume['id'])
- self.addCleanup(test_utils.call_and_ignore_notfound_exc,
- self.volumes_client.delete_volume, volume['id'])
-
- # NOTE(e0ne): Cinder API v2 uses name instead of display_name
- if 'display_name' in volume:
- self.assertEqual(name, volume['display_name'])
- else:
- self.assertEqual(name, volume['name'])
- waiters.wait_for_volume_resource_status(self.volumes_client,
- volume['id'], 'available')
- # The volume retrieved on creation has a non-up-to-date status.
- # Retrieval after it becomes active ensures correct details.
- volume = self.volumes_client.show_volume(volume['id'])['volume']
- return volume
-
- def create_volume_type(self, client=None, name=None, backend_name=None):
- if not client:
- client = self.admin_volume_types_client
- if not name:
- class_name = self.__class__.__name__
- name = data_utils.rand_name(class_name + '-volume-type')
- randomized_name = data_utils.rand_name('scenario-type-' + name)
-
- LOG.debug("Creating a volume type: %s on backend %s",
- randomized_name, backend_name)
- extra_specs = {}
- if backend_name:
- extra_specs = {"volume_backend_name": backend_name}
-
- body = client.create_volume_type(name=randomized_name,
- extra_specs=extra_specs)
- volume_type = body['volume_type']
- self.assertIn('id', volume_type)
- self.addCleanup(client.delete_volume_type, volume_type['id'])
- return volume_type
-
def _create_loginable_secgroup_rule(self, secgroup_id=None):
_client = self.compute_security_groups_client
_client_rules = self.compute_security_group_rules_client
@@ -443,72 +380,6 @@
if not isinstance(exc, lib_exc.SSHTimeout):
LOG.debug('Network information on a devstack host')
- def create_server_snapshot(self, server, name=None):
- # Glance client
- _image_client = self.image_client
- # Compute client
- _images_client = self.compute_images_client
- if name is None:
- name = data_utils.rand_name(self.__class__.__name__ + 'snapshot')
- LOG.debug("Creating a snapshot image for server: %s", server['name'])
- image = _images_client.create_image(server['id'], name=name)
- image_id = image.response['location'].split('images/')[1]
- waiters.wait_for_image_status(_image_client, image_id, 'active')
-
- self.addCleanup(_image_client.wait_for_resource_deletion,
- image_id)
- self.addCleanup(test_utils.call_and_ignore_notfound_exc,
- _image_client.delete_image, image_id)
-
- if CONF.image_feature_enabled.api_v1:
- # In glance v1 the additional properties are stored in the headers.
- resp = _image_client.check_image(image_id)
- snapshot_image = common_image.get_image_meta_from_headers(resp)
- image_props = snapshot_image.get('properties', {})
- else:
- # In glance v2 the additional properties are flattened.
- snapshot_image = _image_client.show_image(image_id)
- image_props = snapshot_image
-
- bdm = image_props.get('block_device_mapping')
- if bdm:
- bdm = json.loads(bdm)
- if bdm and 'snapshot_id' in bdm[0]:
- snapshot_id = bdm[0]['snapshot_id']
- self.addCleanup(
- self.snapshots_client.wait_for_resource_deletion,
- snapshot_id)
- self.addCleanup(test_utils.call_and_ignore_notfound_exc,
- self.snapshots_client.delete_snapshot,
- snapshot_id)
- waiters.wait_for_volume_resource_status(self.snapshots_client,
- snapshot_id,
- 'available')
- image_name = snapshot_image['name']
- self.assertEqual(name, image_name)
- LOG.debug("Created snapshot image %s for server %s",
- image_name, server['name'])
- return snapshot_image
-
- def nova_volume_attach(self, server, volume_to_attach):
- volume = self.servers_client.attach_volume(
- server['id'], volumeId=volume_to_attach['id'], device='/dev/%s'
- % CONF.compute.volume_device_name)['volumeAttachment']
- self.assertEqual(volume_to_attach['id'], volume['id'])
- waiters.wait_for_volume_resource_status(self.volumes_client,
- volume['id'], 'in-use')
-
- # Return the updated volume after the attachment
- return self.volumes_client.show_volume(volume['id'])['volume']
-
- def nova_volume_detach(self, server, volume):
- self.servers_client.detach_volume(server['id'], volume['id'])
- waiters.wait_for_volume_resource_status(self.volumes_client,
- volume['id'], 'available')
-
- volume = self.volumes_client.show_volume(volume['id'])['volume']
- self.assertEqual('available', volume['status'])
-
def rebuild_server(self, server_id, image=None,
preserve_ephemeral=False, wait=True,
rebuild_kwargs=None):
diff --git a/manila_tempest_tests/tests/scenario/manager_share.py b/manila_tempest_tests/tests/scenario/manager_share.py
index 5c8cbbf..a80400e 100644
--- a/manila_tempest_tests/tests/scenario/manager_share.py
+++ b/manila_tempest_tests/tests/scenario/manager_share.py
@@ -16,13 +16,20 @@
from oslo_log import log
import six
-from tempest import config
-from tempest.lib.common.utils import data_utils
-
from manila_tempest_tests.common import constants
from manila_tempest_tests.common import remote_client
+from manila_tempest_tests.tests.api import base
from manila_tempest_tests.tests.scenario import manager
+from tempest.common import waiters
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
+from tempest.lib import exceptions
+
+from tempfile import mkstemp
+from urllib2 import urlopen
+
CONF = config.CONF
LOG = log.getLogger(__name__)
@@ -31,6 +38,12 @@
"""Provide harness to do Manila scenario tests."""
credentials = ('admin', 'primary')
+ protocol = None
+ ip_version = 4
+
+ @property
+ def ipv6_enabled(self):
+ return self.ip_version == 6
@classmethod
def resource_setup(cls):
@@ -48,6 +61,266 @@
if not CONF.service_available.manila:
raise cls.skipException("Manila support is required")
+ def setUp(self):
+ base.verify_test_has_appropriate_tags(self)
+ if self.ipv6_enabled and not CONF.share.run_ipv6_tests:
+ raise self.skipException("IPv6 tests are disabled")
+ if self.protocol not in CONF.share.enable_protocols:
+ message = "%s tests are disabled" % self.protocol
+ raise self.skipException(message)
+ if self.protocol not in CONF.share.enable_ip_rules_for_protocols:
+ message = ("%s tests for access rules other than IP are disabled" %
+ self.protocol)
+ raise self.skipException(message)
+ super(ShareScenarioTest, self).setUp()
+
+ self.image_id = None
+        # Set up the image and flavor for the test instance.
+        # Support both configured and injected values.
+ self.floating_ips = {}
+
+ if not hasattr(self, 'flavor_ref'):
+ self.flavor_ref = CONF.share.client_vm_flavor_ref
+
+ if CONF.share.image_with_share_tools == 'centos':
+ self.image_ref = self._create_centos_based_glance_image()
+ elif CONF.share.image_with_share_tools:
+ images = self.compute_images_client.list_images()["images"]
+ for img in images:
+ if img["name"] == CONF.share.image_with_share_tools:
+ self.image_id = img['id']
+ break
+ if not self.image_id:
+ msg = ("Image %s not found. Expecting an image including "
+ "required share tools." %
+ CONF.share.image_with_share_tools)
+ raise exceptions.InvalidConfiguration(message=msg)
+ self.ssh_user = CONF.share.image_username
+ LOG.debug('Starting test for i:{image_id}, f:{flavor}. '
+ 'user: {ssh_user}'.format(image_id=self.image_id,
+ flavor=self.flavor_ref,
+ ssh_user=self.ssh_user))
+
+ self.security_group = self._create_security_group()
+ self.share_network = self.create_share_network()
+
+ def mount_share(self, location, remote_client, target_dir=None):
+ raise NotImplementedError
+
+ def umount_share(self, remote_client, target_dir=None):
+ target_dir = target_dir or "/mnt"
+ remote_client.exec_command("sudo umount %s" % target_dir)
+
+ def create_share_network(self):
+ self.net = self._create_network(namestart="manila-share")
+ self.subnet = self._create_subnet(
+ network=self.net,
+ namestart="manila-share-sub",
+ ip_version=self.ip_version,
+ use_default_subnetpool=self.ipv6_enabled)
+ router = self._get_router()
+ self._create_router_interface(subnet_id=self.subnet['id'],
+ router_id=router['id'])
+ share_network = self._create_share_network(
+ neutron_net_id=self.net['id'],
+ neutron_subnet_id=self.subnet['id'],
+ name=data_utils.rand_name("sn-name"))
+ return share_network
+
+ def boot_instance(self, wait_until="ACTIVE"):
+ self.keypair = self.create_keypair()
+ security_groups = [{'name': self.security_group['name']}]
+ create_kwargs = {
+ 'key_name': self.keypair['name'],
+ 'security_groups': security_groups,
+ 'wait_until': wait_until,
+ 'networks': [{'uuid': self.net['id']}, ],
+ }
+ instance = self.create_server(
+ image_id=self.image_id, flavor=self.flavor_ref, **create_kwargs)
+ return instance
+
+ def init_remote_client(self, instance):
+ if self.ipv6_enabled:
+ server_ip = self._get_ipv6_server_ip(instance)
+ else:
+ # Obtain a floating IP
+ floating_ip = (
+ self.compute_floating_ips_client.create_floating_ip()
+ ['floating_ip'])
+ self.floating_ips[instance['id']] = floating_ip
+ self.addCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ self.compute_floating_ips_client.delete_floating_ip,
+ floating_ip['id'])
+ # Attach a floating IP
+ self.compute_floating_ips_client.associate_floating_ip_to_server(
+ floating_ip['ip'], instance['id'])
+ server_ip = floating_ip['ip']
+ self.assertIsNotNone(server_ip)
+ # Check ssh
+ remote_client = self.get_remote_client(
+ server_or_ip=server_ip,
+ username=self.ssh_user,
+ private_key=self.keypair['private_key'])
+
+ # NOTE(u_glide): Workaround for bug #1465682
+ remote_client = remote_client.ssh_client
+
+ self.share = self.shares_client.get_share(self.share['id'])
+ return remote_client
+
+ def write_data_to_mounted_share(self, escaped_string, remote_client,
+ mount_point='/mnt/t1'):
+ remote_client.exec_command("echo \"{escaped_string}\" "
+ "| sudo tee {mount_point} && sudo sync"
+ .format(escaped_string=escaped_string,
+ mount_point=mount_point))
+
+ def read_data_from_mounted_share(self,
+ remote_client,
+ mount_point='/mnt/t1'):
+ data = remote_client.exec_command("sudo cat {mount_point}"
+ .format(mount_point=mount_point))
+ return data.rstrip()
+
+ def migrate_share(self, share_id, dest_host, status,
+ force_host_assisted=False):
+ share = self._migrate_share(
+ share_id, dest_host, status, force_host_assisted,
+ self.shares_admin_v2_client)
+ return share
+
+ def migration_complete(self, share_id, dest_host):
+ return self._migration_complete(share_id, dest_host)
+
+ def create_share(self, **kwargs):
+ kwargs.update({
+ 'share_protocol': self.protocol,
+ })
+ if not ('share_type_id' in kwargs or 'snapshot_id' in kwargs):
+ default_share_type_id = self._get_share_type()['id']
+ kwargs.update({'share_type_id': default_share_type_id})
+ if CONF.share.multitenancy_enabled:
+ kwargs.update({'share_network_id': self.share_network['id']})
+ self.share = self._create_share(**kwargs)
+ return self.share
+
+ def get_remote_client(self, *args, **kwargs):
+ if not CONF.share.image_with_share_tools:
+ return super(ShareScenarioTest,
+ self).get_remote_client(*args, **kwargs)
+        # NOTE(u_glide): We need a custom implementation of this method as
+        # long as the original implementation depends on the
+        # CONF.compute.ssh_auth_method option.
+ server_or_ip = kwargs['server_or_ip']
+ if isinstance(server_or_ip, six.string_types):
+ ip = server_or_ip
+ else:
+ addr = server_or_ip['addresses'][
+ CONF.validation.network_for_ssh][0]
+ ip = addr['addr']
+
+ # NOTE(u_glide): Both options (pkey and password) are required here to
+ # support service images without Nova metadata support
+ client_params = {
+ 'username': kwargs['username'],
+ 'password': CONF.share.image_password,
+ 'pkey': kwargs.get('private_key'),
+ }
+
+ linux_client = remote_client.RemoteClient(ip, **client_params)
+ try:
+ linux_client.validate_authentication()
+ except Exception:
+ LOG.exception('Initializing SSH connection to %s failed', ip)
+ self._log_console_output()
+ raise
+
+ return linux_client
+
+ def allow_access_ip(self, share_id, ip=None, instance=None,
+ access_level="rw", cleanup=True, snapshot=None):
+ if instance and not ip:
+ try:
+ net_addresses = instance['addresses']
+ first_address = net_addresses.values()[0][0]
+ ip = first_address['addr']
+ except Exception:
+ LOG.debug("Instance has no valid IP address: %s", instance)
+                # In case of an error, ip will still be None
+ LOG.exception("Instance has no valid IP address. "
+ "Falling back to default")
+ if not ip:
+ ip = '0.0.0.0/0'
+
+ if snapshot:
+ self._allow_access_snapshot(snapshot['id'], access_type='ip',
+ access_to=ip, cleanup=cleanup)
+ else:
+ return self._allow_access(share_id, access_type='ip',
+ access_level=access_level, access_to=ip,
+ cleanup=cleanup,
+ client=self.shares_v2_client)
+
+ def deny_access(self, share_id, access_rule_id, client=None):
+ """Deny share access
+
+ :param share_id: id of the share
+ :param access_rule_id: id of the rule that will be deleted
+ """
+ client = client or self.shares_client
+ client.delete_access_rule(share_id, access_rule_id)
+ self.shares_v2_client.wait_for_share_status(
+ share_id, "active", status_attr='access_rules_status')
+
+ def provide_access_to_auxiliary_instance(self, instance, share=None,
+ snapshot=None, access_level='rw'):
+ share = share or self.share
+ if self.protocol.lower() == 'cifs':
+ self.allow_access_ip(
+ share['id'], instance=instance, cleanup=False,
+ snapshot=snapshot, access_level=access_level)
+ elif not CONF.share.multitenancy_enabled:
+ if self.ipv6_enabled:
+ server_ip = self._get_ipv6_server_ip(instance)
+ else:
+ server_ip = (CONF.share.override_ip_for_nfs_access or
+ self.floating_ips[instance['id']]['ip'])
+ self.assertIsNotNone(server_ip)
+ return self.allow_access_ip(
+ share['id'], ip=server_ip,
+ instance=instance, cleanup=False, snapshot=snapshot,
+ access_level=access_level)
+ elif (CONF.share.multitenancy_enabled and
+ self.protocol.lower() == 'nfs'):
+ return self.allow_access_ip(
+ share['id'], instance=instance, cleanup=False,
+ snapshot=snapshot, access_level=access_level)
+
+ def wait_for_active_instance(self, instance_id):
+ waiters.wait_for_server_status(
+ self.os_primary.servers_client, instance_id, "ACTIVE")
+ return self.os_primary.servers_client.show_server(
+ instance_id)["server"]
+
+ def _get_share_type(self):
+ if CONF.share.default_share_type_name:
+ return self.shares_client.get_share_type(
+ CONF.share.default_share_type_name)['share_type']
+ return self._create_share_type(
+ data_utils.rand_name("share_type"),
+ extra_specs={
+ 'snapshot_support': CONF.share.capability_snapshot_support,
+ 'driver_handles_share_servers': CONF.share.multitenancy_enabled
+ },)['share_type']
+
+ def _get_ipv6_server_ip(self, instance):
+ for net_list in instance['addresses'].values():
+ for net_data in net_list:
+ if net_data['version'] == 6:
+ return net_data['addr']
+
def _create_share(self, share_protocol=None, size=None, name=None,
snapshot_id=None, description=None, metadata=None,
share_network_id=None, share_type_id=None,
@@ -160,17 +433,6 @@
self.addCleanup(client.delete_access_rule, share_id, access['id'])
return access
- def _deny_access(self, share_id, rule_id, client=None):
- """Deny share access
-
- :param share_id: id of the share
- :param rule_id: id of the rule that will be deleted
- """
- client = client or self.shares_client
- client.delete_access_rule(share_id, rule_id)
- self.shares_v2_client.wait_for_share_status(
- share_id, "active", status_attr='access_rules_status')
-
def _allow_access_snapshot(self, snapshot_id, access_type="ip",
access_to="0.0.0.0/0", cleanup=True):
"""Allow snapshot access
@@ -206,39 +468,6 @@
self.addCleanup(
client.remove_router_interface, router_id, subnet_id=subnet_id)
- def get_remote_client(self, *args, **kwargs):
- if not CONF.share.image_with_share_tools:
- return super(ShareScenarioTest,
- self).get_remote_client(*args, **kwargs)
- # NOTE(u_glide): We need custom implementation of this method until
- # original implementation depends on CONF.compute.ssh_auth_method
- # option.
- server_or_ip = kwargs['server_or_ip']
- if isinstance(server_or_ip, six.string_types):
- ip = server_or_ip
- else:
- addr = server_or_ip['addresses'][
- CONF.validation.network_for_ssh][0]
- ip = addr['addr']
-
- # NOTE(u_glide): Both options (pkey and password) are required here to
- # support service images without Nova metadata support
- client_params = {
- 'username': kwargs['username'],
- 'password': CONF.share.image_password,
- 'pkey': kwargs.get('private_key'),
- }
-
- linux_client = remote_client.RemoteClient(ip, **client_params)
- try:
- linux_client.validate_authentication()
- except Exception:
- LOG.exception('Initializing SSH connection to %s failed', ip)
- self._log_console_output()
- raise
-
- return linux_client
-
def _migrate_share(self, share_id, dest_host, status, force_host_assisted,
client=None):
client = client or self.shares_admin_v2_client
@@ -264,3 +493,21 @@
self.addCleanup(self.shares_admin_v2_client.delete_share_type,
share_type['share_type']['id'])
return share_type
+
+ def _create_centos_based_glance_image(self):
+ imagepath = mkstemp(suffix='.qcow2')[1]
+ imagefile = open(imagepath, 'wb+')
+ image_response = urlopen('http://cloud.centos.org/centos/7/images/' +
+ 'CentOS-7-x86_64-GenericCloud.qcow2')
+
+ LOG.info('Downloading CentOS7 image')
+ while True:
+ imagecopy = image_response.read(100 * 1024 * 1024)
+ if imagecopy == '':
+ break
+ imagefile.write(imagecopy)
+
+ imagefile.close()
+
+ LOG.info('Creating Glance image using the downloaded image file')
+ return self._image_create('centos', 'bare', imagepath, 'qcow2')
diff --git a/manila_tempest_tests/tests/scenario/test_share_basic_ops.py b/manila_tempest_tests/tests/scenario/test_share_basic_ops.py
index a72d538..32fd735 100644
--- a/manila_tempest_tests/tests/scenario/test_share_basic_ops.py
+++ b/manila_tempest_tests/tests/scenario/test_share_basic_ops.py
@@ -16,10 +16,7 @@
import ddt
from oslo_log import log as logging
-from tempest.common import waiters
from tempest import config
-from tempest.lib.common.utils import data_utils
-from tempest.lib.common.utils import test_utils
from tempest.lib import exceptions
import testtools
from testtools import testcase as tc
@@ -29,11 +26,7 @@
from manila_tempest_tests.tests.scenario import manager_share as manager
from manila_tempest_tests import utils
-from tempfile import mkstemp
-from urllib2 import urlopen
-
CONF = config.CONF
-
LOG = logging.getLogger(__name__)
@@ -50,225 +43,6 @@
* Mount share
* Terminate the instance
"""
- protocol = None
- ip_version = 4
-
- @property
- def use_ipv6(self):
- return self.ip_version == 6
-
- def setUp(self):
- super(ShareBasicOpsBase, self).setUp()
- if self.use_ipv6 and not CONF.share.run_ipv6_tests:
- raise self.skipException("IPv6 tests are disabled")
- base.verify_test_has_appropriate_tags(self)
- self.image_ref = None
- # Setup image and flavor the test instance
- # Support both configured and injected values
- self.floatings = {}
- if self.protocol not in CONF.share.enable_protocols:
- message = "%s tests are disabled" % self.protocol
- raise self.skipException(message)
- if self.protocol not in CONF.share.enable_ip_rules_for_protocols:
- message = ("%s tests for access rules other than IP are disabled" %
- self.protocol)
- raise self.skipException(message)
- if not hasattr(self, 'flavor_ref'):
- self.flavor_ref = CONF.share.client_vm_flavor_ref
-
- if CONF.share.image_with_share_tools == 'centos':
- self.image_ref = self._create_centos_based_glance_image()
- elif CONF.share.image_with_share_tools:
- images = self.compute_images_client.list_images()["images"]
- for img in images:
- if img["name"] == CONF.share.image_with_share_tools:
- self.image_ref = img['id']
- break
- if not self.image_ref:
- msg = ("Image %s not found" %
- CONF.share.image_with_share_tools)
- raise exceptions.InvalidConfiguration(message=msg)
- self.ssh_user = CONF.share.image_username
- LOG.debug('Starting test for i:{image}, f:{flavor}. '
- 'user: {ssh_user}'.format(
- image=self.image_ref, flavor=self.flavor_ref,
- ssh_user=self.ssh_user))
- self.security_group = self._create_security_group()
- self.create_share_network()
-
- def boot_instance(self, wait_until="ACTIVE"):
- self.keypair = self.create_keypair()
- security_groups = [{'name': self.security_group['name']}]
- create_kwargs = {
- 'key_name': self.keypair['name'],
- 'security_groups': security_groups,
- 'wait_until': wait_until,
- 'networks': [{'uuid': self.net['id']}, ],
- }
- instance = self.create_server(
- image_id=self.image_ref, flavor=self.flavor_ref, **create_kwargs)
- return instance
-
- def init_ssh(self, instance):
- if self.use_ipv6:
- server_ip = self._get_ipv6_server_ip(instance)
- else:
- # Obtain a floating IP
- floating_ip = (
- self.compute_floating_ips_client.create_floating_ip()
- ['floating_ip'])
- self.floatings[instance['id']] = floating_ip
- self.addCleanup(
- test_utils.call_and_ignore_notfound_exc,
- self.compute_floating_ips_client.delete_floating_ip,
- floating_ip['id'])
- # Attach a floating IP
- self.compute_floating_ips_client.associate_floating_ip_to_server(
- floating_ip['ip'], instance['id'])
- server_ip = floating_ip['ip']
- self.assertIsNotNone(server_ip)
- # Check ssh
- ssh_client = self.get_remote_client(
- server_or_ip=server_ip,
- username=self.ssh_user,
- private_key=self.keypair['private_key'])
-
- # NOTE(u_glide): Workaround for bug #1465682
- ssh_client = ssh_client.ssh_client
-
- self.share = self.shares_client.get_share(self.share['id'])
- return ssh_client
-
- def mount_share(self, location, ssh_client, target_dir=None):
- raise NotImplementedError
-
- def umount_share(self, ssh_client, target_dir=None):
- target_dir = target_dir or "/mnt"
- ssh_client.exec_command("sudo umount %s" % target_dir)
-
- def write_data(self, data, ssh_client):
- ssh_client.exec_command("echo \"%s\" | sudo tee /mnt/t1 && sudo sync" %
- data)
-
- def read_data(self, ssh_client):
- data = ssh_client.exec_command("sudo cat /mnt/t1")
- return data.rstrip()
-
- def migrate_share(self, share_id, dest_host, status, force_host_assisted):
- share = self._migrate_share(
- share_id, dest_host, status, force_host_assisted,
- self.shares_admin_v2_client)
- return share
-
- def migration_complete(self, share_id, dest_host):
- return self._migration_complete(share_id, dest_host)
-
- def create_share_network(self):
- self.net = self._create_network(namestart="manila-share")
- self.subnet = self._create_subnet(
- network=self.net,
- namestart="manila-share-sub",
- ip_version=self.ip_version,
- use_default_subnetpool=self.use_ipv6)
- router = self._get_router()
- self._create_router_interface(subnet_id=self.subnet['id'],
- router_id=router['id'])
- self.share_net = self._create_share_network(
- neutron_net_id=self.net['id'],
- neutron_subnet_id=self.subnet['id'],
- name=data_utils.rand_name("sn-name"))
-
- def _get_ipv6_server_ip(self, instance):
- for net_list in instance['addresses'].values():
- for net_data in net_list:
- if net_data['version'] == 6:
- return net_data['addr']
-
- def _get_share_type(self):
- if CONF.share.default_share_type_name:
- return self.shares_client.get_share_type(
- CONF.share.default_share_type_name)['share_type']
- return self._create_share_type(
- data_utils.rand_name("share_type"),
- extra_specs={
- 'snapshot_support': CONF.share.capability_snapshot_support,
- 'driver_handles_share_servers': CONF.share.multitenancy_enabled
- },)['share_type']
-
- def create_share(self, **kwargs):
- kwargs.update({
- 'share_protocol': self.protocol,
- })
- if not ('share_type_id' in kwargs or 'snapshot_id' in kwargs):
- kwargs.update({'share_type_id': self._get_share_type()['id']})
- if CONF.share.multitenancy_enabled:
- kwargs.update({'share_network_id': self.share_net['id']})
- self.share = self._create_share(**kwargs)
- return self.share
-
- def allow_access_ip(self, share_id, ip=None, instance=None,
- access_level="rw", cleanup=True, snapshot=None):
- if instance and not ip:
- try:
- net_addresses = instance['addresses']
- first_address = net_addresses.values()[0][0]
- ip = first_address['addr']
- except Exception:
- LOG.debug("Instance: %s", instance)
- # In case on an error ip will be still none
- LOG.exception("Instance does not have a valid IP address."
- "Falling back to default")
- if not ip:
- ip = '0.0.0.0/0'
-
- if snapshot:
- self._allow_access_snapshot(snapshot['id'], access_type='ip',
- access_to=ip, cleanup=cleanup)
- else:
- return self._allow_access(share_id, access_type='ip',
- access_level=access_level, access_to=ip,
- cleanup=cleanup,
- client=self.shares_v2_client)
-
- def deny_access(self, share_id, access_rule_id):
- self._deny_access(share_id, access_rule_id)
-
- def provide_access_to_auxiliary_instance(self, instance, share=None,
- snapshot=None, access_level='rw'):
- share = share or self.share
- if self.protocol.lower() == 'cifs':
- return self.allow_access_ip(
- share['id'], instance=instance, cleanup=False,
- snapshot=snapshot, access_level=access_level)
- elif not CONF.share.multitenancy_enabled:
- if self.use_ipv6:
- server_ip = self._get_ipv6_server_ip(instance)
- else:
- server_ip = (CONF.share.override_ip_for_nfs_access or
- self.floatings[instance['id']]['ip'])
- self.assertIsNotNone(server_ip)
- return self.allow_access_ip(
- share['id'], ip=server_ip,
- instance=instance, cleanup=False, snapshot=snapshot,
- access_level=access_level)
- elif (CONF.share.multitenancy_enabled and
- self.protocol.lower() == 'nfs'):
- return self.allow_access_ip(
- share['id'], instance=instance, cleanup=False,
- snapshot=snapshot, access_level=access_level)
-
- def wait_for_active_instance(self, instance_id):
- waiters.wait_for_server_status(
- self.os_primary.servers_client, instance_id, "ACTIVE")
- return self.os_primary.servers_client.show_server(
- instance_id)["server"]
-
- def _ping_export_location(self, export, ssh_client):
- ip, version = self.get_ip_and_version_from_export_location(export)
- if version == 6:
- ssh_client.exec_command("ping6 -c 1 %s" % ip)
- else:
- ssh_client.exec_command("ping -c 1 %s" % ip)
def get_ip_and_version_from_export_location(self, export):
export = export.replace('[', '').replace(']', '')
@@ -284,6 +58,13 @@
raise self.skipException(message)
return ip, version
+ def _ping_host_from_export_location(self, export, remote_client):
+ ip, version = self.get_ip_and_version_from_export_location(export)
+ if version == 6:
+ remote_client.exec_command("ping6 -c 1 %s" % ip)
+ else:
+ remote_client.exec_command("ping -c 1 %s" % ip)
+
def _get_export_locations_according_to_ip_version(
self, all_locations, error_on_invalid_ip_version):
locations = [
@@ -297,6 +78,21 @@
raise self.skipException(message)
return locations
+ def _get_user_export_locations(self, share=None, snapshot=None,
+ error_on_invalid_ip_version=False):
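+ """Return export locations for a share or snapshot, filtered by IP version."""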
+ locations = None
+ if share:
+ locations = self._get_share_export_locations(share)
+ elif snapshot:
+ locations = self._get_snapshot_export_locations(snapshot)
+
+ self.assertNotEmpty(locations)
+ locations = self._get_export_locations_according_to_ip_version(
+ locations, error_on_invalid_ip_version)
+ self.assertNotEmpty(locations)
+
+ return locations
+
def _get_share_export_locations(self, share):
if utils.is_microversion_lt(CONF.share.max_api_microversion, "2.9"):
@@ -308,23 +104,12 @@
return locations
- def _create_centos_based_glance_image(self):
- imagepath = mkstemp(suffix='.qcow2')[1]
- imagefile = open(imagepath, 'wb+')
- image_response = urlopen('http://cloud.centos.org/centos/7/images/' +
- 'CentOS-7-x86_64-GenericCloud.qcow2')
+ def _get_snapshot_export_locations(self, snapshot):
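+ """Return the export location paths of a snapshot."""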
+ exports = (self.shares_v2_client.
+ list_snapshot_export_locations(snapshot['id']))
+ locations = [x['path'] for x in exports]
- LOG.info('Downloading CentOS7 image')
- while True:
- imagecopy = image_response.read(100 * 1024 * 1024)
- if imagecopy == '':
- break
- imagefile.write(imagecopy)
-
- imagefile.close()
-
- LOG.info('Creating Glance image using the downloaded image file')
- return self._image_create('centos', 'bare', imagepath, 'qcow2')
+ return locations
@tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
def test_mount_share_one_vm(self):
@@ -332,19 +117,12 @@
self.create_share()
locations = self._get_user_export_locations(self.share)
instance = self.wait_for_active_instance(instance["id"])
- ssh_client = self.init_ssh(instance)
+ remote_client = self.init_remote_client(instance)
self.provide_access_to_auxiliary_instance(instance)
for location in locations:
- self.mount_share(location, ssh_client)
- self.umount_share(ssh_client)
-
- def _get_snapshot_export_locations(self, snapshot):
- exports = (self.shares_v2_client.
- list_snapshot_export_locations(snapshot['id']))
- locations = [x['path'] for x in exports]
-
- return locations
+ self.mount_share(location, remote_client)
+ self.umount_share(remote_client)
@tc.attr(base.TAG_NEGATIVE, base.TAG_BACKEND)
def test_write_with_ro_access(self):
@@ -356,20 +134,21 @@
location = self._get_user_export_locations(self.share)[0]
instance = self.wait_for_active_instance(instance["id"])
- ssh_client_inst = self.init_ssh(instance)
+ remote_client_inst = self.init_remote_client(instance)
# First, check that writing works with RW access.
acc_rule_id = self.provide_access_to_auxiliary_instance(instance)['id']
- self.mount_share(location, ssh_client_inst)
- self.write_data(test_data, ssh_client_inst)
+ self.mount_share(location, remote_client_inst)
+ self.write_data_to_mounted_share(test_data, remote_client_inst)
self.deny_access(self.share['id'], acc_rule_id)
self.provide_access_to_auxiliary_instance(instance, access_level='ro')
- self.addCleanup(self.umount_share, ssh_client_inst)
+ self.addCleanup(self.umount_share, remote_client_inst)
# Test if write with RO access fails.
self.assertRaises(exceptions.SSHExecCommandFailed,
- self.write_data, test_data, ssh_client_inst)
+ self.write_data_to_mounted_share,
+ test_data, remote_client_inst)
@tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
def test_read_write_two_vms(self):
@@ -385,23 +164,23 @@
instance2 = self.wait_for_active_instance(instance2["id"])
# Write data to first VM
- ssh_client_inst1 = self.init_ssh(instance1)
+ remote_client_inst1 = self.init_remote_client(instance1)
self.provide_access_to_auxiliary_instance(instance1)
- self.mount_share(location, ssh_client_inst1)
+ self.mount_share(location, remote_client_inst1)
self.addCleanup(self.umount_share,
- ssh_client_inst1)
- self.write_data(test_data, ssh_client_inst1)
+ remote_client_inst1)
+ self.write_data_to_mounted_share(test_data, remote_client_inst1)
# Read from second VM
- ssh_client_inst2 = self.init_ssh(instance2)
- if not CONF.share.override_ip_for_nfs_access or self.use_ipv6:
+ remote_client_inst2 = self.init_remote_client(instance2)
+ if not CONF.share.override_ip_for_nfs_access or self.ipv6_enabled:
self.provide_access_to_auxiliary_instance(instance2)
- self.mount_share(location, ssh_client_inst2)
+ self.mount_share(location, remote_client_inst2)
self.addCleanup(self.umount_share,
- ssh_client_inst2)
- data = self.read_data(ssh_client_inst2)
+ remote_client_inst2)
+ data = self.read_data_from_mounted_share(remote_client_inst2)
self.assertEqual(test_data, data)
@tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
@@ -425,6 +204,10 @@
raise self.skipException("Only NFS protocol supported "
"at this moment.")
+ if self.ipv6_enabled:
+ raise self.skipException("Share Migration using IPv6 is not "
+ "supported at this moment.")
+
pools = self.shares_admin_v2_client.list_pools(detail=True)['pools']
if len(pools) < 2:
@@ -448,29 +231,29 @@
dest_pool = dest_pool['name']
- ssh_client = self.init_ssh(instance)
+ remote_client = self.init_remote_client(instance)
self.provide_access_to_auxiliary_instance(instance)
- self.mount_share(exports[0], ssh_client)
+ self.mount_share(exports[0], remote_client)
- ssh_client.exec_command("sudo mkdir -p /mnt/f1")
- ssh_client.exec_command("sudo mkdir -p /mnt/f2")
- ssh_client.exec_command("sudo mkdir -p /mnt/f3")
- ssh_client.exec_command("sudo mkdir -p /mnt/f4")
- ssh_client.exec_command("sudo mkdir -p /mnt/f1/ff1")
- ssh_client.exec_command("sleep 1")
- ssh_client.exec_command(
+ remote_client.exec_command("sudo mkdir -p /mnt/f1")
+ remote_client.exec_command("sudo mkdir -p /mnt/f2")
+ remote_client.exec_command("sudo mkdir -p /mnt/f3")
+ remote_client.exec_command("sudo mkdir -p /mnt/f4")
+ remote_client.exec_command("sudo mkdir -p /mnt/f1/ff1")
+ remote_client.exec_command("sleep 1")
+ remote_client.exec_command(
"sudo dd if=/dev/zero of=/mnt/f1/1m1.bin bs=1M count=1")
- ssh_client.exec_command(
+ remote_client.exec_command(
"sudo dd if=/dev/zero of=/mnt/f2/1m2.bin bs=1M count=1")
- ssh_client.exec_command(
+ remote_client.exec_command(
"sudo dd if=/dev/zero of=/mnt/f3/1m3.bin bs=1M count=1")
- ssh_client.exec_command(
+ remote_client.exec_command(
"sudo dd if=/dev/zero of=/mnt/f4/1m4.bin bs=1M count=1")
- ssh_client.exec_command(
+ remote_client.exec_command(
"sudo dd if=/dev/zero of=/mnt/f1/ff1/1m5.bin bs=1M count=1")
- ssh_client.exec_command("sudo chmod -R 555 /mnt/f3")
- ssh_client.exec_command("sudo chmod -R 777 /mnt/f4")
+ remote_client.exec_command("sudo chmod -R 555 /mnt/f3")
+ remote_client.exec_command("sudo chmod -R 777 /mnt/f4")
task_state = (constants.TASK_STATE_DATA_COPYING_COMPLETED
if force_host_assisted
@@ -482,10 +265,10 @@
if force_host_assisted:
self.assertRaises(
exceptions.SSHExecCommandFailed,
- ssh_client.exec_command,
+ remote_client.exec_command,
"dd if=/dev/zero of=/mnt/f1/1m6.bin bs=1M count=1")
- self.umount_share(ssh_client)
+ self.umount_share(remote_client)
self.share = self.migration_complete(self.share['id'], dest_pool)
@@ -496,11 +279,11 @@
self.assertEqual(constants.TASK_STATE_MIGRATION_SUCCESS,
self.share['task_state'])
- self.mount_share(new_exports[0], ssh_client)
+ self.mount_share(new_exports[0], remote_client)
- output = ssh_client.exec_command("ls -lRA --ignore=lost+found /mnt")
+ output = remote_client.exec_command(
+ "ls -lRA --ignore=lost+found /mnt")
- self.umount_share(ssh_client)
+ self.umount_share(remote_client)
self.assertIn('1m1.bin', output)
self.assertIn('1m2.bin', output)
@@ -508,21 +291,6 @@
self.assertIn('1m4.bin', output)
self.assertIn('1m5.bin', output)
- def _get_user_export_locations(self, share=None, snapshot=None,
- error_on_invalid_ip_version=False):
- locations = None
- if share:
- locations = self._get_share_export_locations(share)
- elif snapshot:
- locations = self._get_snapshot_export_locations(snapshot)
-
- self.assertNotEmpty(locations)
- locations = self._get_export_locations_according_to_ip_version(
- locations, error_on_invalid_ip_version)
- self.assertNotEmpty(locations)
-
- return locations
-
@tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
@testtools.skipUnless(
CONF.share.run_snapshot_tests, "Snapshot tests are disabled.")
@@ -540,7 +308,7 @@
self.addCleanup(self.servers_client.delete_server, instance['id'])
# 3 - SSH to UVM, ok, connected
- ssh_client = self.init_ssh(instance)
+ remote_client = self.init_remote_client(instance)
# 4 - Provide RW access to S1, ok, provided
self.provide_access_to_auxiliary_instance(instance, parent_share)
@@ -548,20 +316,20 @@
# 5 - Try mount S1 to UVM, ok, mounted
user_export_location = self._get_user_export_locations(parent_share)[0]
parent_share_dir = "/mnt/parent"
- ssh_client.exec_command("sudo mkdir -p %s" % parent_share_dir)
+ remote_client.exec_command("sudo mkdir -p %s" % parent_share_dir)
- self.mount_share(user_export_location, ssh_client, parent_share_dir)
- self.addCleanup(self.umount_share, ssh_client, parent_share_dir)
+ self.mount_share(user_export_location, remote_client, parent_share_dir)
+ self.addCleanup(self.umount_share, remote_client, parent_share_dir)
# 6 - Create "file1", ok, created
- ssh_client.exec_command("sudo touch %s/file1" % parent_share_dir)
+ remote_client.exec_command("sudo touch %s/file1" % parent_share_dir)
# 7 - Create snapshot SS1 from S1, ok, created
snapshot = self._create_snapshot(parent_share['id'])
# 8 - Create "file2" in share S1 - ok, created. We expect that
# snapshot will not contain any data created after snapshot creation.
- ssh_client.exec_command("sudo touch %s/file2" % parent_share_dir)
+ remote_client.exec_command("sudo touch %s/file2" % parent_share_dir)
# 9 - Create share S2 from SS1, ok, created
child_share = self.create_share(snapshot_id=snapshot["id"])
@@ -570,37 +338,40 @@
# did not get access rules from parent share.
user_export_location = self._get_user_export_locations(child_share)[0]
child_share_dir = "/mnt/child"
- ssh_client.exec_command("sudo mkdir -p %s" % child_share_dir)
+ remote_client.exec_command("sudo mkdir -p %s" % child_share_dir)
self.assertRaises(
exceptions.SSHExecCommandFailed,
self.mount_share,
- user_export_location, ssh_client, child_share_dir,
+ user_export_location, remote_client, child_share_dir,
)
# 11 - Provide RW access to S2, ok, provided
self.provide_access_to_auxiliary_instance(instance, child_share)
# 12 - Try mount S2, ok, mounted
- self.mount_share(user_export_location, ssh_client, child_share_dir)
- self.addCleanup(self.umount_share, ssh_client, child_share_dir)
+ self.mount_share(user_export_location, remote_client, child_share_dir)
+ self.addCleanup(self.umount_share, remote_client, child_share_dir)
# 13 - List files on S2, only "file1" exists
- output = ssh_client.exec_command("sudo ls -lRA %s" % child_share_dir)
+ output = remote_client.exec_command(
+ "sudo ls -lRA %s" % child_share_dir)
self.assertIn('file1', output)
self.assertNotIn('file2', output)
# 14 - Create file3 on S2, ok, file created
- ssh_client.exec_command("sudo touch %s/file3" % child_share_dir)
+ remote_client.exec_command("sudo touch %s/file3" % child_share_dir)
# 15 - List files on S1, two files exist - "file1" and "file2"
- output = ssh_client.exec_command("sudo ls -lRA %s" % parent_share_dir)
+ output = remote_client.exec_command(
+ "sudo ls -lRA %s" % parent_share_dir)
self.assertIn('file1', output)
self.assertIn('file2', output)
self.assertNotIn('file3', output)
# 16 - List files on S2, two files exist - "file1" and "file3"
- output = ssh_client.exec_command("sudo ls -lRA %s" % child_share_dir)
+ output = remote_client.exec_command(
+ "sudo ls -lRA %s" % child_share_dir)
self.assertIn('file1', output)
self.assertNotIn('file2', output)
self.assertIn('file3', output)
@@ -625,7 +396,7 @@
self.addCleanup(self.servers_client.delete_server, instance['id'])
# 3 - SSH to UVM, ok, connected
- ssh_client = self.init_ssh(instance)
+ remote_client = self.init_remote_client(instance)
# 4 - Provide RW access to S1, ok, provided
self.provide_access_to_auxiliary_instance(instance, parent_share)
@@ -634,21 +405,21 @@
user_export_location = self._get_user_export_locations(parent_share)[0]
parent_share_dir = "/mnt/parent"
snapshot_dir = "/mnt/snapshot_dir"
- ssh_client.exec_command("sudo mkdir -p %s" % parent_share_dir)
- ssh_client.exec_command("sudo mkdir -p %s" % snapshot_dir)
+ remote_client.exec_command("sudo mkdir -p %s" % parent_share_dir)
+ remote_client.exec_command("sudo mkdir -p %s" % snapshot_dir)
- self.mount_share(user_export_location, ssh_client, parent_share_dir)
- self.addCleanup(self.umount_share, ssh_client, parent_share_dir)
+ self.mount_share(user_export_location, remote_client, parent_share_dir)
+ self.addCleanup(self.umount_share, remote_client, parent_share_dir)
# 6 - Create "file1", ok, created
- ssh_client.exec_command("sudo touch %s/file1" % parent_share_dir)
+ remote_client.exec_command("sudo touch %s/file1" % parent_share_dir)
# 7 - Create snapshot SS1 from S1, ok, created
snapshot = self._create_snapshot(parent_share['id'])
# 8 - Create "file2" in share S1 - ok, created. We expect that
# snapshot will not contain any data created after snapshot creation.
- ssh_client.exec_command("sudo touch %s/file2" % parent_share_dir)
+ remote_client.exec_command("sudo touch %s/file2" % parent_share_dir)
# 9 - Allow access to SS1
self.provide_access_to_auxiliary_instance(instance, snapshot=snapshot)
@@ -656,45 +427,45 @@
# 10 - Mount SS1
user_export_location = self._get_user_export_locations(
snapshot=snapshot)[0]
- self.mount_share(user_export_location, ssh_client, snapshot_dir)
- self.addCleanup(self.umount_share, ssh_client, snapshot_dir)
+ self.mount_share(user_export_location, remote_client, snapshot_dir)
+ self.addCleanup(self.umount_share, remote_client, snapshot_dir)
# 11 - List files on SS1, only "file1" exists
# NOTE(lseki): using ls without recursion to avoid permission denied
# error while listing lost+found directory on LVM volumes
- output = ssh_client.exec_command("sudo ls -lA %s" % snapshot_dir)
+ output = remote_client.exec_command("sudo ls -lA %s" % snapshot_dir)
self.assertIn('file1', output)
self.assertNotIn('file2', output)
# 12 - Try to create a file on SS1, should fail
self.assertRaises(
exceptions.SSHExecCommandFailed,
- ssh_client.exec_command,
+ remote_client.exec_command,
"sudo touch %s/file3" % snapshot_dir)
class TestShareBasicOpsNFS(ShareBasicOpsBase):
protocol = "nfs"
- def mount_share(self, location, ssh_client, target_dir=None):
+ def mount_share(self, location, remote_client, target_dir=None):
- self._ping_export_location(location, ssh_client)
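+ # Check basic connectivity to the export host before mounting.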
+ self._ping_host_from_export_location(location, remote_client)
target_dir = target_dir or "/mnt"
- ssh_client.exec_command(
+ remote_client.exec_command(
"sudo mount -vt nfs \"%s\" %s" % (location, target_dir))
class TestShareBasicOpsCIFS(ShareBasicOpsBase):
protocol = "cifs"
- def mount_share(self, location, ssh_client, target_dir=None):
+ def mount_share(self, location, remote_client, target_dir=None):
- self._ping_export_location(location, ssh_client)
+ self._ping_host_from_export_location(location, remote_client)
location = location.replace("\\", "/")
target_dir = target_dir or "/mnt"
- ssh_client.exec_command(
+ remote_client.exec_command(
"sudo mount.cifs \"%s\" %s -o guest" % (location, target_dir)
)