Merge "Migrate test_server_basic_ops to tempest client"
diff --git a/REVIEWING.rst b/REVIEWING.rst
new file mode 100644
index 0000000..d6dc83e
--- /dev/null
+++ b/REVIEWING.rst
@@ -0,0 +1,60 @@
+Reviewing Tempest Code
+======================
+
+To start, read the `OpenStack Common Review Checklist
+<https://wiki.openstack.org/wiki/ReviewChecklist#Common_Review_Checklist>`_.
+
+
+Ensuring code is executed
+-------------------------
+
+Any new test, or change to an existing test, has to be verified in the gate.
+This means that the first thing to check with any change is that a gate job
+actually runs it. Tests which aren't executed, either because of configuration
+or because they are skipped, should not be accepted.
+
+
+Unit Tests
+----------
+
+Unit tests are required for any change that adds new functionality to common
+code or to an out-of-band tool. This is to ensure we don't introduce future
+regressions and to test conditions which we may not hit in gate runs. Tests
+and service clients aren't required to have unit tests, since they should be
+self-verifying when run in the gate.
+
+
+API Stability
+-------------
+Tests should only be added for published, stable APIs. If a patch contains
+tests for an API which hasn't been marked as stable, or for an API which
+doesn't conform to the `API stability guidelines
+<https://wiki.openstack.org/wiki/Governance/Approved/APIStability>`_, then it
+should not be approved.
+
+
+Reject Copy and Paste Test Code
+-------------------------------
+When creating new tests that are similar to existing tests it is tempting to
+simply copy the code and make a few modifications. This increases code size and
+the maintenance burden. Such changes should not be approved if it is easy to
+abstract the duplicated code into a function or method.
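+
+As a sketch only (the helper name and the ``create_server`` arguments below
+are illustrative assumptions, not code from the tree), two near-identical
+tests can usually share a small helper method instead of duplicating their
+setup::
+
+    def _create_test_server(self, name_prefix):
+        # Hypothetical helper factored out of two copy-pasted tests; the
+        # client calls and attribute names are assumptions for the example.
+        name = data_utils.rand_name(name_prefix)
+        _, server = self.client.create_server(name, self.image_ref,
+                                              self.flavor_ref)
+        self.addCleanup(self.client.delete_server, server['id'])
+        return server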
+
+
+Being explicit
+--------------
+When a test is added that depends on a configurable feature or extension, it
+should not poll the API to discover whether that feature is enabled. Doing so
+just masks bugs, because the test can end up being skipped automatically.
+Instead, the config file should be used to determine whether a test should be
+skipped. Do not approve changes that depend on an API call to decide whether
+to skip.
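+
+A typical pattern (a sketch only; the option and class names here are
+illustrative) is to consult the config in ``setUpClass`` and raise a skip
+exception before any API call is made::
+
+    @classmethod
+    def setUpClass(cls):
+        super(ExampleExtensionTest, cls).setUpClass()
+        # Decide from tempest.conf, not from a live API call
+        if not CONF.network_feature_enabled.ipv6:
+            raise cls.skipException("IPv6 support is not enabled")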
+
+
+When to approve
+---------------
+ * Every patch needs two +2s before being approved.
+ * It's OK to hold off on an approval until a subject matter expert reviews it.
+ * If a patch has already been approved but requires a trivial rebase to merge,
+ you do not have to wait for a second +2, since the patch has already had
+ two +2s.
diff --git a/doc/source/REVIEWING.rst b/doc/source/REVIEWING.rst
new file mode 120000
index 0000000..841e042
--- /dev/null
+++ b/doc/source/REVIEWING.rst
@@ -0,0 +1 @@
+../../REVIEWING.rst
\ No newline at end of file
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 25bc900..d3118ac 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -9,6 +9,7 @@
overview
HACKING
+ REVIEWING
------------
Field Guides
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index 247f6d1..ef56ab3 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -933,8 +933,15 @@
# Directory containing image files (string value)
#img_dir=/opt/stack/new/devstack/files/images/cirros-0.3.1-x86_64-uec
-# QCOW2 image file name (string value)
-#qcow2_img_file=cirros-0.3.1-x86_64-disk.img
+# Image file name (string value)
+# Deprecated group/name - [DEFAULT]/qcow2_img_file
+#img_file=cirros-0.3.1-x86_64-disk.img
+
+# Image disk format (string value)
+#img_disk_format=qcow2
+
+# Image container format (string value)
+#img_container_format=bare
# AMI image file name (string value)
#ami_img_file=cirros-0.3.1-x86_64-blank.img
@@ -1003,9 +1010,9 @@
# value)
#trove=false
-# Whether or not Marconi is expected to be available (boolean
+# Whether or not Zaqar is expected to be available (boolean
# value)
-#marconi=false
+#zaqar=false
[stress]
diff --git a/tempest/api/network/test_networks.py b/tempest/api/network/test_networks.py
index 206f37b..f3da614 100644
--- a/tempest/api/network/test_networks.py
+++ b/tempest/api/network/test_networks.py
@@ -374,7 +374,8 @@
@test.attr(type='smoke')
def test_create_delete_subnet_with_gw(self):
- gateway = '2003::2'
+ net = netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr)
+ gateway = str(netaddr.IPAddress(net.first + 2))
name = data_utils.rand_name('network-')
_, body = self.client.create_network(name=name)
network = body['network']
@@ -388,13 +389,15 @@
@test.attr(type='smoke')
def test_create_delete_subnet_without_gw(self):
+ net = netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr)
+ gateway_ip = str(netaddr.IPAddress(net.first + 1))
name = data_utils.rand_name('network-')
_, body = self.client.create_network(name=name)
network = body['network']
net_id = network['id']
subnet = self.create_subnet(network)
# Verifies Subnet GW in IPv6
- self.assertEqual(subnet['gateway_ip'], '2003::1')
+ self.assertEqual(subnet['gateway_ip'], gateway_ip)
# Delete network and subnet
_, body = self.client.delete_network(net_id)
self.subnets.pop()
diff --git a/tempest/api/orchestration/stacks/test_neutron_resources.py b/tempest/api/orchestration/stacks/test_neutron_resources.py
index 9b9dfec..c1e2d59 100644
--- a/tempest/api/orchestration/stacks/test_neutron_resources.py
+++ b/tempest/api/orchestration/stacks/test_neutron_resources.py
@@ -99,6 +99,7 @@
self.assertEqual('CREATE_COMPLETE', resource['resource_status'])
@test.attr(type='slow')
+ @test.services('network')
def test_created_network(self):
"""Verifies created network."""
network_id = self.test_resources.get('Network')['physical_resource_id']
@@ -109,6 +110,7 @@
self.assertEqual('NewNetwork', network['name'])
@test.attr(type='slow')
+ @test.services('network')
def test_created_subnet(self):
"""Verifies created subnet."""
subnet_id = self.test_resources.get('Subnet')['physical_resource_id']
@@ -124,6 +126,7 @@
self.assertEqual(str(self.subnet_cidr), subnet['cidr'])
@test.attr(type='slow')
+ @test.services('network')
def test_created_router(self):
"""Verifies created router."""
router_id = self.test_resources.get('Router')['physical_resource_id']
@@ -135,6 +138,7 @@
self.assertEqual(True, router['admin_state_up'])
@test.attr(type='slow')
+ @test.services('network')
def test_created_router_interface(self):
"""Verifies created router interface."""
router_id = self.test_resources.get('Router')['physical_resource_id']
@@ -157,6 +161,7 @@
router_interface_ip)
@test.attr(type='slow')
+ @test.services('compute', 'network')
def test_created_server(self):
"""Verifies created sever."""
server_id = self.test_resources.get('Server')['physical_resource_id']
diff --git a/tempest/api/orchestration/stacks/test_swift_resources.py b/tempest/api/orchestration/stacks/test_swift_resources.py
index b307be5..cbe62a1 100644
--- a/tempest/api/orchestration/stacks/test_swift_resources.py
+++ b/tempest/api/orchestration/stacks/test_swift_resources.py
@@ -61,6 +61,7 @@
self.assertEqual(resource_name, resource['logical_resource_id'])
self.assertEqual('CREATE_COMPLETE', resource['resource_status'])
+ @test.services('object_storage')
def test_created_containers(self):
params = {'format': 'json'}
_, container_list = \
@@ -69,6 +70,7 @@
for cont in container_list:
self.assertTrue(cont['name'].startswith(self.stack_name))
+ @test.services('object_storage')
def test_acl(self):
acl_headers = ('x-container-meta-web-index', 'x-container-read')
@@ -85,6 +87,7 @@
for h in acl_headers:
self.assertIn(h, headers)
+ @test.services('object_storage')
def test_metadata(self):
swift_basic_template = self.load_template('swift_basic')
metadatas = swift_basic_template['resources']['SwiftContainerWebsite'][
diff --git a/tempest/api/orchestration/stacks/test_volumes.py b/tempest/api/orchestration/stacks/test_volumes.py
index ff1cfac..f371370 100644
--- a/tempest/api/orchestration/stacks/test_volumes.py
+++ b/tempest/api/orchestration/stacks/test_volumes.py
@@ -54,6 +54,7 @@
'name'], self.get_stack_output(stack_identifier, 'display_name'))
@test.attr(type='gate')
+ @test.services('volume')
def test_cinder_volume_create_delete(self):
"""Create and delete a volume via OS::Cinder::Volume."""
stack_name = data_utils.rand_name('heat')
@@ -82,6 +83,7 @@
self.volumes_client.wait_for_resource_deletion(volume_id)
@test.attr(type='gate')
+ @test.services('volume')
def test_cinder_volume_create_delete_retain(self):
"""Ensure the 'Retain' deletion policy is respected."""
stack_name = data_utils.rand_name('heat')
diff --git a/tempest/api/queuing/base.py b/tempest/api/queuing/base.py
index f4ff7f1..41a02f2 100644
--- a/tempest/api/queuing/base.py
+++ b/tempest/api/queuing/base.py
@@ -26,7 +26,7 @@
class BaseQueuingTest(test.BaseTestCase):
"""
- Base class for the Queuing tests that use the Tempest Marconi REST client
+ Base class for the Queuing tests that use the Tempest Zaqar REST client
It is assumed that the following option is defined in the
[service_available] section of etc/tempest.conf
@@ -37,8 +37,8 @@
@classmethod
def setUpClass(cls):
super(BaseQueuingTest, cls).setUpClass()
- if not CONF.service_available.marconi:
- raise cls.skipException("Marconi support is required")
+ if not CONF.service_available.zaqar:
+ raise cls.skipException("Zaqar support is required")
os = cls.get_client_manager()
cls.queuing_cfg = CONF.queuing
cls.client = os.queuing_client
diff --git a/tempest/api_schema/response/compute/servers.py b/tempest/api_schema/response/compute/servers.py
index d6c2ddb..f9c957b 100644
--- a/tempest/api_schema/response/compute/servers.py
+++ b/tempest/api_schema/response/compute/servers.py
@@ -54,14 +54,15 @@
'id': {'type': 'string'},
'name': {'type': 'string'},
'status': {'type': 'string'},
- 'image': {
- 'type': 'object',
- 'properties': {
- 'id': {'type': 'string'},
- 'links': parameter_types.links
- },
- 'required': ['id', 'links']
- },
+ 'image': {'oneOf': [
+ {'type': 'object',
+ 'properties': {
+ 'id': {'type': 'string'},
+ 'links': parameter_types.links
+ },
+ 'required': ['id', 'links']},
+ {'type': ['string', 'null']}
+ ]},
'flavor': {
'type': 'object',
'properties': {
diff --git a/tempest/cmd/verify_tempest_config.py b/tempest/cmd/verify_tempest_config.py
index 70fd27b..cd696a9 100755
--- a/tempest/cmd/verify_tempest_config.py
+++ b/tempest/cmd/verify_tempest_config.py
@@ -247,7 +247,7 @@
'data_processing': 'sahara',
'baremetal': 'ironic',
'identity': 'keystone',
- 'queuing': 'marconi',
+ 'queuing': 'zaqar',
'database': 'trove'
}
# Get catalog list for endpoints to use for validation
diff --git a/tempest/common/generator/base_generator.py b/tempest/common/generator/base_generator.py
index 57b98f7..0398af1 100644
--- a/tempest/common/generator/base_generator.py
+++ b/tempest/common/generator/base_generator.py
@@ -13,6 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import functools
+
import jsonschema
from tempest.openstack.common import log as logging
@@ -39,6 +41,7 @@
"""
Decorator for simple generators that return one value
"""
+ @functools.wraps(fn)
def wrapped(self, schema):
result = fn(self, schema)
if result is not None:
diff --git a/tempest/common/isolated_creds.py b/tempest/common/isolated_creds.py
index d5e49db..dca1f86 100644
--- a/tempest/common/isolated_creds.py
+++ b/tempest/common/isolated_creds.py
@@ -13,6 +13,7 @@
# under the License.
import netaddr
+from neutronclient.common import exceptions as n_exc
from tempest import auth
from tempest import clients
@@ -263,7 +264,7 @@
body['subnet']['cidr'] = str(subnet_cidr)
resp_body = self.network_admin_client.create_subnet(body)
break
- except exceptions.BadRequest as e:
+ except (n_exc.BadRequest, exceptions.BadRequest) as e:
if 'overlaps with another subnet' not in str(e):
raise
else:
diff --git a/tempest/config.py b/tempest/config.py
index 4836c63..1d10a0a 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -855,9 +855,15 @@
default='/opt/stack/new/devstack/files/images/'
'cirros-0.3.1-x86_64-uec',
help='Directory containing image files'),
- cfg.StrOpt('qcow2_img_file',
+ cfg.StrOpt('img_file', deprecated_name='qcow2_img_file',
default='cirros-0.3.1-x86_64-disk.img',
- help='QCOW2 image file name'),
+ help='Image file name'),
+ cfg.StrOpt('img_disk_format',
+ default='qcow2',
+ help='Image disk format'),
+ cfg.StrOpt('img_container_format',
+ default='bare',
+ help='Image container format'),
cfg.StrOpt('ami_img_file',
default='cirros-0.3.1-x86_64-blank.img',
help='AMI image file name'),
@@ -915,9 +921,9 @@
cfg.BoolOpt('trove',
default=False,
help="Whether or not Trove is expected to be available"),
- cfg.BoolOpt('marconi',
+ cfg.BoolOpt('zaqar',
default=False,
- help="Whether or not Marconi is expected to be available"),
+ help="Whether or not Zaqar is expected to be available"),
]
debug_group = cfg.OptGroup(name="debug",
diff --git a/tempest/hacking/checks.py b/tempest/hacking/checks.py
index 93329bc..cef010e 100644
--- a/tempest/hacking/checks.py
+++ b/tempest/hacking/checks.py
@@ -20,7 +20,7 @@
PYTHON_CLIENTS = ['cinder', 'glance', 'keystone', 'nova', 'swift', 'neutron',
'trove', 'ironic', 'savanna', 'heat', 'ceilometer',
- 'marconi', 'sahara']
+ 'zaqar', 'sahara']
PYTHON_CLIENT_RE = re.compile('import (%s)client' % '|'.join(PYTHON_CLIENTS))
TEST_DEFINITION = re.compile(r'^\s*def test.*')
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 856b751..0f14c94 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -87,6 +87,7 @@
cls.security_groups_client = cls.manager.security_groups_client
cls.servers_client = cls.manager.servers_client
cls.volumes_client = cls.manager.volumes_client
+ cls.snapshots_client = cls.manager.snapshots_client
@classmethod
def _get_credentials(cls, get_creds, ctype):
@@ -355,19 +356,22 @@
return image['id']
def glance_image_create(self):
- qcow2_img_path = (CONF.scenario.img_dir + "/" +
- CONF.scenario.qcow2_img_file)
+ img_path = CONF.scenario.img_dir + "/" + CONF.scenario.img_file
aki_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.aki_img_file
ari_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.ari_img_file
ami_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.ami_img_file
- LOG.debug("paths: img: %s, ami: %s, ari: %s, aki: %s"
- % (qcow2_img_path, ami_img_path, ari_img_path, aki_img_path))
+ img_container_format = CONF.scenario.img_container_format
+ img_disk_format = CONF.scenario.img_disk_format
+        LOG.debug("paths: img: %s, container_format: %s, disk_format: %s, "
+ "ami: %s, ari: %s, aki: %s" %
+ (img_path, img_container_format, img_disk_format,
+ ami_img_path, ari_img_path, aki_img_path))
try:
self.image = self._image_create('scenario-img',
- 'bare',
- qcow2_img_path,
+ img_container_format,
+ img_path,
properties={'disk_format':
- 'qcow2'})
+ img_disk_format})
except IOError:
LOG.debug("A qcow2 image was not found. Try to get a uec image.")
kernel = self._image_create('scenario-aki', 'aki', aki_img_path)
@@ -886,19 +890,22 @@
return image.id
def glance_image_create(self):
- qcow2_img_path = (CONF.scenario.img_dir + "/" +
- CONF.scenario.qcow2_img_file)
+ img_path = CONF.scenario.img_dir + "/" + CONF.scenario.img_file
aki_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.aki_img_file
ari_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.ari_img_file
ami_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.ami_img_file
- LOG.debug("paths: img: %s, ami: %s, ari: %s, aki: %s"
- % (qcow2_img_path, ami_img_path, ari_img_path, aki_img_path))
+ img_container_format = CONF.scenario.img_container_format
+ img_disk_format = CONF.scenario.img_disk_format
+        LOG.debug("paths: img: %s, container_format: %s, disk_format: %s, "
+ "ami: %s, ari: %s, aki: %s" %
+ (img_path, img_container_format, img_disk_format,
+ ami_img_path, ari_img_path, aki_img_path))
try:
self.image = self._image_create('scenario-img',
- 'bare',
- qcow2_img_path,
+ img_container_format,
+ img_path,
properties={'disk_format':
- 'qcow2'})
+ img_disk_format})
except IOError:
LOG.debug("A qcow2 image was not found. Try to get a uec image.")
kernel = self._image_create('scenario-aki', 'aki', aki_img_path)
@@ -1807,3 +1814,81 @@
self.client.stacks.delete(stack_identifier)
except heat_exceptions.HTTPNotFound:
pass
+
+
+class SwiftScenarioTest(ScenarioTest):
+ """
+ Provide harness to do Swift scenario tests.
+
+ Subclasses implement the tests that use the methods provided by this
+ class.
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ cls.set_network_resources()
+ super(SwiftScenarioTest, cls).setUpClass()
+ if not CONF.service_available.swift:
+ skip_msg = ("%s skipped as swift is not available" %
+ cls.__name__)
+ raise cls.skipException(skip_msg)
+ # Clients for Swift
+ cls.account_client = cls.manager.account_client
+ cls.container_client = cls.manager.container_client
+ cls.object_client = cls.manager.object_client
+
+ def _get_swift_stat(self):
+ """get swift status for our user account."""
+ self.account_client.list_account_containers()
+ LOG.debug('Swift status information obtained successfully')
+
+ def _create_container(self, container_name=None):
+ name = container_name or data_utils.rand_name(
+ 'swift-scenario-container')
+ self.container_client.create_container(name)
+ # look for the container to assure it is created
+ self._list_and_check_container_objects(name)
+ LOG.debug('Container %s created' % (name))
+ return name
+
+ def _delete_container(self, container_name):
+ self.container_client.delete_container(container_name)
+ LOG.debug('Container %s deleted' % (container_name))
+
+ def _upload_object_to_container(self, container_name, obj_name=None):
+ obj_name = obj_name or data_utils.rand_name('swift-scenario-object')
+ obj_data = data_utils.arbitrary_string()
+ self.object_client.create_object(container_name, obj_name, obj_data)
+ return obj_name, obj_data
+
+ def _delete_object(self, container_name, filename):
+ self.object_client.delete_object(container_name, filename)
+ self._list_and_check_container_objects(container_name,
+ not_present_obj=[filename])
+
+ def _list_and_check_container_objects(self, container_name, present_obj=[],
+ not_present_obj=[]):
+ """
+ List objects for a given container and assert which are present and
+ which are not.
+ """
+ _, object_list = self.container_client.list_container_contents(
+ container_name)
+ if present_obj:
+ for obj in present_obj:
+ self.assertIn(obj, object_list)
+ if not_present_obj:
+ for obj in not_present_obj:
+ self.assertNotIn(obj, object_list)
+
+ def _change_container_acl(self, container_name, acl):
+ metadata_param = {'metadata_prefix': 'x-container-',
+ 'metadata': {'read': acl}}
+ self.container_client.update_container_metadata(container_name,
+ **metadata_param)
+ resp, _ = self.container_client.list_container_metadata(container_name)
+ self.assertEqual(resp['x-container-read'], acl)
+
+ def _download_and_verify(self, container_name, obj_name, expected_data):
+ _, obj = self.object_client.get_object(container_name, obj_name)
+ self.assertEqual(obj, expected_data)
diff --git a/tempest/scenario/test_minimum_basic.py b/tempest/scenario/test_minimum_basic.py
index 7316674..4bc4a98 100644
--- a/tempest/scenario/test_minimum_basic.py
+++ b/tempest/scenario/test_minimum_basic.py
@@ -76,9 +76,9 @@
self.assertEqual(self.volume, volume)
def nova_volume_attach(self):
- # TODO(andreaf) Device should be here CONF.compute.volume_device_name
+ volume_device_path = '/dev/' + CONF.compute.volume_device_name
_, volume_attachment = self.servers_client.attach_volume(
- self.server['id'], self.volume['id'], '/dev/vdb')
+ self.server['id'], self.volume['id'], volume_device_path)
volume = volume_attachment['volumeAttachment']
self.assertEqual(self.volume['id'], volume['id'])
self.volumes_client.wait_for_volume_status(volume['id'], 'in-use')
@@ -113,7 +113,7 @@
def check_partitions(self):
# NOTE(andreaf) The device name may be different on different guest OS
partitions = self.linux_client.get_partitions()
- self.assertEqual(1, partitions.count('vdb'))
+ self.assertEqual(1, partitions.count(CONF.compute.volume_device_name))
def nova_volume_detach(self):
self.servers_client.detach_volume(self.server['id'], self.volume['id'])
diff --git a/tempest/scenario/test_swift_basic_ops.py b/tempest/scenario/test_swift_basic_ops.py
index 3fa6d2c..ad74ec4 100644
--- a/tempest/scenario/test_swift_basic_ops.py
+++ b/tempest/scenario/test_swift_basic_ops.py
@@ -14,7 +14,6 @@
# under the License.
from tempest.common import http
-from tempest.common.utils import data_utils
from tempest import config
from tempest.openstack.common import log as logging
from tempest.scenario import manager
@@ -25,9 +24,9 @@
LOG = logging.getLogger(__name__)
-class TestSwiftBasicOps(manager.ScenarioTest):
+class TestSwiftBasicOps(manager.SwiftScenarioTest):
"""
- Test swift with the follow operations:
+ Test swift basic ops.
* get swift stat.
* create container.
* upload a file to the created container.
@@ -40,75 +39,6 @@
* change ACL of the container and make sure it works successfully
"""
- @classmethod
- def setUpClass(cls):
- cls.set_network_resources()
- super(TestSwiftBasicOps, cls).setUpClass()
- if not CONF.service_available.swift:
- skip_msg = ("%s skipped as swift is not available" %
- cls.__name__)
- raise cls.skipException(skip_msg)
- # Clients for Swift
- cls.account_client = cls.manager.account_client
- cls.container_client = cls.manager.container_client
- cls.object_client = cls.manager.object_client
-
- def _get_swift_stat(self):
- """get swift status for our user account."""
- self.account_client.list_account_containers()
- LOG.debug('Swift status information obtained successfully')
-
- def _create_container(self, container_name=None):
- name = container_name or data_utils.rand_name(
- 'swift-scenario-container')
- self.container_client.create_container(name)
- # look for the container to assure it is created
- self._list_and_check_container_objects(name)
- LOG.debug('Container %s created' % (name))
- return name
-
- def _delete_container(self, container_name):
- self.container_client.delete_container(container_name)
- LOG.debug('Container %s deleted' % (container_name))
-
- def _upload_object_to_container(self, container_name, obj_name=None):
- obj_name = obj_name or data_utils.rand_name('swift-scenario-object')
- obj_data = data_utils.arbitrary_string()
- self.object_client.create_object(container_name, obj_name, obj_data)
- return obj_name, obj_data
-
- def _delete_object(self, container_name, filename):
- self.object_client.delete_object(container_name, filename)
- self._list_and_check_container_objects(container_name,
- not_present_obj=[filename])
-
- def _list_and_check_container_objects(self, container_name, present_obj=[],
- not_present_obj=[]):
- """
- List objects for a given container and assert which are present and
- which are not.
- """
- _, object_list = self.container_client.list_container_contents(
- container_name)
- if present_obj:
- for obj in present_obj:
- self.assertIn(obj, object_list)
- if not_present_obj:
- for obj in not_present_obj:
- self.assertNotIn(obj, object_list)
-
- def _change_container_acl(self, container_name, acl):
- metadata_param = {'metadata_prefix': 'x-container-',
- 'metadata': {'read': acl}}
- self.container_client.update_container_metadata(container_name,
- **metadata_param)
- resp, _ = self.container_client.list_container_metadata(container_name)
- self.assertEqual(resp['x-container-read'], acl)
-
- def _download_and_verify(self, container_name, obj_name, expected_data):
- _, obj = self.object_client.get_object(container_name, obj_name)
- self.assertEqual(obj, expected_data)
-
@test.services('object_storage')
def test_swift_basic_ops(self):
self._get_swift_stat()
diff --git a/tempest/scenario/test_volume_boot_pattern.py b/tempest/scenario/test_volume_boot_pattern.py
index bf5d1f6..ec8575a 100644
--- a/tempest/scenario/test_volume_boot_pattern.py
+++ b/tempest/scenario/test_volume_boot_pattern.py
@@ -10,8 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-from cinderclient import exceptions as cinder_exc
-
from tempest.common.utils import data_utils
from tempest import config
from tempest.openstack.common import log
@@ -23,7 +21,7 @@
LOG = log.getLogger(__name__)
-class TestVolumeBootPattern(manager.OfficialClientTest):
+class TestVolumeBootPattern(manager.ScenarioTest):
"""
This test case attempts to reproduce the following steps:
@@ -54,28 +52,32 @@
# dev_name=id:type:size:delete_on_terminate
# where type needs to be "snap" if the server is booted
# from a snapshot, size instead can be safely left empty
- bd_map = {
- 'vda': vol_id + ':::0'
- }
- security_groups = [self.security_group.name]
+ bd_map = [{
+ 'device_name': 'vda',
+ 'volume_id': vol_id,
+ 'delete_on_termination': '0'}]
+ self.security_group = self._create_security_group_nova()
+ security_groups = [{'name': self.security_group['name']}]
create_kwargs = {
'block_device_mapping': bd_map,
- 'key_name': keypair.name,
+ 'key_name': keypair['name'],
'security_groups': security_groups
}
return self.create_server(image='', create_kwargs=create_kwargs)
def _create_snapshot_from_volume(self, vol_id):
- volume_snapshots = self.volume_client.volume_snapshots
snap_name = data_utils.rand_name('snapshot')
- snap = volume_snapshots.create(volume_id=vol_id,
- force=True,
- display_name=snap_name)
- self.addCleanup_with_wait(self.volume_client.volume_snapshots, snap.id,
- exc_type=cinder_exc.NotFound)
- self.status_timeout(volume_snapshots,
- snap.id,
- 'available')
+ _, snap = self.snapshots_client.create_snapshot(
+ volume_id=vol_id,
+ force=True,
+ display_name=snap_name)
+ self.addCleanup_with_wait(
+ waiter_callable=self.snapshots_client.wait_for_resource_deletion,
+ thing_id=snap['id'], thing_id_param='id',
+ cleanup_callable=self.delete_wrapper,
+ cleanup_args=[self.snapshots_client.delete_snapshot, snap['id']])
+ self.snapshots_client.wait_for_snapshot_status(snap['id'], 'available')
+ self.assertEqual(snap_name, snap['display_name'])
return snap
def _create_volume_from_snapshot(self, snap_id):
@@ -85,27 +87,26 @@
def _stop_instances(self, instances):
# NOTE(gfidente): two loops so we do not wait for the status twice
for i in instances:
- self.compute_client.servers.stop(i)
+ self.servers_client.stop(i['id'])
for i in instances:
- self.status_timeout(self.compute_client.servers,
- i.id,
- 'SHUTOFF')
+ self.servers_client.wait_for_server_status(i['id'], 'SHUTOFF')
def _detach_volumes(self, volumes):
# NOTE(gfidente): two loops so we do not wait for the status twice
for v in volumes:
- self.volume_client.volumes.detach(v)
+ self.volumes_client.detach_volume(v['id'])
for v in volumes:
- self.status_timeout(self.volume_client.volumes,
- v.id,
- 'available')
+ self.volumes_client.wait_for_volume_status(v['id'], 'available')
def _ssh_to_server(self, server, keypair):
if CONF.compute.use_floatingip_for_ssh:
- floating_ip = self.compute_client.floating_ips.create()
- self.addCleanup(self.delete_wrapper, floating_ip)
- server.add_floating_ip(floating_ip)
- ip = floating_ip.ip
+ _, floating_ip = self.floating_ips_client.create_floating_ip()
+ self.addCleanup(self.delete_wrapper,
+ self.floating_ips_client.delete_floating_ip,
+ floating_ip['id'])
+ self.floating_ips_client.associate_floating_ip_to_server(
+ floating_ip['ip'], server['id'])
+ ip = floating_ip['ip']
else:
network_name_for_ssh = CONF.compute.network_for_ssh
ip = server.networks[network_name_for_ssh][0]
@@ -113,10 +114,10 @@
try:
return self.get_remote_client(
ip,
- private_key=keypair.private_key)
+ private_key=keypair['private_key'])
except Exception:
LOG.exception('ssh to server failed')
- self._log_console_output()
+ self._log_console_output(self)
raise
def _get_content(self, ssh_client):
@@ -129,8 +130,8 @@
return self._get_content(ssh_client)
def _delete_server(self, server):
- self.compute_client.servers.delete(server)
- self.delete_timeout(self.compute_client.servers, server.id)
+ self.servers_client.delete_server(server['id'])
+ self.servers_client.wait_for_server_termination(server['id'])
def _check_content_of_written_file(self, ssh_client, expected):
actual = self._get_content(ssh_client)
@@ -143,7 +144,7 @@
# create an instance from volume
volume_origin = self._create_volume_from_image()
- instance_1st = self._boot_instance_from_volume(volume_origin.id,
+ instance_1st = self._boot_instance_from_volume(volume_origin['id'],
keypair)
# write content to volume on instance
@@ -155,7 +156,7 @@
self._delete_server(instance_1st)
# create a 2nd instance from volume
- instance_2nd = self._boot_instance_from_volume(volume_origin.id,
+ instance_2nd = self._boot_instance_from_volume(volume_origin['id'],
keypair)
# check the content of written file
@@ -164,11 +165,11 @@
self._check_content_of_written_file(ssh_client_for_instance_2nd, text)
# snapshot a volume
- snapshot = self._create_snapshot_from_volume(volume_origin.id)
+ snapshot = self._create_snapshot_from_volume(volume_origin['id'])
# create a 3rd instance from snapshot
- volume = self._create_volume_from_snapshot(snapshot.id)
- instance_from_snapshot = self._boot_instance_from_volume(volume.id,
+ volume = self._create_volume_from_snapshot(snapshot['id'])
+ instance_from_snapshot = self._boot_instance_from_volume(volume['id'],
keypair)
# check the content of written file
@@ -186,10 +187,11 @@
bdms = [{'uuid': vol_id, 'source_type': 'volume',
'destination_type': 'volume', 'boot_index': 0,
'delete_on_termination': False}]
- security_groups = [self.security_group.name]
+ self.security_group = self._create_security_group_nova()
+ security_groups = [{'name': self.security_group['name']}]
create_kwargs = {
'block_device_mapping_v2': bdms,
- 'key_name': keypair.name,
+ 'key_name': keypair['name'],
'security_groups': security_groups
}
return self.create_server(image='', create_kwargs=create_kwargs)
diff --git a/tempest/services/compute/json/servers_client.py b/tempest/services/compute/json/servers_client.py
index f44be29..88b68d3 100644
--- a/tempest/services/compute/json/servers_client.py
+++ b/tempest/services/compute/json/servers_client.py
@@ -58,6 +58,7 @@
disk_config: Determines if user or admin controls disk configuration.
return_reservation_id: Enable/Disable the return of reservation id
block_device_mapping: Block device mapping for the server.
+ block_device_mapping_v2: Block device mapping V2 for the server.
"""
post_body = {
'name': name,
@@ -70,7 +71,8 @@
'availability_zone', 'accessIPv4', 'accessIPv6',
'min_count', 'max_count', ('metadata', 'meta'),
('OS-DCF:diskConfig', 'disk_config'),
- 'return_reservation_id', 'block_device_mapping']:
+ 'return_reservation_id', 'block_device_mapping',
+ 'block_device_mapping_v2']:
if isinstance(option, tuple):
post_param = option[0]
key = option[1]
@@ -80,6 +82,7 @@
value = kwargs.get(key)
if value is not None:
post_body[post_param] = value
+
post_body = {'server': post_body}
if 'sched_hints' in kwargs:
diff --git a/tempest/test.py b/tempest/test.py
index 59da2f9..f34933e 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -70,7 +70,7 @@
"""A decorator used to wrap the setUpClass for cleaning up resources
when setUpClass failed.
"""
-
+ @functools.wraps(f)
def decorator(cls):
try:
f(cls)
@@ -399,25 +399,6 @@
cls.admin_client = os_admin.negative_client
@staticmethod
- def load_schema(file_or_dict):
- """
- Loads a schema from a file_or_dict on a specified location.
-
- :param file_or_dict: just a dict or filename
- """
- # NOTE(mkoderer): we will get rid of this function when all test are
- # ported to dicts
- if isinstance(file_or_dict, dict):
- return file_or_dict
-
- # NOTE(mkoderer): must be extended for xml support
- fn = os.path.join(
- os.path.abspath(os.path.dirname(os.path.dirname(__file__))),
- "etc", "schemas", file_or_dict)
- LOG.debug("Open schema file: %s" % (fn))
- return json.load(open(fn))
-
- @staticmethod
def load_tests(*args):
"""
Wrapper for testscenarios to set the mandatory scenarios variable
@@ -460,7 +441,6 @@
the data is used to generate query strings appended to the url,
otherwise for the body of the http call.
"""
- description = NegativeAutoTest.load_schema(description)
LOG.debug(description)
generator = importutils.import_class(
CONF.negative.test_generator)()
@@ -514,7 +494,6 @@
otherwise for the body of the http call.
"""
- description = NegativeAutoTest.load_schema(description)
LOG.info("Executing %s" % description["name"])
LOG.debug(description)
method = description["http-method"]
@@ -604,8 +583,6 @@
"""
@attr(type=['negative', 'gate'])
def generic_test(self):
- if hasattr(self, '_schema_file'):
- self.execute(self._schema_file)
if hasattr(self, '_schema'):
self.execute(self._schema)
diff --git a/tempest/tests/negative/test_negative_auto_test.py b/tempest/tests/negative/test_negative_auto_test.py
index edff3a8..dddd083 100644
--- a/tempest/tests/negative/test_negative_auto_test.py
+++ b/tempest/tests/negative/test_negative_auto_test.py
@@ -13,10 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
-
-import mock
-
from tempest import config
import tempest.test as test
from tempest.tests import base
@@ -58,11 +54,9 @@
for entry in entries:
self.assertIsNotNone(entry[1]['resource'])
- @mock.patch('tempest.test.NegativeAutoTest.load_schema')
- def test_generate_scenario(self, open_mock):
- open_mock.return_value = self.fake_input_desc
+ def test_generate_scenario(self):
scenarios = test.NegativeAutoTest.\
- generate_scenario(None)
+ generate_scenario(self.fake_input_desc)
self.assertIsInstance(scenarios, list)
for scenario in scenarios:
@@ -72,13 +66,3 @@
self._check_prop_entries(scenarios, "prop_minRam")
self._check_prop_entries(scenarios, "prop_minDisk")
self._check_resource_entries(scenarios, "inv_res")
-
- def test_load_schema(self):
- json_schema = json.dumps(self.fake_input_desc)
- with mock.patch('tempest.test.open',
- mock.mock_open(read_data=json_schema),
- create=True):
- return_file = test.NegativeAutoTest.load_schema('filename')
- self.assertEqual(return_file, self.fake_input_desc)
- return_dict = test.NegativeAutoTest.load_schema(self.fake_input_desc)
- self.assertEqual(return_file, return_dict)
diff --git a/tempest/tests/test_decorators.py b/tempest/tests/test_decorators.py
index 6b678f7..12104ec 100644
--- a/tempest/tests/test_decorators.py
+++ b/tempest/tests/test_decorators.py
@@ -237,7 +237,7 @@
class TestSimpleNegativeDecorator(BaseDecoratorsTest):
@test.SimpleNegativeAutoTest
class FakeNegativeJSONTest(test.NegativeAutoTest):
- _schema_file = 'fake/schemas/file.json'
+ _schema = {}
def test_testfunc_exist(self):
self.assertIn("test_fake_negative", dir(self.FakeNegativeJSONTest))
@@ -247,4 +247,4 @@
obj = self.FakeNegativeJSONTest("test_fake_negative")
self.assertIn("test_fake_negative", dir(obj))
obj.test_fake_negative()
- mock.assert_called_once_with(self.FakeNegativeJSONTest._schema_file)
+ mock.assert_called_once_with(self.FakeNegativeJSONTest._schema)