Merge "Updated database clients for bp client-checks-success"
diff --git a/REVIEWING.rst b/REVIEWING.rst
new file mode 100644
index 0000000..d6dc83e
--- /dev/null
+++ b/REVIEWING.rst
@@ -0,0 +1,60 @@
+Reviewing Tempest Code
+======================
+
+To start, read the `OpenStack Common Review Checklist
+<https://wiki.openstack.org/wiki/ReviewChecklist#Common_Review_Checklist>`_
+
+
+Ensuring code is executed
+-------------------------
+
+Any new test, or change to an existing test, has to be verified in the gate.
+This means that the first thing to check with any change is that a gate job
+actually runs it. Tests which aren't executed, either because of configuration
+or skips, should not be accepted.
+
+
+Unit Tests
+----------
+
+For any change that adds new functionality, either to common code or to an
+out-of-band tool, unit tests are required. This is to ensure we don't introduce
+future regressions and to test conditions which we may not hit in the gate runs.
+Tests and service clients aren't required to have unit tests since they should
+be self-verifying when run in the gate.
+
+
+API Stability
+-------------
+Tests should only be added for published, stable APIs. If a patch contains
+tests for an API which hasn't been marked as stable, or for an API which
+doesn't conform to the `API stability guidelines
+<https://wiki.openstack.org/wiki/Governance/Approved/APIStability>`_, then it
+should not be approved.
+
+
+Reject Copy and Paste Test Code
+-------------------------------
+When creating new tests that are similar to existing tests, it is tempting to
+simply copy the code and make a few modifications. This increases code size and
+the maintenance burden. Such changes should not be approved if it is easy to
+abstract the duplicated code into a function or method.
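+
+For example, two tests that differ only in their input can usually share one
+helper instead of duplicating the setup (a hypothetical, generic sketch; none
+of the names below come from this repository)::
+
+    import unittest
+
+
+    class PayloadTest(unittest.TestCase):
+
+        def _make_payload(self, **overrides):
+            # Shared helper: build the common request body once instead of
+            # copying it into every test.
+            payload = {'name': 'default', 'enabled': True}
+            payload.update(overrides)
+            return payload
+
+        def test_default_payload_is_enabled(self):
+            self.assertTrue(self._make_payload()['enabled'])
+
+        def test_payload_name_can_be_overridden(self):
+            self.assertEqual('custom',
+                             self._make_payload(name='custom')['name'])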
+
+
+Being explicit
+--------------
+When tests are being added that depend on a configurable feature or extension,
+polling the API to discover that it is enabled should not be done. This will
+just result in bugs being masked because the test can be skipped automatically.
+Instead, the config file should be used to determine whether a test should be
+skipped or not. Do not approve changes that depend on an API call to determine
+whether to skip or not.
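+
+For example, the config-driven skip pattern already used elsewhere in tempest
+looks like this (a minimal sketch; the class and test names are made up, and
+the snapshot flag is only illustrative)::
+
+    import testtools
+
+    from tempest.api.compute import base
+    from tempest import config
+    from tempest import test
+
+    CONF = config.CONF
+
+
+    class ExampleSnapshotTest(base.BaseV2ComputeTest):
+
+        @testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
+                              'Snapshotting not available, skipping.')
+        @test.attr(type='gate')
+        def test_feature_that_needs_snapshots(self):
+            # Whether this runs is decided by tempest.conf, not by an API
+            # call made at runtime to discover the feature.
+            pass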
+
+
+When to approve
+---------------
+ * Every patch needs two +2s before being approved.
+ * It's OK to hold off on an approval until a subject matter expert reviews it.
+ * If a patch has already been approved but requires a trivial rebase to merge,
+ you do not have to wait for a second +2, since the patch has already had
+ two +2s.
diff --git a/doc/source/REVIEWING.rst b/doc/source/REVIEWING.rst
new file mode 120000
index 0000000..841e042
--- /dev/null
+++ b/doc/source/REVIEWING.rst
@@ -0,0 +1 @@
+../../REVIEWING.rst
\ No newline at end of file
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 25bc900..d3118ac 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -9,6 +9,7 @@
overview
HACKING
+ REVIEWING
------------
Field Guides
diff --git a/etc/schemas/compute/admin/flavor_create.json b/etc/schemas/compute/admin/flavor_create.json
deleted file mode 100644
index 0a3e7b3..0000000
--- a/etc/schemas/compute/admin/flavor_create.json
+++ /dev/null
@@ -1,20 +0,0 @@
-{
- "name": "flavor-create",
- "http-method": "POST",
- "admin_client": true,
- "url": "flavors",
- "default_result_code": 400,
- "json-schema": {
- "type": "object",
- "properties": {
- "name": { "type": "string"},
- "ram": { "type": "integer", "minimum": 1},
- "vcpus": { "type": "integer", "minimum": 1},
- "disk": { "type": "integer"},
- "id": { "type": "integer"},
- "swap": { "type": "integer"},
- "rxtx_factor": { "type": "integer"},
- "OS-FLV-EXT-DATA:ephemeral": { "type": "integer"}
- }
- }
-}
diff --git a/etc/schemas/compute/servers/get_console_output.json b/etc/schemas/compute/servers/get_console_output.json
deleted file mode 100644
index 8d974ba..0000000
--- a/etc/schemas/compute/servers/get_console_output.json
+++ /dev/null
@@ -1,23 +0,0 @@
-{
- "name": "get-console-output",
- "http-method": "POST",
- "url": "servers/%s/action",
- "resources": [
- {"name":"server", "expected_result": 404}
- ],
- "json-schema": {
- "type": "object",
- "properties": {
- "os-getConsoleOutput": {
- "type": "object",
- "properties": {
- "length": {
- "type": ["integer", "string"],
- "minimum": 0
- }
- }
- }
- },
- "additionalProperties": false
- }
-}
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index 247f6d1..ef56ab3 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -933,8 +933,15 @@
# Directory containing image files (string value)
#img_dir=/opt/stack/new/devstack/files/images/cirros-0.3.1-x86_64-uec
-# QCOW2 image file name (string value)
-#qcow2_img_file=cirros-0.3.1-x86_64-disk.img
+# Image file name (string value)
+# Deprecated group/name - [DEFAULT]/qcow2_img_file
+#img_file=cirros-0.3.1-x86_64-disk.img
+
+# Image disk format (string value)
+#img_disk_format=qcow2
+
+# Image container format (string value)
+#img_container_format=bare
# AMI image file name (string value)
#ami_img_file=cirros-0.3.1-x86_64-blank.img
@@ -1003,9 +1010,9 @@
# value)
#trove=false
-# Whether or not Marconi is expected to be available (boolean
+# Whether or not Zaqar is expected to be available (boolean
# value)
-#marconi=false
+#zaqar=false
[stress]
diff --git a/tempest/api/baremetal/admin/test_api_discovery.py b/tempest/api/baremetal/admin/test_api_discovery.py
index 7368b3e..09788f2 100644
--- a/tempest/api/baremetal/admin/test_api_discovery.py
+++ b/tempest/api/baremetal/admin/test_api_discovery.py
@@ -19,10 +19,8 @@
@test.attr(type='smoke')
def test_api_versions(self):
- resp, descr = self.client.get_api_description()
- self.assertEqual('200', resp['status'])
+ _, descr = self.client.get_api_description()
expected_versions = ('v1',)
-
versions = [version['id'] for version in descr['versions']]
for v in expected_versions:
@@ -30,16 +28,13 @@
@test.attr(type='smoke')
def test_default_version(self):
- resp, descr = self.client.get_api_description()
- self.assertEqual('200', resp['status'])
+ _, descr = self.client.get_api_description()
default_version = descr['default_version']
-
self.assertEqual(default_version['id'], 'v1')
@test.attr(type='smoke')
def test_version_1_resources(self):
- resp, descr = self.client.get_version_description(version='v1')
- self.assertEqual('200', resp['status'])
+ _, descr = self.client.get_version_description(version='v1')
expected_resources = ('nodes', 'chassis',
'ports', 'links', 'media_types')
diff --git a/tempest/api/baremetal/admin/test_chassis.py b/tempest/api/baremetal/admin/test_chassis.py
index c306c34..254a969 100644
--- a/tempest/api/baremetal/admin/test_chassis.py
+++ b/tempest/api/baremetal/admin/test_chassis.py
@@ -35,8 +35,7 @@
@test.attr(type='smoke')
def test_create_chassis(self):
descr = data_utils.rand_name('test-chassis-')
- resp, chassis = self.create_chassis(description=descr)
- self.assertEqual('201', resp['status'])
+ _, chassis = self.create_chassis(description=descr)
self.assertEqual(chassis['description'], descr)
@test.attr(type='smoke')
@@ -44,40 +43,35 @@
# Use a unicode string for testing:
# 'We ♡ OpenStack in Ukraine'
descr = u'В Україні ♡ OpenStack!'
- resp, chassis = self.create_chassis(description=descr)
- self.assertEqual('201', resp['status'])
+ _, chassis = self.create_chassis(description=descr)
self.assertEqual(chassis['description'], descr)
@test.attr(type='smoke')
def test_show_chassis(self):
- resp, chassis = self.client.show_chassis(self.chassis['uuid'])
- self.assertEqual('200', resp['status'])
+ _, chassis = self.client.show_chassis(self.chassis['uuid'])
self._assertExpected(self.chassis, chassis)
@test.attr(type="smoke")
def test_list_chassis(self):
- resp, body = self.client.list_chassis()
- self.assertEqual('200', resp['status'])
+ _, body = self.client.list_chassis()
self.assertIn(self.chassis['uuid'],
[i['uuid'] for i in body['chassis']])
@test.attr(type='smoke')
def test_delete_chassis(self):
- resp, body = self.create_chassis()
+ _, body = self.create_chassis()
uuid = body['uuid']
- resp = self.delete_chassis(uuid)
- self.assertEqual('204', resp['status'])
+ self.delete_chassis(uuid)
self.assertRaises(exc.NotFound, self.client.show_chassis, uuid)
@test.attr(type='smoke')
def test_update_chassis(self):
- resp, body = self.create_chassis()
+ _, body = self.create_chassis()
uuid = body['uuid']
new_description = data_utils.rand_name('new-description-')
- resp, body = (self.client.update_chassis(uuid,
- description=new_description))
- self.assertEqual('200', resp['status'])
- resp, chassis = self.client.show_chassis(uuid)
+ _, body = (self.client.update_chassis(uuid,
+ description=new_description))
+ _, chassis = self.client.show_chassis(uuid)
self.assertEqual(chassis['description'], new_description)
diff --git a/tempest/api/baremetal/admin/test_drivers.py b/tempest/api/baremetal/admin/test_drivers.py
index 649886b..9e215dc 100644
--- a/tempest/api/baremetal/admin/test_drivers.py
+++ b/tempest/api/baremetal/admin/test_drivers.py
@@ -29,13 +29,11 @@
@test.attr(type="smoke")
def test_list_drivers(self):
- resp, drivers = self.client.list_drivers()
- self.assertEqual('200', resp['status'])
+ _, drivers = self.client.list_drivers()
self.assertIn(self.driver_name,
[d['name'] for d in drivers['drivers']])
@test.attr(type="smoke")
def test_show_driver(self):
- resp, driver = self.client.show_driver(self.driver_name)
- self.assertEqual('200', resp['status'])
+ _, driver = self.client.show_driver(self.driver_name)
self.assertEqual(self.driver_name, driver['name'])
diff --git a/tempest/api/baremetal/admin/test_nodes.py b/tempest/api/baremetal/admin/test_nodes.py
index fc67854..43ea1e6 100644
--- a/tempest/api/baremetal/admin/test_nodes.py
+++ b/tempest/api/baremetal/admin/test_nodes.py
@@ -40,30 +40,25 @@
'storage': '10240',
'memory': '1024'}
- resp, body = self.create_node(self.chassis['uuid'], **params)
- self.assertEqual('201', resp['status'])
+ _, body = self.create_node(self.chassis['uuid'], **params)
self._assertExpected(params, body['properties'])
@test.attr(type='smoke')
def test_delete_node(self):
- resp, node = self.create_node(self.chassis['uuid'])
- self.assertEqual('201', resp['status'])
+ _, node = self.create_node(self.chassis['uuid'])
- resp = self.delete_node(node['uuid'])
+ self.delete_node(node['uuid'])
- self.assertEqual(resp['status'], '204')
self.assertRaises(exc.NotFound, self.client.show_node, node['uuid'])
@test.attr(type='smoke')
def test_show_node(self):
- resp, loaded_node = self.client.show_node(self.node['uuid'])
- self.assertEqual('200', resp['status'])
+ _, loaded_node = self.client.show_node(self.node['uuid'])
self._assertExpected(self.node, loaded_node)
@test.attr(type='smoke')
def test_list_nodes(self):
- resp, body = self.client.list_nodes()
- self.assertEqual('200', resp['status'])
+ _, body = self.client.list_nodes()
self.assertIn(self.node['uuid'],
[i['uuid'] for i in body['nodes']])
@@ -74,24 +69,20 @@
'storage': '10',
'memory': '128'}
- resp, node = self.create_node(self.chassis['uuid'], **props)
- self.assertEqual('201', resp['status'])
+ _, node = self.create_node(self.chassis['uuid'], **props)
new_p = {'cpu_arch': 'x86',
'cpu_num': '1',
'storage': '10000',
'memory': '12300'}
- resp, body = self.client.update_node(node['uuid'], properties=new_p)
- self.assertEqual('200', resp['status'])
- resp, node = self.client.show_node(node['uuid'])
- self.assertEqual('200', resp['status'])
+ _, body = self.client.update_node(node['uuid'], properties=new_p)
+ _, node = self.client.show_node(node['uuid'])
self._assertExpected(new_p, node['properties'])
@test.attr(type='smoke')
def test_validate_driver_interface(self):
- resp, body = self.client.validate_driver_interface(self.node['uuid'])
- self.assertEqual('200', resp['status'])
+ _, body = self.client.validate_driver_interface(self.node['uuid'])
core_interfaces = ['power', 'deploy']
for interface in core_interfaces:
self.assertIn(interface, body)
diff --git a/tempest/api/baremetal/admin/test_nodestates.py b/tempest/api/baremetal/admin/test_nodestates.py
index f24f490..76f47f9 100644
--- a/tempest/api/baremetal/admin/test_nodestates.py
+++ b/tempest/api/baremetal/admin/test_nodestates.py
@@ -24,8 +24,8 @@
@classmethod
def setUpClass(cls):
super(TestNodeStates, cls).setUpClass()
- resp, cls.chassis = cls.create_chassis()
- resp, cls.node = cls.create_node(cls.chassis['uuid'])
+ _, cls.chassis = cls.create_chassis()
+ _, cls.node = cls.create_node(cls.chassis['uuid'])
def _validate_power_state(self, node_uuid, power_state):
# Validate that power state is set within timeout
@@ -34,8 +34,7 @@
start = timeutils.utcnow()
while timeutils.delta_seconds(
start, timeutils.utcnow()) < self.power_timeout:
- resp, node = self.client.show_node(node_uuid)
- self.assertEqual(200, resp.status)
+ _, node = self.client.show_node(node_uuid)
if node['power_state'] == power_state:
return
message = ('Failed to set power state within '
@@ -44,20 +43,16 @@
@test.attr(type='smoke')
def test_list_nodestates(self):
- resp, nodestates = self.client.list_nodestates(self.node['uuid'])
- self.assertEqual('200', resp['status'])
+ _, nodestates = self.client.list_nodestates(self.node['uuid'])
for key in nodestates:
self.assertEqual(nodestates[key], self.node[key])
@test.attr(type='smoke')
def test_set_node_power_state(self):
- resp, node = self.create_node(self.chassis['uuid'])
- self.assertEqual('201', resp['status'])
+ _, node = self.create_node(self.chassis['uuid'])
states = ["power on", "rebooting", "power off"]
for state in states:
# Set power state
- resp, _ = self.client.set_node_power_state(node['uuid'],
- state)
- self.assertEqual('202', resp['status'])
+ self.client.set_node_power_state(node['uuid'], state)
# Check power state after state is set
self._validate_power_state(node['uuid'], state)
diff --git a/tempest/api/baremetal/admin/test_ports.py b/tempest/api/baremetal/admin/test_ports.py
index d4adba9..b3f9b7f 100644
--- a/tempest/api/baremetal/admin/test_ports.py
+++ b/tempest/api/baremetal/admin/test_ports.py
@@ -39,12 +39,10 @@
node_id = self.node['uuid']
address = data_utils.rand_mac_address()
- resp, port = self.create_port(node_id=node_id, address=address)
- self.assertEqual(201, resp.status)
+ _, port = self.create_port(node_id=node_id, address=address)
- resp, body = self.client.show_port(port['uuid'])
+ _, body = self.client.show_port(port['uuid'])
- self.assertEqual(200, resp.status)
self._assertExpected(port, body)
@test.attr(type='smoke')
@@ -53,12 +51,10 @@
address = data_utils.rand_mac_address()
uuid = data_utils.rand_uuid()
- resp, port = self.create_port(node_id=node_id,
- address=address, uuid=uuid)
- self.assertEqual(201, resp.status)
+ _, port = self.create_port(node_id=node_id,
+ address=address, uuid=uuid)
- resp, body = self.client.show_port(uuid)
- self.assertEqual(200, resp.status)
+ _, body = self.client.show_port(uuid)
self._assertExpected(port, body)
@test.attr(type='smoke')
@@ -67,44 +63,37 @@
address = data_utils.rand_mac_address()
extra = {'key': 'value'}
- resp, port = self.create_port(node_id=node_id, address=address,
- extra=extra)
- self.assertEqual(201, resp.status)
+ _, port = self.create_port(node_id=node_id, address=address,
+ extra=extra)
- resp, body = self.client.show_port(port['uuid'])
- self.assertEqual(200, resp.status)
+ _, body = self.client.show_port(port['uuid'])
self._assertExpected(port, body)
@test.attr(type='smoke')
def test_delete_port(self):
node_id = self.node['uuid']
address = data_utils.rand_mac_address()
- resp, port = self.create_port(node_id=node_id, address=address)
- self.assertEqual(201, resp.status)
+ _, port = self.create_port(node_id=node_id, address=address)
- resp = self.delete_port(port['uuid'])
+ self.delete_port(port['uuid'])
- self.assertEqual(204, resp.status)
self.assertRaises(exc.NotFound, self.client.show_port, port['uuid'])
@test.attr(type='smoke')
def test_show_port(self):
- resp, port = self.client.show_port(self.port['uuid'])
- self.assertEqual(200, resp.status)
+ _, port = self.client.show_port(self.port['uuid'])
self._assertExpected(self.port, port)
@test.attr(type='smoke')
def test_show_port_with_links(self):
- resp, port = self.client.show_port(self.port['uuid'])
- self.assertEqual(200, resp.status)
+ _, port = self.client.show_port(self.port['uuid'])
self.assertIn('links', port.keys())
self.assertEqual(2, len(port['links']))
self.assertIn(port['uuid'], port['links'][0]['href'])
@test.attr(type='smoke')
def test_list_ports(self):
- resp, body = self.client.list_ports()
- self.assertEqual(200, resp.status)
+ _, body = self.client.list_ports()
self.assertIn(self.port['uuid'],
[i['uuid'] for i in body['ports']])
# Verify self links.
@@ -114,8 +103,7 @@
@test.attr(type='smoke')
def test_list_with_limit(self):
- resp, body = self.client.list_ports(limit=3)
- self.assertEqual(200, resp.status)
+ _, body = self.client.list_ports(limit=3)
next_marker = body['ports'][-1]['uuid']
self.assertIn(next_marker, body['next'])
@@ -128,8 +116,7 @@
address=data_utils.rand_mac_address())
[1]['uuid'] for i in range(0, 5)]
- resp, body = self.client.list_ports_detail()
- self.assertEqual(200, resp.status)
+ _, body = self.client.list_ports_detail()
ports_dict = dict((port['uuid'], port) for port in body['ports']
if port['uuid'] in uuids)
@@ -153,8 +140,7 @@
self.create_port(node_id=node_id,
address=data_utils.rand_mac_address())
- resp, body = self.client.list_ports_detail(address=address)
- self.assertEqual(200, resp.status)
+ _, body = self.client.list_ports_detail(address=address)
self.assertEqual(1, len(body['ports']))
self.assertEqual(address, body['ports'][0]['address'])
@@ -164,9 +150,8 @@
address = data_utils.rand_mac_address()
extra = {'key1': 'value1', 'key2': 'value2', 'key3': 'value3'}
- resp, port = self.create_port(node_id=node_id, address=address,
- extra=extra)
- self.assertEqual(201, resp.status)
+ _, port = self.create_port(node_id=node_id, address=address,
+ extra=extra)
new_address = data_utils.rand_mac_address()
new_extra = {'key1': 'new-value1', 'key2': 'new-value2',
@@ -185,11 +170,9 @@
'op': 'replace',
'value': new_extra['key3']}]
- resp, _ = self.client.update_port(port['uuid'], patch)
- self.assertEqual(200, resp.status)
+ self.client.update_port(port['uuid'], patch)
- resp, body = self.client.show_port(port['uuid'])
- self.assertEqual(200, resp.status)
+ _, body = self.client.show_port(port['uuid'])
self.assertEqual(new_address, body['address'])
self.assertEqual(new_extra, body['extra'])
@@ -199,26 +182,21 @@
address = data_utils.rand_mac_address()
extra = {'key1': 'value1', 'key2': 'value2', 'key3': 'value3'}
- resp, port = self.create_port(node_id=node_id, address=address,
- extra=extra)
- self.assertEqual(201, resp.status)
+ _, port = self.create_port(node_id=node_id, address=address,
+ extra=extra)
# Removing one item from the collection
- resp, _ = self.client.update_port(port['uuid'],
- [{'path': '/extra/key2',
- 'op': 'remove'}])
- self.assertEqual(200, resp.status)
+ self.client.update_port(port['uuid'],
+ [{'path': '/extra/key2',
+ 'op': 'remove'}])
extra.pop('key2')
- resp, body = self.client.show_port(port['uuid'])
- self.assertEqual(200, resp.status)
+ _, body = self.client.show_port(port['uuid'])
self.assertEqual(extra, body['extra'])
# Removing the collection
- resp, _ = self.client.update_port(port['uuid'], [{'path': '/extra',
- 'op': 'remove'}])
- self.assertEqual(200, resp.status)
- resp, body = self.client.show_port(port['uuid'])
- self.assertEqual(200, resp.status)
+ self.client.update_port(port['uuid'], [{'path': '/extra',
+ 'op': 'remove'}])
+ _, body = self.client.show_port(port['uuid'])
self.assertEqual({}, body['extra'])
# Assert nothing else was changed
@@ -230,8 +208,7 @@
node_id = self.node['uuid']
address = data_utils.rand_mac_address()
- resp, port = self.create_port(node_id=node_id, address=address)
- self.assertEqual(201, resp.status)
+ _, port = self.create_port(node_id=node_id, address=address)
extra = {'key1': 'value1', 'key2': 'value2'}
@@ -242,11 +219,9 @@
'op': 'add',
'value': extra['key2']}]
- resp, _ = self.client.update_port(port['uuid'], patch)
- self.assertEqual(200, resp.status)
+ self.client.update_port(port['uuid'], patch)
- resp, body = self.client.show_port(port['uuid'])
- self.assertEqual(200, resp.status)
+ _, body = self.client.show_port(port['uuid'])
self.assertEqual(extra, body['extra'])
@test.attr(type='smoke')
@@ -255,9 +230,8 @@
address = data_utils.rand_mac_address()
extra = {'key1': 'value1', 'key2': 'value2'}
- resp, port = self.create_port(node_id=node_id, address=address,
- extra=extra)
- self.assertEqual(201, resp.status)
+ _, port = self.create_port(node_id=node_id, address=address,
+ extra=extra)
new_address = data_utils.rand_mac_address()
new_extra = {'key1': 'new-value1', 'key3': 'new-value3'}
@@ -274,10 +248,8 @@
'op': 'add',
'value': new_extra['key3']}]
- resp, _ = self.client.update_port(port['uuid'], patch)
- self.assertEqual(200, resp.status)
+ self.client.update_port(port['uuid'], patch)
- resp, body = self.client.show_port(port['uuid'])
- self.assertEqual(200, resp.status)
+ _, body = self.client.show_port(port['uuid'])
self.assertEqual(new_address, body['address'])
self.assertEqual(new_extra, body['extra'])
diff --git a/tempest/api/baremetal/admin/test_ports_negative.py b/tempest/api/baremetal/admin/test_ports_negative.py
index 7646677..ead3799 100644
--- a/tempest/api/baremetal/admin/test_ports_negative.py
+++ b/tempest/api/baremetal/admin/test_ports_negative.py
@@ -22,11 +22,8 @@
def setUp(self):
super(TestPortsNegative, self).setUp()
- resp, self.chassis = self.create_chassis()
- self.assertEqual('201', resp['status'])
-
- resp, self.node = self.create_node(self.chassis['uuid'])
- self.assertEqual('201', resp['status'])
+ _, self.chassis = self.create_chassis()
+ _, self.node = self.create_node(self.chassis['uuid'])
@test.attr(type=['negative', 'smoke'])
def test_create_port_malformed_mac(self):
@@ -137,13 +134,11 @@
address = data_utils.rand_mac_address()
extra = {'key': 'value'}
- resp, port = self.create_port(node_id=node_id, address=address,
- extra=extra)
- self.assertEqual('201', resp['status'])
+ _, port = self.create_port(node_id=node_id, address=address,
+ extra=extra)
port_id = port['uuid']
- resp, body = self.client.delete_port(port_id)
- self.assertEqual('204', resp['status'])
+ _, body = self.client.delete_port(port_id)
patch = [{'path': '/extra/key',
'op': 'replace',
@@ -169,8 +164,7 @@
node_id = self.node['uuid']
address = data_utils.rand_mac_address()
- resp, port = self.create_port(node_id=node_id, address=address)
- self.assertEqual('201', resp['status'])
+ _, port = self.create_port(node_id=node_id, address=address)
port_id = port['uuid']
self.assertRaises(exc.BadRequest, self.client.update_port, port_id,
@@ -182,8 +176,7 @@
node_id = self.node['uuid']
address = data_utils.rand_mac_address()
- resp, port = self.create_port(node_id=node_id, address=address)
- self.assertEqual('201', resp['status'])
+ _, port = self.create_port(node_id=node_id, address=address)
port_id = port['uuid']
self.assertRaises(exc.BadRequest, self.client.update_port, port_id,
@@ -196,8 +189,7 @@
node_id = self.node['uuid']
address = data_utils.rand_mac_address()
- resp, port = self.create_port(node_id=node_id, address=address)
- self.assertEqual('201', resp['status'])
+ _, port = self.create_port(node_id=node_id, address=address)
port_id = port['uuid']
self.assertRaises(exc.BadRequest, self.client.update_port, port_id,
@@ -209,8 +201,7 @@
node_id = self.node['uuid']
address = data_utils.rand_mac_address()
- resp, port = self.create_port(node_id=node_id, address=address)
- self.assertEqual('201', resp['status'])
+ _, port = self.create_port(node_id=node_id, address=address)
port_id = port['uuid']
patch = [{'path': '/node_uuid',
@@ -225,11 +216,9 @@
address1 = data_utils.rand_mac_address()
address2 = data_utils.rand_mac_address()
- resp, port1 = self.create_port(node_id=node_id, address=address1)
- self.assertEqual('201', resp['status'])
+ _, port1 = self.create_port(node_id=node_id, address=address1)
- resp, port2 = self.create_port(node_id=node_id, address=address2)
- self.assertEqual('201', resp['status'])
+ _, port2 = self.create_port(node_id=node_id, address=address2)
port_id = port2['uuid']
patch = [{'path': '/address',
@@ -243,8 +232,7 @@
node_id = self.node['uuid']
address = data_utils.rand_mac_address()
- resp, port = self.create_port(node_id=node_id, address=address)
- self.assertEqual('201', resp['status'])
+ _, port = self.create_port(node_id=node_id, address=address)
port_id = port['uuid']
patch = [{'path': '/node_uuid',
@@ -258,8 +246,7 @@
node_id = self.node['uuid']
address = data_utils.rand_mac_address()
- resp, port = self.create_port(node_id=node_id, address=address)
- self.assertEqual('201', resp['status'])
+ _, port = self.create_port(node_id=node_id, address=address)
port_id = port['uuid']
patch = [{'path': '/address',
@@ -275,9 +262,8 @@
address = data_utils.rand_mac_address()
extra = {'key': 'value'}
- resp, port = self.create_port(node_id=node_id, address=address,
- extra=extra)
- self.assertEqual('201', resp['status'])
+ _, port = self.create_port(node_id=node_id, address=address,
+ extra=extra)
port_id = port['uuid']
patch = [{'path': '/extra/key',
@@ -291,8 +277,7 @@
node_id = self.node['uuid']
address = data_utils.rand_mac_address()
- resp, port = self.create_port(node_id=node_id, address=address)
- self.assertEqual('201', resp['status'])
+ _, port = self.create_port(node_id=node_id, address=address)
port_id = port['uuid']
patch = [{'path': '/extra',
@@ -307,8 +292,7 @@
node_id = self.node['uuid']
address = data_utils.rand_mac_address()
- resp, port = self.create_port(node_id=node_id, address=address)
- self.assertEqual('201', resp['status'])
+ _, port = self.create_port(node_id=node_id, address=address)
port_id = port['uuid']
patch = [{'path': '/nonexistent', ' op': 'replace', 'value': 'value'}]
@@ -321,8 +305,7 @@
node_id = self.node['uuid']
address = data_utils.rand_mac_address()
- resp, port = self.create_port(node_id=node_id, address=address)
- self.assertEqual('201', resp['status'])
+ _, port = self.create_port(node_id=node_id, address=address)
port_id = port['uuid']
self.assertRaises(exc.BadRequest, self.client.update_port, port_id,
@@ -333,8 +316,7 @@
node_id = self.node['uuid']
address = data_utils.rand_mac_address()
- resp, port = self.create_port(node_id=node_id, address=address)
- self.assertEqual('201', resp['status'])
+ _, port = self.create_port(node_id=node_id, address=address)
port_id = port['uuid']
self.assertRaises(exc.BadRequest, self.client.update_port, port_id,
@@ -345,8 +327,7 @@
node_id = self.node['uuid']
address = data_utils.rand_mac_address()
- resp, port = self.create_port(node_id=node_id, address=address)
- self.assertEqual('201', resp['status'])
+ _, port = self.create_port(node_id=node_id, address=address)
port_id = port['uuid']
self.assertRaises(exc.BadRequest, self.client.update_port, port_id,
@@ -366,9 +347,8 @@
address = data_utils.rand_mac_address()
extra = {'key1': 'value1', 'key2': 'value2'}
- resp, port = self.create_port(node_id=node_id, address=address,
- extra=extra)
- self.assertEqual('201', resp['status'])
+ _, port = self.create_port(node_id=node_id, address=address,
+ extra=extra)
port_id = port['uuid']
new_address = data_utils.rand_mac_address()
@@ -393,7 +373,6 @@
patch)
# patch should not be applied
- resp, body = self.client.show_port(port_id)
- self.assertEqual(200, resp.status)
+ _, body = self.client.show_port(port_id)
self.assertEqual(address, body['address'])
self.assertEqual(extra, body['extra'])
diff --git a/tempest/api/compute/admin/test_flavors_negative.py b/tempest/api/compute/admin/test_flavors_negative.py
index 9e4412f..eece096 100644
--- a/tempest/api/compute/admin/test_flavors_negative.py
+++ b/tempest/api/compute/admin/test_flavors_negative.py
@@ -16,6 +16,7 @@
import uuid
from tempest.api.compute import base
+from tempest.api_schema.request.compute.v2 import flavors
from tempest.common.utils import data_utils
from tempest import exceptions
from tempest import test
@@ -106,4 +107,4 @@
test.NegativeAutoTest):
_interface = 'json'
_service = 'compute'
- _schema_file = 'compute/admin/flavor_create.json'
+ _schema = flavors.flavor_create
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index f684a5a..fd6df3e 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -259,6 +259,7 @@
@testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
'Snapshotting not available, backup not possible.')
@test.attr(type='gate')
+ @test.services('image')
def test_create_backup(self):
# Positive test:create backup successfully and rotate backups correctly
# create the first and the second backup
diff --git a/tempest/api/compute/servers/test_server_rescue.py b/tempest/api/compute/servers/test_server_rescue.py
index ab98d88..b737888 100644
--- a/tempest/api/compute/servers/test_server_rescue.py
+++ b/tempest/api/compute/servers/test_server_rescue.py
@@ -54,7 +54,6 @@
# Server for positive tests
resp, server = cls.create_test_server(wait_until='BUILD')
- resp, resc_server = cls.create_test_server(wait_until='ACTIVE')
cls.server_id = server['id']
cls.password = server['adminPass']
cls.servers_client.wait_for_server_status(cls.server_id, 'ACTIVE')
diff --git a/tempest/api/compute/servers/test_servers_negative_new.py b/tempest/api/compute/servers/test_servers_negative_new.py
index 43ddb3a..c5f9fdd 100644
--- a/tempest/api/compute/servers/test_servers_negative_new.py
+++ b/tempest/api/compute/servers/test_servers_negative_new.py
@@ -15,6 +15,7 @@
from tempest.api.compute import base
+from tempest.api_schema.request.compute.v2 import servers
from tempest import test
@@ -25,7 +26,7 @@
class GetConsoleOutputNegativeTestJSON(base.BaseV2ComputeTest,
test.NegativeAutoTest):
_service = 'compute'
- _schema_file = 'compute/servers/get_console_output.json'
+ _schema = servers.get_console_output
@classmethod
def setUpClass(cls):
diff --git a/tempest/api/database/flavors/test_flavors.py b/tempest/api/database/flavors/test_flavors.py
index 999e8ca..a5c8caa 100644
--- a/tempest/api/database/flavors/test_flavors.py
+++ b/tempest/api/database/flavors/test_flavors.py
@@ -52,6 +52,7 @@
self.assertNotIn(name, db_flavor)
@test.attr(type='smoke')
+ @test.services('compute')
def test_compare_db_flavors_with_os(self):
_, db_flavors = self.client.list_db_flavors()
_, os_flavors = self.os_flavors_client.list_flavors_with_detail()
diff --git a/tempest/api/network/admin/test_agent_management.py b/tempest/api/network/admin/test_agent_management.py
index b848994..f8782ad 100644
--- a/tempest/api/network/admin/test_agent_management.py
+++ b/tempest/api/network/admin/test_agent_management.py
@@ -32,8 +32,7 @@
@test.attr(type='smoke')
def test_list_agent(self):
- resp, body = self.admin_client.list_agents()
- self.assertEqual('200', resp['status'])
+ _, body = self.admin_client.list_agents()
agents = body['agents']
# Hearthbeats must be excluded from comparison
self.agent.pop('heartbeat_timestamp', None)
@@ -45,15 +44,13 @@
@test.attr(type=['smoke'])
def test_list_agents_non_admin(self):
- resp, body = self.client.list_agents()
- self.assertEqual('200', resp['status'])
+ _, body = self.client.list_agents()
self.assertEqual(len(body["agents"]), 0)
@test.attr(type='smoke')
def test_show_agent(self):
- resp, body = self.admin_client.show_agent(self.agent['id'])
+ _, body = self.admin_client.show_agent(self.agent['id'])
agent = body['agent']
- self.assertEqual('200', resp['status'])
self.assertEqual(agent['id'], self.agent['id'])
@test.attr(type='smoke')
@@ -62,10 +59,9 @@
# Try to update the 'admin_state_up' to the original
# one to avoid the negative effect.
agent_status = {'admin_state_up': origin_status}
- resp, body = self.admin_client.update_agent(agent_id=self.agent['id'],
- agent_info=agent_status)
+ _, body = self.admin_client.update_agent(agent_id=self.agent['id'],
+ agent_info=agent_status)
updated_status = body['agent']['admin_state_up']
- self.assertEqual('200', resp['status'])
self.assertEqual(origin_status, updated_status)
@test.attr(type='smoke')
@@ -73,10 +69,8 @@
self.useFixture(fixtures.LockFixture('agent_description'))
description = 'description for update agent.'
agent_description = {'description': description}
- resp, body = self.admin_client.update_agent(
- agent_id=self.agent['id'],
- agent_info=agent_description)
- self.assertEqual('200', resp['status'])
+ _, body = self.admin_client.update_agent(agent_id=self.agent['id'],
+ agent_info=agent_description)
self.addCleanup(self._restore_agent)
updated_description = body['agent']['description']
self.assertEqual(updated_description, description)
diff --git a/tempest/api/network/admin/test_dhcp_agent_scheduler.py b/tempest/api/network/admin/test_dhcp_agent_scheduler.py
index 25e1cc0..c84d1a7 100644
--- a/tempest/api/network/admin/test_dhcp_agent_scheduler.py
+++ b/tempest/api/network/admin/test_dhcp_agent_scheduler.py
@@ -35,9 +35,8 @@
@test.attr(type='smoke')
def test_list_dhcp_agent_hosting_network(self):
- resp, body = self.admin_client.list_dhcp_agent_hosting_network(
+ _, body = self.admin_client.list_dhcp_agent_hosting_network(
self.network['id'])
- self.assertEqual(resp['status'], '200')
@test.attr(type='smoke')
def test_list_networks_hosted_by_one_dhcp(self):
@@ -51,9 +50,8 @@
def _check_network_in_dhcp_agent(self, network_id, agent):
network_ids = []
- resp, body = self.admin_client.list_networks_hosted_by_one_dhcp_agent(
+ _, body = self.admin_client.list_networks_hosted_by_one_dhcp_agent(
agent['id'])
- self.assertEqual(resp['status'], '200')
networks = body['networks']
for network in networks:
network_ids.append(network['id'])
@@ -85,17 +83,15 @@
self._remove_network_from_dhcp_agent(network_id, agent)
def _remove_network_from_dhcp_agent(self, network_id, agent):
- resp, body = self.admin_client.remove_network_from_dhcp_agent(
+ _, body = self.admin_client.remove_network_from_dhcp_agent(
agent_id=agent['id'],
network_id=network_id)
- self.assertEqual(resp['status'], '204')
self.assertFalse(self._check_network_in_dhcp_agent(
network_id, agent))
def _add_dhcp_agent_to_network(self, network_id, agent):
- resp, body = self.admin_client.add_dhcp_agent_to_network(
- agent['id'], network_id)
- self.assertEqual(resp['status'], '201')
+ _, body = self.admin_client.add_dhcp_agent_to_network(agent['id'],
+ network_id)
self.assertTrue(self._check_network_in_dhcp_agent(
network_id, agent))
diff --git a/tempest/api/network/admin/test_external_network_extension.py b/tempest/api/network/admin/test_external_network_extension.py
index c7fde77..710c669 100644
--- a/tempest/api/network/admin/test_external_network_extension.py
+++ b/tempest/api/network/admin/test_external_network_extension.py
@@ -26,9 +26,8 @@
post_body = {'name': data_utils.rand_name('network-')}
if external:
post_body['router:external'] = external
- resp, body = self.admin_client.create_network(**post_body)
+ _, body = self.admin_client.create_network(**post_body)
network = body['network']
- self.assertEqual('201', resp['status'])
self.addCleanup(self.admin_client.delete_network, network['id'])
return network
@@ -46,9 +45,8 @@
network = self._create_network(external=False)
self.assertFalse(network.get('router:external', False))
update_body = {'router:external': True}
- resp, body = self.admin_client.update_network(network['id'],
- **update_body)
- self.assertEqual('200', resp['status'])
+ _, body = self.admin_client.update_network(network['id'],
+ **update_body)
updated_network = body['network']
# Verify that router:external parameter was updated
self.assertTrue(updated_network['router:external'])
@@ -59,8 +57,7 @@
# List networks as a normal user and confirm the external
# network extension attribute is returned for those networks
# that were created as external
- resp, body = self.client.list_networks()
- self.assertEqual('200', resp['status'])
+ _, body = self.client.list_networks()
networks_list = [net['id'] for net in body['networks']]
self.assertIn(external_network['id'], networks_list)
self.assertIn(self.network['id'], networks_list)
@@ -75,14 +72,12 @@
external_network = self._create_network()
# Show an external network as a normal user and confirm the
# external network extension attribute is returned.
- resp, body = self.client.show_network(external_network['id'])
- self.assertEqual('200', resp['status'])
+ _, body = self.client.show_network(external_network['id'])
show_ext_net = body['network']
self.assertEqual(external_network['name'], show_ext_net['name'])
self.assertEqual(external_network['id'], show_ext_net['id'])
self.assertTrue(show_ext_net['router:external'])
- resp, body = self.client.show_network(self.network['id'])
- self.assertEqual('200', resp['status'])
+ _, body = self.client.show_network(self.network['id'])
show_net = body['network']
# Verify with show that router:external is False for network
self.assertEqual(self.network['name'], show_net['name'])
diff --git a/tempest/api/network/admin/test_floating_ips_admin_actions.py b/tempest/api/network/admin/test_floating_ips_admin_actions.py
index 5728432..3718cb5 100644
--- a/tempest/api/network/admin/test_floating_ips_admin_actions.py
+++ b/tempest/api/network/admin/test_floating_ips_admin_actions.py
@@ -36,21 +36,18 @@
@test.attr(type='smoke')
def test_list_floating_ips_from_admin_and_nonadmin(self):
# Create floating ip from admin user
- resp, floating_ip_admin = self.admin_client.create_floatingip(
+ _, floating_ip_admin = self.admin_client.create_floatingip(
floating_network_id=self.ext_net_id)
- self.assertEqual('201', resp['status'])
self.addCleanup(self.admin_client.delete_floatingip,
floating_ip_admin['floatingip']['id'])
# Create floating ip from alt user
- resp, body = self.alt_client.create_floatingip(
+ _, body = self.alt_client.create_floatingip(
floating_network_id=self.ext_net_id)
- self.assertEqual('201', resp['status'])
floating_ip_alt = body['floatingip']
self.addCleanup(self.alt_client.delete_floatingip,
floating_ip_alt['id'])
# List floating ips from admin
- resp, body = self.admin_client.list_floatingips()
- self.assertEqual('200', resp['status'])
+ _, body = self.admin_client.list_floatingips()
floating_ip_ids_admin = [f['id'] for f in body['floatingips']]
# Check that admin sees all floating ips
self.assertIn(self.floating_ip['id'], floating_ip_ids_admin)
diff --git a/tempest/api/network/admin/test_l3_agent_scheduler.py b/tempest/api/network/admin/test_l3_agent_scheduler.py
index 3b05f42..d7de73b 100644
--- a/tempest/api/network/admin/test_l3_agent_scheduler.py
+++ b/tempest/api/network/admin/test_l3_agent_scheduler.py
@@ -52,9 +52,7 @@
@test.attr(type='smoke')
def test_list_routers_on_l3_agent(self):
- resp, body = self.admin_client.list_routers_on_l3_agent(
- self.agent['id'])
- self.assertEqual('200', resp['status'])
+ _, body = self.admin_client.list_routers_on_l3_agent(self.agent['id'])
@test.attr(type='smoke')
def test_add_list_remove_router_on_l3_agent(self):
@@ -62,21 +60,20 @@
name = data_utils.rand_name('router1-')
resp, router = self.client.create_router(name)
self.addCleanup(self.client.delete_router, router['router']['id'])
- resp, body = self.admin_client.add_router_to_l3_agent(
- self.agent['id'], router['router']['id'])
- self.assertEqual('201', resp['status'])
- resp, body = self.admin_client.list_l3_agents_hosting_router(
+ _, body = self.admin_client.add_router_to_l3_agent(
+ self.agent['id'],
router['router']['id'])
- self.assertEqual('200', resp['status'])
+ _, body = self.admin_client.list_l3_agents_hosting_router(
+ router['router']['id'])
for agent in body['agents']:
l3_agent_ids.append(agent['id'])
self.assertIn('agent_type', agent)
self.assertEqual('L3 agent', agent['agent_type'])
self.assertIn(self.agent['id'], l3_agent_ids)
del l3_agent_ids[:]
- resp, body = self.admin_client.remove_router_from_l3_agent(
- self.agent['id'], router['router']['id'])
- self.assertEqual('204', resp['status'])
+ _, body = self.admin_client.remove_router_from_l3_agent(
+ self.agent['id'],
+ router['router']['id'])
# NOTE(afazekas): The deletion not asserted, because neutron
# is not forbidden to reschedule the router to the same agent
diff --git a/tempest/api/network/admin/test_lbaas_agent_scheduler.py b/tempest/api/network/admin/test_lbaas_agent_scheduler.py
index 675c62d..d0c31b3 100644
--- a/tempest/api/network/admin/test_lbaas_agent_scheduler.py
+++ b/tempest/api/network/admin/test_lbaas_agent_scheduler.py
@@ -50,17 +50,15 @@
@test.attr(type='smoke')
def test_list_pools_on_lbaas_agent(self):
found = False
- resp, body = self.admin_client.list_agents(
+ _, body = self.admin_client.list_agents(
agent_type="Loadbalancer agent")
- self.assertEqual('200', resp['status'])
agents = body['agents']
for a in agents:
msg = 'Load Balancer agent expected'
self.assertEqual(a['agent_type'], 'Loadbalancer agent', msg)
- resp, body = (
+ _, body = (
self.admin_client.list_pools_hosted_by_one_lbaas_agent(
a['id']))
- self.assertEqual('200', resp['status'])
pools = body['pools']
if self.pool['id'] in [p['id'] for p in pools]:
found = True
@@ -69,9 +67,8 @@
@test.attr(type='smoke')
def test_show_lbaas_agent_hosting_pool(self):
- resp, body = self.admin_client.show_lbaas_agent_hosting_pool(
+ _, body = self.admin_client.show_lbaas_agent_hosting_pool(
self.pool['id'])
- self.assertEqual('200', resp['status'])
self.assertEqual('Loadbalancer agent', body['agent']['agent_type'])
diff --git a/tempest/api/network/admin/test_load_balancer_admin_actions.py b/tempest/api/network/admin/test_load_balancer_admin_actions.py
index fe4fc60..a97d275 100644
--- a/tempest/api/network/admin/test_load_balancer_admin_actions.py
+++ b/tempest/api/network/admin/test_load_balancer_admin_actions.py
@@ -47,56 +47,54 @@
@test.attr(type='smoke')
def test_create_vip_as_admin_for_another_tenant(self):
name = data_utils.rand_name('vip-')
- resp, body = self.admin_client.create_pool(
- name=data_utils.rand_name('pool-'), lb_method="ROUND_ROBIN",
- protocol="HTTP", subnet_id=self.subnet['id'],
+ _, body = self.admin_client.create_pool(
+ name=data_utils.rand_name('pool-'),
+ lb_method="ROUND_ROBIN",
+ protocol="HTTP",
+ subnet_id=self.subnet['id'],
tenant_id=self.tenant_id)
- self.assertEqual('201', resp['status'])
pool = body['pool']
self.addCleanup(self.admin_client.delete_pool, pool['id'])
- resp, body = self.admin_client.create_vip(name=name,
- protocol="HTTP",
- protocol_port=80,
- subnet_id=self.subnet['id'],
- pool_id=pool['id'],
- tenant_id=self.tenant_id)
- self.assertEqual('201', resp['status'])
+ _, body = self.admin_client.create_vip(name=name,
+ protocol="HTTP",
+ protocol_port=80,
+ subnet_id=self.subnet['id'],
+ pool_id=pool['id'],
+ tenant_id=self.tenant_id)
vip = body['vip']
self.addCleanup(self.admin_client.delete_vip, vip['id'])
self.assertIsNotNone(vip['id'])
self.assertEqual(self.tenant_id, vip['tenant_id'])
- resp, body = self.client.show_vip(vip['id'])
- self.assertEqual('200', resp['status'])
+ _, body = self.client.show_vip(vip['id'])
show_vip = body['vip']
self.assertEqual(vip['id'], show_vip['id'])
self.assertEqual(vip['name'], show_vip['name'])
@test.attr(type='smoke')
def test_create_health_monitor_as_admin_for_another_tenant(self):
- resp, body = (
+ _, body = (
self.admin_client.create_health_monitor(delay=4,
max_retries=3,
type="TCP",
timeout=1,
tenant_id=self.tenant_id))
- self.assertEqual('201', resp['status'])
health_monitor = body['health_monitor']
self.addCleanup(self.admin_client.delete_health_monitor,
health_monitor['id'])
self.assertIsNotNone(health_monitor['id'])
self.assertEqual(self.tenant_id, health_monitor['tenant_id'])
- resp, body = self.client.show_health_monitor(health_monitor['id'])
- self.assertEqual('200', resp['status'])
+ _, body = self.client.show_health_monitor(health_monitor['id'])
show_health_monitor = body['health_monitor']
self.assertEqual(health_monitor['id'], show_health_monitor['id'])
@test.attr(type='smoke')
def test_create_pool_from_admin_user_other_tenant(self):
- resp, body = self.admin_client.create_pool(
- name=data_utils.rand_name('pool-'), lb_method="ROUND_ROBIN",
- protocol="HTTP", subnet_id=self.subnet['id'],
+ _, body = self.admin_client.create_pool(
+ name=data_utils.rand_name('pool-'),
+ lb_method="ROUND_ROBIN",
+ protocol="HTTP",
+ subnet_id=self.subnet['id'],
tenant_id=self.tenant_id)
- self.assertEqual('201', resp['status'])
pool = body['pool']
self.addCleanup(self.admin_client.delete_pool, pool['id'])
self.assertIsNotNone(pool['id'])
@@ -104,10 +102,10 @@
@test.attr(type='smoke')
def test_create_member_from_admin_user_other_tenant(self):
- resp, body = self.admin_client.create_member(
- address="10.0.9.47", protocol_port=80, pool_id=self.pool['id'],
- tenant_id=self.tenant_id)
- self.assertEqual('201', resp['status'])
+ _, body = self.admin_client.create_member(address="10.0.9.47",
+ protocol_port=80,
+ pool_id=self.pool['id'],
+ tenant_id=self.tenant_id)
member = body['member']
self.addCleanup(self.admin_client.delete_member, member['id'])
self.assertIsNotNone(member['id'])
diff --git a/tempest/api/network/admin/test_quotas.py b/tempest/api/network/admin/test_quotas.py
index 9fa54b1..9ac97f9 100644
--- a/tempest/api/network/admin/test_quotas.py
+++ b/tempest/api/network/admin/test_quotas.py
@@ -57,16 +57,14 @@
self.addCleanup(self.identity_admin_client.delete_tenant, tenant_id)
# Change quotas for tenant
- resp, quota_set = self.admin_client.update_quotas(tenant_id,
- **new_quotas)
- self.assertEqual('200', resp['status'])
+ _, quota_set = self.admin_client.update_quotas(tenant_id,
+ **new_quotas)
self.addCleanup(self.admin_client.reset_quotas, tenant_id)
for key, value in new_quotas.iteritems():
self.assertEqual(value, quota_set[key])
# Confirm our tenant is listed among tenants with non default quotas
- resp, non_default_quotas = self.admin_client.list_quotas()
- self.assertEqual('200', resp['status'])
+ _, non_default_quotas = self.admin_client.list_quotas()
found = False
for qs in non_default_quotas['quotas']:
if qs['tenant_id'] == tenant_id:
@@ -74,17 +72,14 @@
self.assertTrue(found)
# Confirm from API quotas were changed as requested for tenant
- resp, quota_set = self.admin_client.show_quotas(tenant_id)
+ _, quota_set = self.admin_client.show_quotas(tenant_id)
quota_set = quota_set['quota']
- self.assertEqual('200', resp['status'])
for key, value in new_quotas.iteritems():
self.assertEqual(value, quota_set[key])
# Reset quotas to default and confirm
- resp, body = self.admin_client.reset_quotas(tenant_id)
- self.assertEqual('204', resp['status'])
- resp, non_default_quotas = self.admin_client.list_quotas()
- self.assertEqual('200', resp['status'])
+ _, body = self.admin_client.reset_quotas(tenant_id)
+ _, non_default_quotas = self.admin_client.list_quotas()
for q in non_default_quotas['quotas']:
self.assertNotEqual(tenant_id, q['tenant_id'])
diff --git a/tempest/api/network/base.py b/tempest/api/network/base.py
index d75339c..1a23cb6 100644
--- a/tempest/api/network/base.py
+++ b/tempest/api/network/base.py
@@ -49,6 +49,7 @@
neutron as True
"""
+ _interface = 'json'
force_tenant_isolation = False
# Default to ipv4.
diff --git a/tempest/api/network/base_routers.py b/tempest/api/network/base_routers.py
index 1303bcf..f69e6fd 100644
--- a/tempest/api/network/base_routers.py
+++ b/tempest/api/network/base_routers.py
@@ -26,34 +26,29 @@
super(BaseRouterTest, cls).setUpClass()
def _delete_router(self, router_id):
- resp, _ = self.client.delete_router(router_id)
- self.assertEqual(204, resp.status)
+ self.client.delete_router(router_id)
# Asserting that the router is not found in the list
# after deletion
- resp, list_body = self.client.list_routers()
- self.assertEqual('200', resp['status'])
+ _, list_body = self.client.list_routers()
routers_list = list()
for router in list_body['routers']:
routers_list.append(router['id'])
self.assertNotIn(router_id, routers_list)
def _add_router_interface_with_subnet_id(self, router_id, subnet_id):
- resp, interface = self.client.add_router_interface_with_subnet_id(
+ _, interface = self.client.add_router_interface_with_subnet_id(
router_id, subnet_id)
- self.assertEqual('200', resp['status'])
self.addCleanup(self._remove_router_interface_with_subnet_id,
router_id, subnet_id)
self.assertEqual(subnet_id, interface['subnet_id'])
return interface
def _remove_router_interface_with_subnet_id(self, router_id, subnet_id):
- resp, body = self.client.remove_router_interface_with_subnet_id(
+ _, body = self.client.remove_router_interface_with_subnet_id(
router_id, subnet_id)
- self.assertEqual('200', resp['status'])
self.assertEqual(subnet_id, body['subnet_id'])
def _remove_router_interface_with_port_id(self, router_id, port_id):
- resp, body = self.client.remove_router_interface_with_port_id(
- router_id, port_id)
- self.assertEqual('200', resp['status'])
+ _, body = self.client.remove_router_interface_with_port_id(router_id,
+ port_id)
self.assertEqual(port_id, body['port_id'])
diff --git a/tempest/api/network/base_security_groups.py b/tempest/api/network/base_security_groups.py
index 90be454..c2af2f2 100644
--- a/tempest/api/network/base_security_groups.py
+++ b/tempest/api/network/base_security_groups.py
@@ -26,32 +26,27 @@
def _create_security_group(self):
# Create a security group
name = data_utils.rand_name('secgroup-')
- resp, group_create_body = self.client.create_security_group(name=name)
- self.assertEqual('201', resp['status'])
+ _, group_create_body = self.client.create_security_group(name=name)
self.addCleanup(self._delete_security_group,
group_create_body['security_group']['id'])
self.assertEqual(group_create_body['security_group']['name'], name)
return group_create_body, name
def _delete_security_group(self, secgroup_id):
- resp, _ = self.client.delete_security_group(secgroup_id)
- self.assertEqual(204, resp.status)
+ self.client.delete_security_group(secgroup_id)
# Asserting that the security group is not found in the list
# after deletion
- resp, list_body = self.client.list_security_groups()
- self.assertEqual('200', resp['status'])
+ _, list_body = self.client.list_security_groups()
secgroup_list = list()
for secgroup in list_body['security_groups']:
secgroup_list.append(secgroup['id'])
self.assertNotIn(secgroup_id, secgroup_list)
def _delete_security_group_rule(self, rule_id):
- resp, _ = self.client.delete_security_group_rule(rule_id)
- self.assertEqual(204, resp.status)
+ self.client.delete_security_group_rule(rule_id)
# Asserting that the security group is not found in the list
# after deletion
- resp, list_body = self.client.list_security_group_rules()
- self.assertEqual('200', resp['status'])
+ _, list_body = self.client.list_security_group_rules()
rules_list = list()
for rule in list_body['security_group_rules']:
rules_list.append(rule['id'])
diff --git a/tempest/api/network/common.py b/tempest/api/network/common.py
index 97e120f..5ac8b5a 100644
--- a/tempest/api/network/common.py
+++ b/tempest/api/network/common.py
@@ -13,6 +13,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+import abc
+
+import six
+
class AttributeDict(dict):
@@ -27,6 +31,7 @@
return super(AttributeDict, self).__getattribute__(name)
+@six.add_metaclass(abc.ABCMeta)
class DeletableResource(AttributeDict):
"""
@@ -42,8 +47,9 @@
return '<%s id="%s" name="%s">' % (self.__class__.__name__,
self.id, self.name)
+ @abc.abstractmethod
def delete(self):
- raise NotImplemented()
+ return
def __hash__(self):
return id(self)
diff --git a/tempest/api/network/test_allowed_address_pair.py b/tempest/api/network/test_allowed_address_pair.py
index 8d984d1..86acc71 100644
--- a/tempest/api/network/test_allowed_address_pair.py
+++ b/tempest/api/network/test_allowed_address_pair.py
@@ -54,16 +54,14 @@
# Create port with allowed address pair attribute
allowed_address_pairs = [{'ip_address': self.ip_address,
'mac_address': self.mac_address}]
- resp, body = self.client.create_port(
+ _, body = self.client.create_port(
network_id=self.network['id'],
allowed_address_pairs=allowed_address_pairs)
- self.assertEqual('201', resp['status'])
port_id = body['port']['id']
self.addCleanup(self.client.delete_port, port_id)
# Confirm port was created with allowed address pair attribute
- resp, body = self.client.list_ports()
- self.assertEqual('200', resp['status'])
+ _, body = self.client.list_ports()
ports = body['ports']
port = [p for p in ports if p['id'] == port_id]
msg = 'Created port not found in list of ports returned by Neutron'
@@ -73,21 +71,18 @@
@test.attr(type='smoke')
def test_update_port_with_address_pair(self):
# Create a port without allowed address pair
- resp, body = self.client.create_port(network_id=self.network['id'])
- self.assertEqual('201', resp['status'])
+ _, body = self.client.create_port(network_id=self.network['id'])
port_id = body['port']['id']
self.addCleanup(self.client.delete_port, port_id)
# Confirm port is created
- resp, body = self.client.show_port(port_id)
- self.assertEqual('200', resp['status'])
+ _, body = self.client.show_port(port_id)
# Update allowed address pair attribute of port
allowed_address_pairs = [{'ip_address': self.ip_address,
'mac_address': self.mac_address}]
- resp, body = self.client.update_port(
+ _, body = self.client.update_port(
port_id, allowed_address_pairs=allowed_address_pairs)
- self.assertEqual('200', resp['status'])
newport = body['port']
self._confirm_allowed_address_pair(newport, self.ip_address)
diff --git a/tempest/api/network/test_extensions.py b/tempest/api/network/test_extensions.py
index 529f8e9..c3607c8 100644
--- a/tempest/api/network/test_extensions.py
+++ b/tempest/api/network/test_extensions.py
@@ -47,16 +47,14 @@
expected_alias = [ext for ext in expected_alias if
test.is_extension_enabled(ext, 'network')]
actual_alias = list()
- resp, extensions = self.client.list_extensions()
- self.assertEqual('200', resp['status'])
+ _, extensions = self.client.list_extensions()
list_extensions = extensions['extensions']
# Show and verify the details of the available extensions
for ext in list_extensions:
ext_name = ext['name']
ext_alias = ext['alias']
actual_alias.append(ext['alias'])
- resp, ext_details = self.client.show_extension(ext_alias)
- self.assertEqual('200', resp['status'])
+ _, ext_details = self.client.show_extension(ext_alias)
ext_details = ext_details['extension']
self.assertIsNotNone(ext_details)
diff --git a/tempest/api/network/test_extra_dhcp_options.py b/tempest/api/network/test_extra_dhcp_options.py
index 371c651..82ebc5a 100644
--- a/tempest/api/network/test_extra_dhcp_options.py
+++ b/tempest/api/network/test_extra_dhcp_options.py
@@ -54,16 +54,13 @@
{'opt_value': '123.123.123.123', 'opt_name': 'tftp-server'},
{'opt_value': '123.123.123.45', 'opt_name': 'server-ip-address'}
]
- resp, body = self.client.create_port(
- network_id=self.network['id'],
- extra_dhcp_opts=extra_dhcp_opts)
- self.assertEqual('201', resp['status'])
+ _, body = self.client.create_port(network_id=self.network['id'],
+ extra_dhcp_opts=extra_dhcp_opts)
port_id = body['port']['id']
self.addCleanup(self.client.delete_port, port_id)
# Confirm port created has Extra DHCP Options
- resp, body = self.client.list_ports()
- self.assertEqual('200', resp['status'])
+ _, body = self.client.list_ports()
ports = body['ports']
port = [p for p in ports if p['id'] == port_id]
self.assertTrue(port)
@@ -78,13 +75,11 @@
{'opt_value': '123.123.123.45', 'opt_name': 'server-ip-address'}
]
name = data_utils.rand_name('new-port-name')
- resp, body = self.client.update_port(
- self.port['id'], name=name, extra_dhcp_opts=extra_dhcp_opts)
- self.assertEqual('200', resp['status'])
+ _, body = self.client.update_port(self.port['id'], name=name,
+ extra_dhcp_opts=extra_dhcp_opts)
# Confirm extra dhcp options were added to the port
- resp, body = self.client.show_port(self.port['id'])
- self.assertEqual('200', resp['status'])
+ _, body = self.client.show_port(self.port['id'])
self._confirm_extra_dhcp_options(body['port'], extra_dhcp_opts)
def _confirm_extra_dhcp_options(self, port, extra_dhcp_opts):
diff --git a/tempest/api/network/test_floating_ips.py b/tempest/api/network/test_floating_ips.py
index 2463654..8b42a9e 100644
--- a/tempest/api/network/test_floating_ips.py
+++ b/tempest/api/network/test_floating_ips.py
@@ -68,9 +68,9 @@
@test.attr(type='smoke')
def test_create_list_show_update_delete_floating_ip(self):
# Creates a floating IP
- resp, body = self.client.create_floatingip(
- floating_network_id=self.ext_net_id, port_id=self.ports[0]['id'])
- self.assertEqual('201', resp['status'])
+ _, body = self.client.create_floatingip(
+ floating_network_id=self.ext_net_id,
+ port_id=self.ports[0]['id'])
created_floating_ip = body['floatingip']
self.addCleanup(self.client.delete_floatingip,
created_floating_ip['id'])
@@ -83,9 +83,7 @@
self.assertIn(created_floating_ip['fixed_ip_address'],
[ip['ip_address'] for ip in self.ports[0]['fixed_ips']])
# Verifies the details of a floating_ip
- resp, floating_ip = self.client.show_floatingip(
- created_floating_ip['id'])
- self.assertEqual('200', resp['status'])
+ _, floating_ip = self.client.show_floatingip(created_floating_ip['id'])
shown_floating_ip = floating_ip['floatingip']
self.assertEqual(shown_floating_ip['id'], created_floating_ip['id'])
self.assertEqual(shown_floating_ip['floating_network_id'],
@@ -97,16 +95,15 @@
self.assertEqual(shown_floating_ip['port_id'], self.ports[0]['id'])
# Verify the floating ip exists in the list of all floating_ips
- resp, floating_ips = self.client.list_floatingips()
- self.assertEqual('200', resp['status'])
+ _, floating_ips = self.client.list_floatingips()
floatingip_id_list = list()
for f in floating_ips['floatingips']:
floatingip_id_list.append(f['id'])
self.assertIn(created_floating_ip['id'], floatingip_id_list)
# Associate floating IP to the other port
- resp, floating_ip = self.client.update_floatingip(
- created_floating_ip['id'], port_id=self.ports[1]['id'])
- self.assertEqual('200', resp['status'])
+ _, floating_ip = self.client.update_floatingip(
+ created_floating_ip['id'],
+ port_id=self.ports[1]['id'])
updated_floating_ip = floating_ip['floatingip']
self.assertEqual(updated_floating_ip['port_id'], self.ports[1]['id'])
self.assertEqual(updated_floating_ip['fixed_ip_address'],
@@ -114,9 +111,9 @@
self.assertEqual(updated_floating_ip['router_id'], self.router['id'])
# Disassociate floating IP from the port
- resp, floating_ip = self.client.update_floatingip(
- created_floating_ip['id'], port_id=None)
- self.assertEqual('200', resp['status'])
+ _, floating_ip = self.client.update_floatingip(
+ created_floating_ip['id'],
+ port_id=None)
updated_floating_ip = floating_ip['floatingip']
self.assertIsNone(updated_floating_ip['port_id'])
self.assertIsNone(updated_floating_ip['fixed_ip_address'])
@@ -125,24 +122,21 @@
@test.attr(type='smoke')
def test_floating_ip_delete_port(self):
# Create a floating IP
- resp, body = self.client.create_floatingip(
+ _, body = self.client.create_floatingip(
floating_network_id=self.ext_net_id)
- self.assertEqual('201', resp['status'])
created_floating_ip = body['floatingip']
self.addCleanup(self.client.delete_floatingip,
created_floating_ip['id'])
# Create a port
resp, port = self.client.create_port(network_id=self.network['id'])
created_port = port['port']
- resp, floating_ip = self.client.update_floatingip(
- created_floating_ip['id'], port_id=created_port['id'])
- self.assertEqual('200', resp['status'])
+ _, floating_ip = self.client.update_floatingip(
+ created_floating_ip['id'],
+ port_id=created_port['id'])
# Delete port
self.client.delete_port(created_port['id'])
# Verifies the details of the floating_ip
- resp, floating_ip = self.client.show_floatingip(
- created_floating_ip['id'])
- self.assertEqual('200', resp['status'])
+ _, floating_ip = self.client.show_floatingip(created_floating_ip['id'])
shown_floating_ip = floating_ip['floatingip']
# Confirm the fields are back to None
self.assertEqual(shown_floating_ip['id'], created_floating_ip['id'])
@@ -153,9 +147,9 @@
@test.attr(type='smoke')
def test_floating_ip_update_different_router(self):
# Associate a floating IP to a port on a router
- resp, body = self.client.create_floatingip(
- floating_network_id=self.ext_net_id, port_id=self.ports[1]['id'])
- self.assertEqual('201', resp['status'])
+ _, body = self.client.create_floatingip(
+ floating_network_id=self.ext_net_id,
+ port_id=self.ports[1]['id'])
created_floating_ip = body['floatingip']
self.addCleanup(self.client.delete_floatingip,
created_floating_ip['id'])
@@ -167,9 +161,9 @@
self.create_router_interface(router2['id'], subnet2['id'])
port_other_router = self.create_port(network2)
# Associate floating IP to the other port on another router
- resp, floating_ip = self.client.update_floatingip(
- created_floating_ip['id'], port_id=port_other_router['id'])
- self.assertEqual('200', resp['status'])
+ _, floating_ip = self.client.update_floatingip(
+ created_floating_ip['id'],
+ port_id=port_other_router['id'])
updated_floating_ip = floating_ip['floatingip']
self.assertEqual(updated_floating_ip['router_id'], router2['id'])
self.assertEqual(updated_floating_ip['port_id'],
@@ -178,20 +172,19 @@
@test.attr(type='smoke')
def test_create_floating_ip_specifying_a_fixed_ip_address(self):
- resp, body = self.client.create_floatingip(
+ _, body = self.client.create_floatingip(
floating_network_id=self.ext_net_id,
port_id=self.ports[1]['id'],
fixed_ip_address=self.ports[1]['fixed_ips'][0]['ip_address'])
- self.assertEqual('201', resp['status'])
created_floating_ip = body['floatingip']
self.addCleanup(self.client.delete_floatingip,
created_floating_ip['id'])
self.assertIsNotNone(created_floating_ip['id'])
self.assertEqual(created_floating_ip['fixed_ip_address'],
self.ports[1]['fixed_ips'][0]['ip_address'])
- resp, floating_ip = self.client.update_floatingip(
- created_floating_ip['id'], port_id=None)
- self.assertEqual('200', resp['status'])
+ _, floating_ip = self.client.update_floatingip(
+ created_floating_ip['id'],
+ port_id=None)
self.assertIsNone(floating_ip['floatingip']['port_id'])
@test.attr(type='smoke')
@@ -201,25 +194,23 @@
list_ips = [str(ip) for ip in ips[-3:-1]]
fixed_ips = [{'ip_address': list_ips[0]}, {'ip_address': list_ips[1]}]
# Create port
- resp, body = self.client.create_port(network_id=self.network['id'],
- fixed_ips=fixed_ips)
- self.assertEqual('201', resp['status'])
+ _, body = self.client.create_port(network_id=self.network['id'],
+ fixed_ips=fixed_ips)
port = body['port']
self.addCleanup(self.client.delete_port, port['id'])
# Create floating ip
- resp, body = self.client.create_floatingip(
- floating_network_id=self.ext_net_id, port_id=port['id'],
+ _, body = self.client.create_floatingip(
+ floating_network_id=self.ext_net_id,
+ port_id=port['id'],
fixed_ip_address=list_ips[0])
- self.assertEqual('201', resp['status'])
floating_ip = body['floatingip']
self.addCleanup(self.client.delete_floatingip, floating_ip['id'])
self.assertIsNotNone(floating_ip['id'])
self.assertEqual(floating_ip['fixed_ip_address'], list_ips[0])
# Update floating ip
- resp, body = self.client.update_floatingip(
- floating_ip['id'], port_id=port['id'],
- fixed_ip_address=list_ips[1])
- self.assertEqual('200', resp['status'])
+ _, body = self.client.update_floatingip(floating_ip['id'],
+ port_id=port['id'],
+ fixed_ip_address=list_ips[1])
update_floating_ip = body['floatingip']
self.assertEqual(update_floating_ip['fixed_ip_address'],
list_ips[1])
diff --git a/tempest/api/network/test_fwaas_extensions.py b/tempest/api/network/test_fwaas_extensions.py
index 555cbda..6eec79e 100644
--- a/tempest/api/network/test_fwaas_extensions.py
+++ b/tempest/api/network/test_fwaas_extensions.py
@@ -74,8 +74,7 @@
def _wait_for_active(self, fw_id):
def _wait():
- resp, firewall = self.client.show_firewall(fw_id)
- self.assertEqual('200', resp['status'])
+ _, firewall = self.client.show_firewall(fw_id)
firewall = firewall['firewall']
return firewall['status'] == 'ACTIVE'
@@ -87,8 +86,7 @@
@test.attr(type='smoke')
def test_list_firewall_rules(self):
# List firewall rules
- resp, fw_rules = self.client.list_firewall_rules()
- self.assertEqual('200', resp['status'])
+ _, fw_rules = self.client.list_firewall_rules()
fw_rules = fw_rules['firewall_rules']
self.assertIn((self.fw_rule['id'],
self.fw_rule['name'],
@@ -106,22 +104,19 @@
@test.attr(type='smoke')
def test_create_update_delete_firewall_rule(self):
# Create firewall rule
- resp, body = self.client.create_firewall_rule(
+ _, body = self.client.create_firewall_rule(
name=data_utils.rand_name("fw-rule"),
action="allow",
protocol="tcp")
- self.assertEqual('201', resp['status'])
fw_rule_id = body['firewall_rule']['id']
# Update firewall rule
- resp, body = self.client.update_firewall_rule(fw_rule_id,
- shared=True)
- self.assertEqual('200', resp['status'])
+ _, body = self.client.update_firewall_rule(fw_rule_id,
+ shared=True)
self.assertTrue(body["firewall_rule"]['shared'])
# Delete firewall rule
- resp, _ = self.client.delete_firewall_rule(fw_rule_id)
- self.assertEqual('204', resp['status'])
+ self.client.delete_firewall_rule(fw_rule_id)
# Confirm deletion
resp, fw_rules = self.client.list_firewall_rules()
self.assertNotIn(fw_rule_id,
@@ -130,15 +125,13 @@
@test.attr(type='smoke')
def test_show_firewall_rule(self):
# show a created firewall rule
- resp, fw_rule = self.client.show_firewall_rule(self.fw_rule['id'])
- self.assertEqual('200', resp['status'])
+ _, fw_rule = self.client.show_firewall_rule(self.fw_rule['id'])
for key, value in fw_rule['firewall_rule'].iteritems():
self.assertEqual(self.fw_rule[key], value)
@test.attr(type='smoke')
def test_list_firewall_policies(self):
- resp, fw_policies = self.client.list_firewall_policies()
- self.assertEqual('200', resp['status'])
+ _, fw_policies = self.client.list_firewall_policies()
fw_policies = fw_policies['firewall_policies']
self.assertIn((self.fw_policy['id'],
self.fw_policy['name'],
@@ -150,24 +143,21 @@
@test.attr(type='smoke')
def test_create_update_delete_firewall_policy(self):
# Create firewall policy
- resp, body = self.client.create_firewall_policy(
+ _, body = self.client.create_firewall_policy(
name=data_utils.rand_name("fw-policy"))
- self.assertEqual('201', resp['status'])
fw_policy_id = body['firewall_policy']['id']
self.addCleanup(self._try_delete_policy, fw_policy_id)
# Update firewall policy
- resp, body = self.client.update_firewall_policy(fw_policy_id,
- shared=True,
- name="updated_policy")
- self.assertEqual('200', resp['status'])
+ _, body = self.client.update_firewall_policy(fw_policy_id,
+ shared=True,
+ name="updated_policy")
updated_fw_policy = body["firewall_policy"]
self.assertTrue(updated_fw_policy['shared'])
self.assertEqual("updated_policy", updated_fw_policy['name'])
# Delete firewall policy
- resp, _ = self.client.delete_firewall_policy(fw_policy_id)
- self.assertEqual('204', resp['status'])
+ self.client.delete_firewall_policy(fw_policy_id)
# Confirm deletion
resp, fw_policies = self.client.list_firewall_policies()
fw_policies = fw_policies['firewall_policies']
@@ -176,9 +166,7 @@
@test.attr(type='smoke')
def test_show_firewall_policy(self):
# show a created firewall policy
- resp, fw_policy = self.client.show_firewall_policy(
- self.fw_policy['id'])
- self.assertEqual('200', resp['status'])
+ _, fw_policy = self.client.show_firewall_policy(self.fw_policy['id'])
fw_policy = fw_policy['firewall_policy']
for key, value in fw_policy.iteritems():
self.assertEqual(self.fw_policy[key], value)
@@ -195,10 +183,9 @@
router['id'], subnet['id'])
# Create firewall
- resp, body = self.client.create_firewall(
+ _, body = self.client.create_firewall(
name=data_utils.rand_name("firewall"),
firewall_policy_id=self.fw_policy['id'])
- self.assertEqual('201', resp['status'])
created_firewall = body['firewall']
firewall_id = created_firewall['id']
self.addCleanup(self._try_delete_firewall, firewall_id)
@@ -206,8 +193,7 @@
self._wait_for_active(firewall_id)
# show a created firewall
- resp, firewall = self.client.show_firewall(firewall_id)
- self.assertEqual('200', resp['status'])
+ _, firewall = self.client.show_firewall(firewall_id)
firewall = firewall['firewall']
for key, value in firewall.iteritems():
@@ -216,8 +202,7 @@
self.assertEqual(created_firewall[key], value)
# list firewall
- resp, firewalls = self.client.list_firewalls()
- self.assertEqual('200', resp['status'])
+ _, firewalls = self.client.list_firewalls()
firewalls = firewalls['firewalls']
self.assertIn((created_firewall['id'],
created_firewall['name'],
@@ -227,8 +212,7 @@
m['firewall_policy_id']) for m in firewalls])
# Delete firewall
- resp, _ = self.client.delete_firewall(firewall_id)
- self.assertEqual('204', resp['status'])
+ self.client.delete_firewall(firewall_id)
class FWaaSExtensionTestXML(FWaaSExtensionTestJSON):
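
The _wait_for_active() helper above polls show_firewall() until the firewall reports ACTIVE. A minimal standalone sketch of that polling loop; the helper name, timeout, and interval are illustrative assumptions, and the real test presumably delegates to a shared project utility:

import time


def poll_until_true(predicate, timeout=60, interval=1):
    # Call predicate() every `interval` seconds until it returns True or
    # `timeout` seconds elapse; report whether it ever succeeded.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(interval)
    return False

# Hypothetical use mirroring _wait_for_active():
#     self.assertTrue(poll_until_true(_wait), 'firewall never became ACTIVE')
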
diff --git a/tempest/api/network/test_load_balancer.py b/tempest/api/network/test_load_balancer.py
index 7a12ef6..e3109ea 100644
--- a/tempest/api/network/test_load_balancer.py
+++ b/tempest/api/network/test_load_balancer.py
@@ -67,34 +67,31 @@
delete_obj = getattr(self.client, 'delete_' + obj_name)
list_objs = getattr(self.client, 'list_' + obj_name + 's')
- resp, body = create_obj(**kwargs)
- self.assertEqual('201', resp['status'])
+ _, body = create_obj(**kwargs)
obj = body[obj_name]
self.addCleanup(delete_obj, obj['id'])
for key, value in obj.iteritems():
            # It is not relevant to filter by all arguments, which is why
            # there is a list of attributes to exclude (attr_exceptions)
if key not in attr_exceptions:
- resp, body = list_objs(**{key: value})
- self.assertEqual('200', resp['status'])
+ _, body = list_objs(**{key: value})
objs = [v[key] for v in body[obj_name + 's']]
self.assertIn(value, objs)
@test.attr(type='smoke')
def test_list_vips(self):
# Verify the vIP exists in the list of all vIPs
- resp, body = self.client.list_vips()
- self.assertEqual('200', resp['status'])
+ _, body = self.client.list_vips()
vips = body['vips']
self.assertIn(self.vip['id'], [v['id'] for v in vips])
@test.attr(type='smoke')
def test_list_vips_with_filter(self):
name = data_utils.rand_name('vip-')
- resp, body = self.client.create_pool(
- name=data_utils.rand_name("pool-"), lb_method="ROUND_ROBIN",
- protocol="HTTPS", subnet_id=self.subnet['id'])
- self.assertEqual('201', resp['status'])
+ _, body = self.client.create_pool(name=data_utils.rand_name("pool-"),
+ lb_method="ROUND_ROBIN",
+ protocol="HTTPS",
+ subnet_id=self.subnet['id'])
pool = body['pool']
self.addCleanup(self.client.delete_pool, pool['id'])
attr_exceptions = ['status', 'session_persistence',
@@ -116,18 +113,16 @@
protocol='HTTP',
subnet_id=self.subnet['id'])
pool = body['pool']
- resp, body = self.client.create_vip(name=name,
- protocol="HTTP",
- protocol_port=80,
- subnet_id=self.subnet['id'],
- pool_id=pool['id'],
- address=address)
- self.assertEqual('201', resp['status'])
+ _, body = self.client.create_vip(name=name,
+ protocol="HTTP",
+ protocol_port=80,
+ subnet_id=self.subnet['id'],
+ pool_id=pool['id'],
+ address=address)
vip = body['vip']
vip_id = vip['id']
# Confirm VIP's address correctness with a show
- resp, body = self.client.show_vip(vip_id)
- self.assertEqual('200', resp['status'])
+ _, body = self.client.show_vip(vip_id)
vip = body['vip']
self.assertEqual(address, vip['address'])
# Verification of vip update
@@ -136,13 +131,12 @@
persistence_type = "HTTP_COOKIE"
update_data = {"session_persistence": {
"type": persistence_type}}
- resp, body = self.client.update_vip(vip_id,
- name=new_name,
- description=new_description,
- connection_limit=10,
- admin_state_up=False,
- **update_data)
- self.assertEqual('200', resp['status'])
+ _, body = self.client.update_vip(vip_id,
+ name=new_name,
+ description=new_description,
+ connection_limit=10,
+ admin_state_up=False,
+ **update_data)
updated_vip = body['vip']
self.assertEqual(new_name, updated_vip['name'])
self.assertEqual(new_description, updated_vip['description'])
@@ -150,30 +144,24 @@
self.assertFalse(updated_vip['admin_state_up'])
self.assertEqual(persistence_type,
updated_vip['session_persistence']['type'])
- # Verification of vip delete
- resp, body = self.client.delete_vip(vip['id'])
- self.assertEqual('204', resp['status'])
+ self.client.delete_vip(vip['id'])
self.client.wait_for_resource_deletion('vip', vip['id'])
# Verification of pool update
new_name = "New_pool"
- resp, body = self.client.update_pool(pool['id'],
- name=new_name,
- description="new_description",
- lb_method='LEAST_CONNECTIONS')
- self.assertEqual('200', resp['status'])
+ _, body = self.client.update_pool(pool['id'],
+ name=new_name,
+ description="new_description",
+ lb_method='LEAST_CONNECTIONS')
updated_pool = body['pool']
self.assertEqual(new_name, updated_pool['name'])
self.assertEqual('new_description', updated_pool['description'])
self.assertEqual('LEAST_CONNECTIONS', updated_pool['lb_method'])
- # Verification of pool delete
- resp, body = self.client.delete_pool(pool['id'])
- self.assertEqual('204', resp['status'])
+ self.client.delete_pool(pool['id'])
@test.attr(type='smoke')
def test_show_vip(self):
# Verifies the details of a vip
- resp, body = self.client.show_vip(self.vip['id'])
- self.assertEqual('200', resp['status'])
+ _, body = self.client.show_vip(self.vip['id'])
vip = body['vip']
for key, value in vip.iteritems():
# 'status' should not be confirmed in api tests
@@ -183,17 +171,14 @@
@test.attr(type='smoke')
def test_show_pool(self):
        # Here we need a new pool with no dependency on any vip
- resp, body = self.client.create_pool(
- name=data_utils.rand_name("pool-"),
- lb_method='ROUND_ROBIN',
- protocol='HTTP',
- subnet_id=self.subnet['id'])
- self.assertEqual('201', resp['status'])
+ _, body = self.client.create_pool(name=data_utils.rand_name("pool-"),
+ lb_method='ROUND_ROBIN',
+ protocol='HTTP',
+ subnet_id=self.subnet['id'])
pool = body['pool']
self.addCleanup(self.client.delete_pool, pool['id'])
# Verifies the details of a pool
- resp, body = self.client.show_pool(pool['id'])
- self.assertEqual('200', resp['status'])
+ _, body = self.client.show_pool(pool['id'])
shown_pool = body['pool']
for key, value in pool.iteritems():
# 'status' should not be confirmed in api tests
@@ -203,8 +188,7 @@
@test.attr(type='smoke')
def test_list_pools(self):
# Verify the pool exists in the list of all pools
- resp, body = self.client.list_pools()
- self.assertEqual('200', resp['status'])
+ _, body = self.client.list_pools()
pools = body['pools']
self.assertIn(self.pool['id'], [p['id'] for p in pools])
@@ -222,8 +206,7 @@
@test.attr(type='smoke')
def test_list_members(self):
# Verify the member exists in the list of all members
- resp, body = self.client.list_members()
- self.assertEqual('200', resp['status'])
+ _, body = self.client.list_members()
members = body['members']
self.assertIn(self.member['id'], [m['id'] for m in members])
@@ -237,26 +220,22 @@
@test.attr(type='smoke')
def test_create_update_delete_member(self):
# Creates a member
- resp, body = self.client.create_member(address="10.0.9.47",
- protocol_port=80,
- pool_id=self.pool['id'])
- self.assertEqual('201', resp['status'])
+ _, body = self.client.create_member(address="10.0.9.47",
+ protocol_port=80,
+ pool_id=self.pool['id'])
member = body['member']
# Verification of member update
- resp, body = self.client.update_member(member['id'],
- admin_state_up=False)
- self.assertEqual('200', resp['status'])
+ _, body = self.client.update_member(member['id'],
+ admin_state_up=False)
updated_member = body['member']
self.assertFalse(updated_member['admin_state_up'])
# Verification of member delete
- resp, body = self.client.delete_member(member['id'])
- self.assertEqual('204', resp['status'])
+ self.client.delete_member(member['id'])
@test.attr(type='smoke')
def test_show_member(self):
# Verifies the details of a member
- resp, body = self.client.show_member(self.member['id'])
- self.assertEqual('200', resp['status'])
+ _, body = self.client.show_member(self.member['id'])
member = body['member']
for key, value in member.iteritems():
# 'status' should not be confirmed in api tests
@@ -266,8 +245,7 @@
@test.attr(type='smoke')
def test_list_health_monitors(self):
# Verify the health monitor exists in the list of all health monitors
- resp, body = self.client.list_health_monitors()
- self.assertEqual('200', resp['status'])
+ _, body = self.client.list_health_monitors()
health_monitors = body['health_monitors']
self.assertIn(self.health_monitor['id'],
[h['id'] for h in health_monitors])
@@ -282,31 +260,27 @@
@test.attr(type='smoke')
def test_create_update_delete_health_monitor(self):
# Creates a health_monitor
- resp, body = self.client.create_health_monitor(delay=4,
- max_retries=3,
- type="TCP",
- timeout=1)
- self.assertEqual('201', resp['status'])
+ _, body = self.client.create_health_monitor(delay=4,
+ max_retries=3,
+ type="TCP",
+ timeout=1)
health_monitor = body['health_monitor']
# Verification of health_monitor update
- resp, body = (self.client.update_health_monitor
- (health_monitor['id'],
- admin_state_up=False))
- self.assertEqual('200', resp['status'])
+ _, body = (self.client.update_health_monitor
+ (health_monitor['id'],
+ admin_state_up=False))
updated_health_monitor = body['health_monitor']
self.assertFalse(updated_health_monitor['admin_state_up'])
# Verification of health_monitor delete
- resp, body = self.client.delete_health_monitor(health_monitor['id'])
- self.assertEqual('204', resp['status'])
+ _, body = self.client.delete_health_monitor(health_monitor['id'])
@test.attr(type='smoke')
def test_create_health_monitor_http_type(self):
hm_type = "HTTP"
- resp, body = self.client.create_health_monitor(delay=4,
- max_retries=3,
- type=hm_type,
- timeout=1)
- self.assertEqual('201', resp['status'])
+ _, body = self.client.create_health_monitor(delay=4,
+ max_retries=3,
+ type=hm_type,
+ timeout=1)
health_monitor = body['health_monitor']
self.addCleanup(self.client.delete_health_monitor,
health_monitor['id'])
@@ -314,20 +288,18 @@
@test.attr(type='smoke')
def test_update_health_monitor_http_method(self):
- resp, body = self.client.create_health_monitor(delay=4,
- max_retries=3,
- type="HTTP",
- timeout=1)
- self.assertEqual('201', resp['status'])
+ _, body = self.client.create_health_monitor(delay=4,
+ max_retries=3,
+ type="HTTP",
+ timeout=1)
health_monitor = body['health_monitor']
self.addCleanup(self.client.delete_health_monitor,
health_monitor['id'])
- resp, body = (self.client.update_health_monitor
- (health_monitor['id'],
- http_method="POST",
- url_path="/home/user",
- expected_codes="290"))
- self.assertEqual('200', resp['status'])
+ _, body = (self.client.update_health_monitor
+ (health_monitor['id'],
+ http_method="POST",
+ url_path="/home/user",
+ expected_codes="290"))
updated_health_monitor = body['health_monitor']
self.assertEqual("POST", updated_health_monitor['http_method'])
self.assertEqual("/home/user", updated_health_monitor['url_path'])
@@ -336,8 +308,7 @@
@test.attr(type='smoke')
def test_show_health_monitor(self):
# Verifies the details of a health_monitor
- resp, body = self.client.show_health_monitor(self.health_monitor['id'])
- self.assertEqual('200', resp['status'])
+ _, body = self.client.show_health_monitor(self.health_monitor['id'])
health_monitor = body['health_monitor']
for key, value in health_monitor.iteritems():
# 'status' should not be confirmed in api tests
@@ -347,9 +318,8 @@
@test.attr(type='smoke')
def test_associate_disassociate_health_monitor_with_pool(self):
# Verify that a health monitor can be associated with a pool
- resp, body = (self.client.associate_health_monitor_with_pool
- (self.health_monitor['id'], self.pool['id']))
- self.assertEqual('201', resp['status'])
+ _, body = (self.client.associate_health_monitor_with_pool
+ (self.health_monitor['id'], self.pool['id']))
resp, body = self.client.show_health_monitor(
self.health_monitor['id'])
health_monitor = body['health_monitor']
@@ -359,10 +329,9 @@
[p['pool_id'] for p in health_monitor['pools']])
self.assertIn(health_monitor['id'], pool['health_monitors'])
# Verify that a health monitor can be disassociated from a pool
- resp, body = (self.client.disassociate_health_monitor_with_pool
- (self.health_monitor['id'], self.pool['id']))
- self.assertEqual('204', resp['status'])
- resp, body = self.client.show_pool(self.pool['id'])
+ (self.client.disassociate_health_monitor_with_pool
+ (self.health_monitor['id'], self.pool['id']))
+ _, body = self.client.show_pool(self.pool['id'])
pool = body['pool']
resp, body = self.client.show_health_monitor(
self.health_monitor['id'])
@@ -374,8 +343,7 @@
@test.attr(type='smoke')
def test_get_lb_pool_stats(self):
# Verify the details of pool stats
- resp, body = self.client.list_lb_pool_stats(self.pool['id'])
- self.assertEqual('200', resp['status'])
+ _, body = self.client.list_lb_pool_stats(self.pool['id'])
stats = body['stats']
self.assertIn("bytes_in", stats)
self.assertIn("total_connections", stats)
@@ -384,52 +352,41 @@
@test.attr(type='smoke')
def test_update_list_of_health_monitors_associated_with_pool(self):
- resp, _ = (self.client.associate_health_monitor_with_pool
- (self.health_monitor['id'], self.pool['id']))
- self.assertEqual('201', resp['status'])
- resp, _ = self.client.update_health_monitor(
+ (self.client.associate_health_monitor_with_pool
+ (self.health_monitor['id'], self.pool['id']))
+ self.client.update_health_monitor(
self.health_monitor['id'], admin_state_up=False)
- self.assertEqual('200', resp['status'])
- resp, body = self.client.show_pool(self.pool['id'])
- self.assertEqual('200', resp['status'])
+ _, body = self.client.show_pool(self.pool['id'])
health_monitors = body['pool']['health_monitors']
for health_monitor_id in health_monitors:
- resp, body = self.client.show_health_monitor(health_monitor_id)
- self.assertEqual('200', resp['status'])
+ _, body = self.client.show_health_monitor(health_monitor_id)
self.assertFalse(body['health_monitor']['admin_state_up'])
- resp, _ = (self.client.disassociate_health_monitor_with_pool
- (self.health_monitor['id'], self.pool['id']))
- self.assertEqual('204', resp['status'])
+ (self.client.disassociate_health_monitor_with_pool
+ (self.health_monitor['id'], self.pool['id']))
@test.attr(type='smoke')
def test_update_admin_state_up_of_pool(self):
- resp, _ = self.client.update_pool(self.pool['id'],
- admin_state_up=False)
- self.assertEqual('200', resp['status'])
- resp, body = self.client.show_pool(self.pool['id'])
- self.assertEqual('200', resp['status'])
+ self.client.update_pool(self.pool['id'],
+ admin_state_up=False)
+ _, body = self.client.show_pool(self.pool['id'])
pool = body['pool']
self.assertFalse(pool['admin_state_up'])
@test.attr(type='smoke')
def test_show_vip_associated_with_pool(self):
- resp, body = self.client.show_pool(self.pool['id'])
- self.assertEqual('200', resp['status'])
+ _, body = self.client.show_pool(self.pool['id'])
pool = body['pool']
- resp, body = self.client.show_vip(pool['vip_id'])
- self.assertEqual('200', resp['status'])
+ _, body = self.client.show_vip(pool['vip_id'])
vip = body['vip']
self.assertEqual(self.vip['name'], vip['name'])
self.assertEqual(self.vip['id'], vip['id'])
@test.attr(type='smoke')
def test_show_members_associated_with_pool(self):
- resp, body = self.client.show_pool(self.pool['id'])
- self.assertEqual('200', resp['status'])
+ _, body = self.client.show_pool(self.pool['id'])
members = body['pool']['members']
for member_id in members:
- resp, body = self.client.show_member(member_id)
- self.assertEqual('200', resp['status'])
+ _, body = self.client.show_member(member_id)
self.assertIsNotNone(body['member']['status'])
self.assertEqual(member_id, body['member']['id'])
self.assertIsNotNone(body['member']['admin_state_up'])
@@ -437,34 +394,28 @@
@test.attr(type='smoke')
def test_update_pool_related_to_member(self):
# Create new pool
- resp, body = self.client.create_pool(
- name=data_utils.rand_name("pool-"),
- lb_method='ROUND_ROBIN',
- protocol='HTTP',
- subnet_id=self.subnet['id'])
- self.assertEqual('201', resp['status'])
+ _, body = self.client.create_pool(name=data_utils.rand_name("pool-"),
+ lb_method='ROUND_ROBIN',
+ protocol='HTTP',
+ subnet_id=self.subnet['id'])
new_pool = body['pool']
self.addCleanup(self.client.delete_pool, new_pool['id'])
# Update member with new pool's id
- resp, body = self.client.update_member(self.member['id'],
- pool_id=new_pool['id'])
- self.assertEqual('200', resp['status'])
+ _, body = self.client.update_member(self.member['id'],
+ pool_id=new_pool['id'])
        # Confirm with show that pool_id changed
resp, body = self.client.show_member(self.member['id'])
member = body['member']
self.assertEqual(member['pool_id'], new_pool['id'])
        # Update member with the old pool id; this is needed for cleanup
- resp, body = self.client.update_member(self.member['id'],
- pool_id=self.pool['id'])
- self.assertEqual('200', resp['status'])
+ _, body = self.client.update_member(self.member['id'],
+ pool_id=self.pool['id'])
@test.attr(type='smoke')
def test_update_member_weight(self):
- resp, _ = self.client.update_member(self.member['id'],
- weight=2)
- self.assertEqual('200', resp['status'])
- resp, body = self.client.show_member(self.member['id'])
- self.assertEqual('200', resp['status'])
+ self.client.update_member(self.member['id'],
+ weight=2)
+ _, body = self.client.show_member(self.member['id'])
member = body['member']
self.assertEqual(2, member['weight'])
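
The filter test near the top of this file's diff builds its create_/list_ calls dynamically from the object name via getattr. A standalone illustration of that dispatch pattern; FakeClient and its methods are invented purely for this sketch:

class FakeClient(object):
    # Invented stand-in for the network client; only the method-name
    # convention (create_<obj>, list_<obj>s) mirrors the test code.
    def create_pool(self, **kwargs):
        return {'pool': dict(kwargs, id='pool-1')}

    def list_pools(self, **filters):
        return {'pools': [{'id': 'pool-1', 'name': 'demo'}]}


def create_then_list(client, obj_name, **kwargs):
    create_obj = getattr(client, 'create_' + obj_name)
    list_objs = getattr(client, 'list_' + obj_name + 's')
    obj = create_obj(**kwargs)[obj_name]
    return obj, list_objs()


created, listing = create_then_list(FakeClient(), 'pool', name='demo')
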
diff --git a/tempest/api/network/test_metering_extensions.py b/tempest/api/network/test_metering_extensions.py
index 08ccbfe..5b8db43 100644
--- a/tempest/api/network/test_metering_extensions.py
+++ b/tempest/api/network/test_metering_extensions.py
@@ -56,8 +56,7 @@
def _delete_metering_label(self, metering_label_id):
# Deletes a label and verifies if it is deleted or not
- resp, body = self.admin_client.delete_metering_label(metering_label_id)
- self.assertEqual(204, resp.status)
+ _, body = self.admin_client.delete_metering_label(metering_label_id)
# Asserting that the label is not found in list after deletion
resp, labels = (self.admin_client.list_metering_labels(
id=metering_label_id))
@@ -65,9 +64,8 @@
def _delete_metering_label_rule(self, metering_label_rule_id):
# Deletes a rule and verifies if it is deleted or not
- resp, body = (self.admin_client.delete_metering_label_rule(
- metering_label_rule_id))
- self.assertEqual(204, resp.status)
+ _, body = (self.admin_client.delete_metering_label_rule(
+ metering_label_rule_id))
# Asserting that the rule is not found in list after deletion
resp, rules = (self.admin_client.list_metering_label_rules(
id=metering_label_rule_id))
@@ -76,8 +74,7 @@
@test.attr(type='smoke')
def test_list_metering_labels(self):
# Verify label filtering
- resp, body = self.admin_client.list_metering_labels(id=33)
- self.assertEqual('200', resp['status'])
+ _, body = self.admin_client.list_metering_labels(id=33)
metering_labels = body['metering_labels']
self.assertEqual(0, len(metering_labels))
@@ -86,9 +83,8 @@
# Creates a label
name = data_utils.rand_name('metering-label-')
description = "label created by tempest"
- resp, body = (self.admin_client.create_metering_label(name=name,
- description=description))
- self.assertEqual('201', resp['status'])
+ _, body = (self.admin_client.create_metering_label(name=name,
+ description=description))
metering_label = body['metering_label']
self.addCleanup(self._delete_metering_label,
metering_label['id'])
@@ -101,9 +97,8 @@
@test.attr(type='smoke')
def test_show_metering_label(self):
# Verifies the details of a label
- resp, body = (self.admin_client.show_metering_label(
- self.metering_label['id']))
- self.assertEqual('200', resp['status'])
+ _, body = (self.admin_client.show_metering_label(
+ self.metering_label['id']))
metering_label = body['metering_label']
self.assertEqual(self.metering_label['id'], metering_label['id'])
self.assertEqual(self.metering_label['tenant_id'],
@@ -115,19 +110,17 @@
@test.attr(type='smoke')
def test_list_metering_label_rules(self):
# Verify rule filtering
- resp, body = self.admin_client.list_metering_label_rules(id=33)
- self.assertEqual('200', resp['status'])
+ _, body = self.admin_client.list_metering_label_rules(id=33)
metering_label_rules = body['metering_label_rules']
self.assertEqual(0, len(metering_label_rules))
@test.attr(type='smoke')
def test_create_delete_metering_label_rule_with_filters(self):
# Creates a rule
- resp, body = (self.admin_client.create_metering_label_rule(
- remote_ip_prefix="10.0.1.0/24",
- direction="ingress",
- metering_label_id=self.metering_label['id']))
- self.assertEqual('201', resp['status'])
+ _, body = (self.admin_client.create_metering_label_rule(
+ remote_ip_prefix="10.0.1.0/24",
+ direction="ingress",
+ metering_label_id=self.metering_label['id']))
metering_label_rule = body['metering_label_rule']
self.addCleanup(self._delete_metering_label_rule,
metering_label_rule['id'])
@@ -140,9 +133,8 @@
@test.attr(type='smoke')
def test_show_metering_label_rule(self):
# Verifies the details of a rule
- resp, body = (self.admin_client.show_metering_label_rule(
- self.metering_label_rule['id']))
- self.assertEqual('200', resp['status'])
+ _, body = (self.admin_client.show_metering_label_rule(
+ self.metering_label_rule['id']))
metering_label_rule = body['metering_label_rule']
self.assertEqual(self.metering_label_rule['id'],
metering_label_rule['id'])
diff --git a/tempest/api/network/test_networks.py b/tempest/api/network/test_networks.py
index ac3a072..f3da614 100644
--- a/tempest/api/network/test_networks.py
+++ b/tempest/api/network/test_networks.py
@@ -71,14 +71,13 @@
def test_create_update_delete_network_subnet(self):
# Create a network
name = data_utils.rand_name('network-')
- resp, body = self.client.create_network(name=name)
- self.assertEqual('201', resp['status'])
+ _, body = self.client.create_network(name=name)
network = body['network']
net_id = network['id']
+ self.assertEqual('ACTIVE', network['status'])
# Verify network update
new_name = "New_network"
- resp, body = self.client.update_network(net_id, name=new_name)
- self.assertEqual('200', resp['status'])
+ _, body = self.client.update_network(net_id, name=new_name)
updated_net = body['network']
self.assertEqual(updated_net['name'], new_name)
# Find a cidr that is not in use yet and create a subnet with it
@@ -86,23 +85,19 @@
subnet_id = subnet['id']
# Verify subnet update
new_name = "New_subnet"
- resp, body = self.client.update_subnet(subnet_id, name=new_name)
- self.assertEqual('200', resp['status'])
+ _, body = self.client.update_subnet(subnet_id, name=new_name)
updated_subnet = body['subnet']
self.assertEqual(updated_subnet['name'], new_name)
# Delete subnet and network
- resp, body = self.client.delete_subnet(subnet_id)
- self.assertEqual('204', resp['status'])
+ _, body = self.client.delete_subnet(subnet_id)
# Remove subnet from cleanup list
self.subnets.pop()
- resp, body = self.client.delete_network(net_id)
- self.assertEqual('204', resp['status'])
+ _, body = self.client.delete_network(net_id)
@test.attr(type='smoke')
def test_show_network(self):
# Verify the details of a network
- resp, body = self.client.show_network(self.network['id'])
- self.assertEqual('200', resp['status'])
+ _, body = self.client.show_network(self.network['id'])
network = body['network']
for key in ['id', 'name']:
self.assertEqual(network[key], self.network[key])
@@ -111,9 +106,8 @@
def test_show_network_fields(self):
# Verify specific fields of a network
fields = ['id', 'name']
- resp, body = self.client.show_network(self.network['id'],
- fields=fields)
- self.assertEqual('200', resp['status'])
+ _, body = self.client.show_network(self.network['id'],
+ fields=fields)
network = body['network']
self.assertEqual(sorted(network.keys()), sorted(fields))
for field_name in fields:
@@ -122,8 +116,7 @@
@test.attr(type='smoke')
def test_list_networks(self):
# Verify the network exists in the list of all networks
- resp, body = self.client.list_networks()
- self.assertEqual('200', resp['status'])
+ _, body = self.client.list_networks()
networks = [network['id'] for network in body['networks']
if network['id'] == self.network['id']]
self.assertNotEmpty(networks, "Created network not found in the list")
@@ -132,8 +125,7 @@
def test_list_networks_fields(self):
# Verify specific fields of the networks
fields = ['id', 'name']
- resp, body = self.client.list_networks(fields=fields)
- self.assertEqual('200', resp['status'])
+ _, body = self.client.list_networks(fields=fields)
networks = body['networks']
self.assertNotEmpty(networks, "Network list returned is empty")
for network in networks:
@@ -142,8 +134,7 @@
@test.attr(type='smoke')
def test_show_subnet(self):
# Verify the details of a subnet
- resp, body = self.client.show_subnet(self.subnet['id'])
- self.assertEqual('200', resp['status'])
+ _, body = self.client.show_subnet(self.subnet['id'])
subnet = body['subnet']
self.assertNotEmpty(subnet, "Subnet returned has no fields")
for key in ['id', 'cidr']:
@@ -154,9 +145,8 @@
def test_show_subnet_fields(self):
# Verify specific fields of a subnet
fields = ['id', 'network_id']
- resp, body = self.client.show_subnet(self.subnet['id'],
- fields=fields)
- self.assertEqual('200', resp['status'])
+ _, body = self.client.show_subnet(self.subnet['id'],
+ fields=fields)
subnet = body['subnet']
self.assertEqual(sorted(subnet.keys()), sorted(fields))
for field_name in fields:
@@ -165,8 +155,7 @@
@test.attr(type='smoke')
def test_list_subnets(self):
# Verify the subnet exists in the list of all subnets
- resp, body = self.client.list_subnets()
- self.assertEqual('200', resp['status'])
+ _, body = self.client.list_subnets()
subnets = [subnet['id'] for subnet in body['subnets']
if subnet['id'] == self.subnet['id']]
self.assertNotEmpty(subnets, "Created subnet not found in the list")
@@ -175,8 +164,7 @@
def test_list_subnets_fields(self):
# Verify specific fields of subnets
fields = ['id', 'network_id']
- resp, body = self.client.list_subnets(fields=fields)
- self.assertEqual('200', resp['status'])
+ _, body = self.client.list_subnets(fields=fields)
subnets = body['subnets']
self.assertNotEmpty(subnets, "Subnet list returned is empty")
for subnet in subnets:
@@ -194,8 +182,7 @@
def test_delete_network_with_subnet(self):
# Creates a network
name = data_utils.rand_name('network-')
- resp, body = self.client.create_network(name=name)
- self.assertEqual('201', resp['status'])
+ _, body = self.client.create_network(name=name)
network = body['network']
net_id = network['id']
self.addCleanup(self._try_delete_network, net_id)
@@ -205,8 +192,7 @@
subnet_id = subnet['id']
# Delete network while the subnet still exists
- resp, body = self.client.delete_network(net_id)
- self.assertEqual('204', resp['status'])
+ _, body = self.client.delete_network(net_id)
# Verify that the subnet got automatically deleted.
self.assertRaises(exceptions.NotFound, self.client.show_subnet,
@@ -221,16 +207,14 @@
def test_create_delete_subnet_with_gw(self):
gateway = '10.100.0.13'
name = data_utils.rand_name('network-')
- resp, body = self.client.create_network(name=name)
- self.assertEqual('201', resp['status'])
+ _, body = self.client.create_network(name=name)
network = body['network']
net_id = network['id']
subnet = self.create_subnet(network, gateway)
# Verifies Subnet GW in IPv4
self.assertEqual(subnet['gateway_ip'], gateway)
# Delete network and subnet
- resp, body = self.client.delete_network(net_id)
- self.assertEqual('204', resp['status'])
+ self.client.delete_network(net_id)
self.subnets.pop()
@test.attr(type='smoke')
@@ -238,16 +222,14 @@
net = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
gateway_ip = str(netaddr.IPAddress(net.first + 1))
name = data_utils.rand_name('network-')
- resp, body = self.client.create_network(name=name)
- self.assertEqual('201', resp['status'])
+ _, body = self.client.create_network(name=name)
network = body['network']
net_id = network['id']
subnet = self.create_subnet(network)
# Verifies Subnet GW in IPv4
self.assertEqual(subnet['gateway_ip'], gateway_ip)
# Delete network and subnet
- resp, body = self.client.delete_network(net_id)
- self.assertEqual('204', resp['status'])
+ self.client.delete_network(net_id)
self.subnets.pop()
@@ -279,8 +261,7 @@
def _delete_networks(self, created_networks):
for n in created_networks:
- resp, body = self.client.delete_network(n['id'])
- self.assertEqual(204, resp.status)
+ self.client.delete_network(n['id'])
# Asserting that the networks are not found in the list after deletion
resp, body = self.client.list_networks()
networks_list = [network['id'] for network in body['networks']]
@@ -289,8 +270,7 @@
def _delete_subnets(self, created_subnets):
for n in created_subnets:
- resp, body = self.client.delete_subnet(n['id'])
- self.assertEqual(204, resp.status)
+ self.client.delete_subnet(n['id'])
# Asserting that the subnets are not found in the list after deletion
resp, body = self.client.list_subnets()
subnets_list = [subnet['id'] for subnet in body['subnets']]
@@ -299,8 +279,7 @@
def _delete_ports(self, created_ports):
for n in created_ports:
- resp, body = self.client.delete_port(n['id'])
- self.assertEqual(204, resp.status)
+ self.client.delete_port(n['id'])
# Asserting that the ports are not found in the list after deletion
resp, body = self.client.list_ports()
ports_list = [port['id'] for port in body['ports']]
@@ -312,9 +291,8 @@
# Creates 2 networks in one request
network_names = [data_utils.rand_name('network-'),
data_utils.rand_name('network-')]
- resp, body = self.client.create_bulk_network(2, network_names)
+ _, body = self.client.create_bulk_network(network_names)
created_networks = body['networks']
- self.assertEqual('201', resp['status'])
self.addCleanup(self._delete_networks, created_networks)
# Asserting that the networks are found in the list after creation
resp, body = self.client.list_networks()
@@ -344,10 +322,9 @@
}
subnets_list.append(p1)
del subnets_list[1]['name']
- resp, body = self.client.create_bulk_subnet(subnets_list)
+ _, body = self.client.create_bulk_subnet(subnets_list)
created_subnets = body['subnets']
self.addCleanup(self._delete_subnets, created_subnets)
- self.assertEqual('201', resp['status'])
# Asserting that the subnets are found in the list after creation
resp, body = self.client.list_subnets()
subnets_list = [subnet['id'] for subnet in body['subnets']]
@@ -370,10 +347,9 @@
}
port_list.append(p1)
del port_list[1]['name']
- resp, body = self.client.create_bulk_port(port_list)
+ _, body = self.client.create_bulk_port(port_list)
created_ports = body['ports']
self.addCleanup(self._delete_ports, created_ports)
- self.assertEqual('201', resp['status'])
# Asserting that the ports are found in the list after creation
resp, body = self.client.list_ports()
ports_list = [port['id'] for port in body['ports']]
@@ -398,33 +374,32 @@
@test.attr(type='smoke')
def test_create_delete_subnet_with_gw(self):
- gateway = '2003::2'
+ net = netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr)
+ gateway = str(netaddr.IPAddress(net.first + 2))
name = data_utils.rand_name('network-')
- resp, body = self.client.create_network(name=name)
- self.assertEqual('201', resp['status'])
+ _, body = self.client.create_network(name=name)
network = body['network']
net_id = network['id']
subnet = self.create_subnet(network, gateway)
# Verifies Subnet GW in IPv6
self.assertEqual(subnet['gateway_ip'], gateway)
# Delete network and subnet
- resp, body = self.client.delete_network(net_id)
- self.assertEqual('204', resp['status'])
+ self.client.delete_network(net_id)
self.subnets.pop()
@test.attr(type='smoke')
def test_create_delete_subnet_without_gw(self):
+ net = netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr)
+ gateway_ip = str(netaddr.IPAddress(net.first + 1))
name = data_utils.rand_name('network-')
- resp, body = self.client.create_network(name=name)
- self.assertEqual('201', resp['status'])
+ _, body = self.client.create_network(name=name)
network = body['network']
net_id = network['id']
subnet = self.create_subnet(network)
# Verifies Subnet GW in IPv6
- self.assertEqual(subnet['gateway_ip'], '2003::1')
+ self.assertEqual(subnet['gateway_ip'], gateway_ip)
# Delete network and subnet
- resp, body = self.client.delete_network(net_id)
- self.assertEqual('204', resp['status'])
+ _, body = self.client.delete_network(net_id)
self.subnets.pop()
@testtools.skipUnless(CONF.network_feature_enabled.ipv6_subnet_attributes,
@@ -433,8 +408,7 @@
@test.attr(type='smoke')
def test_create_delete_subnet_with_v6_attributes(self):
name = data_utils.rand_name('network-')
- resp, body = self.client.create_network(name=name)
- self.assertEqual('201', resp['status'])
+ _, body = self.client.create_network(name=name)
network = body['network']
net_id = network['id']
subnet = self.create_subnet(network,
@@ -446,8 +420,7 @@
self.assertEqual(subnet['ipv6_ra_mode'], 'slaac')
self.assertEqual(subnet['ipv6_address_mode'], 'slaac')
# Delete network and subnet
- resp, body = self.client.delete_network(net_id)
- self.assertEqual('204', resp['status'])
+ self.client.delete_network(net_id)
self.subnets.pop()
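
The IPv6 subnet tests above stop hard-coding gateway addresses such as '2003::2' and instead derive the expected gateway from the configured CIDR. A short sketch of that derivation with netaddr, mirroring the code added in this patch; the CIDR literal is only an example standing in for tenant_network_v6_cidr:

import netaddr

net = netaddr.IPNetwork('2003::/64')  # example value, not the real config
default_gateway = str(netaddr.IPAddress(net.first + 1))   # '2003::1'
explicit_gateway = str(netaddr.IPAddress(net.first + 2))  # '2003::2'
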
diff --git a/tempest/api/network/test_ports.py b/tempest/api/network/test_ports.py
index e6e6ea1..f06d17c 100644
--- a/tempest/api/network/test_ports.py
+++ b/tempest/api/network/test_ports.py
@@ -44,29 +44,24 @@
cls.port = cls.create_port(cls.network)
def _delete_port(self, port_id):
- resp, body = self.client.delete_port(port_id)
- self.assertEqual('204', resp['status'])
- resp, body = self.client.list_ports()
- self.assertEqual('200', resp['status'])
+ self.client.delete_port(port_id)
+ _, body = self.client.list_ports()
ports_list = body['ports']
self.assertFalse(port_id in [n['id'] for n in ports_list])
@test.attr(type='smoke')
def test_create_update_delete_port(self):
# Verify port creation
- resp, body = self.client.create_port(network_id=self.network['id'])
- self.assertEqual('201', resp['status'])
+ _, body = self.client.create_port(network_id=self.network['id'])
port = body['port']
# Schedule port deletion with verification upon test completion
self.addCleanup(self._delete_port, port['id'])
self.assertTrue(port['admin_state_up'])
# Verify port update
new_name = "New_Port"
- resp, body = self.client.update_port(
- port['id'],
- name=new_name,
- admin_state_up=False)
- self.assertEqual('200', resp['status'])
+ _, body = self.client.update_port(port['id'],
+ name=new_name,
+ admin_state_up=False)
updated_port = body['port']
self.assertEqual(updated_port['name'], new_name)
self.assertFalse(updated_port['admin_state_up'])
@@ -74,8 +69,7 @@
@test.attr(type='smoke')
def test_show_port(self):
# Verify the details of port
- resp, body = self.client.show_port(self.port['id'])
- self.assertEqual('200', resp['status'])
+ _, body = self.client.show_port(self.port['id'])
port = body['port']
self.assertIn('id', port)
self.assertEqual(port['id'], self.port['id'])
@@ -95,9 +89,8 @@
def test_show_port_fields(self):
# Verify specific fields of a port
fields = ['id', 'mac_address']
- resp, body = self.client.show_port(self.port['id'],
- fields=fields)
- self.assertEqual('200', resp['status'])
+ _, body = self.client.show_port(self.port['id'],
+ fields=fields)
port = body['port']
self.assertEqual(sorted(port.keys()), sorted(fields))
for field_name in fields:
@@ -106,8 +99,7 @@
@test.attr(type='smoke')
def test_list_ports(self):
# Verify the port exists in the list of all ports
- resp, body = self.client.list_ports()
- self.assertEqual('200', resp['status'])
+ _, body = self.client.list_ports()
ports = [port['id'] for port in body['ports']
if port['id'] == self.port['id']]
self.assertNotEmpty(ports, "Created port not found in the list")
@@ -125,9 +117,7 @@
self.addCleanup(self.client.remove_router_interface_with_port_id,
router['id'], port['port']['id'])
# List ports filtered by router_id
- resp, port_list = self.client.list_ports(
- device_id=router['id'])
- self.assertEqual('200', resp['status'])
+ _, port_list = self.client.list_ports(device_id=router['id'])
ports = port_list['ports']
self.assertEqual(len(ports), 1)
self.assertEqual(ports[0]['id'], port['port']['id'])
@@ -137,8 +127,7 @@
def test_list_ports_fields(self):
# Verify specific fields of ports
fields = ['id', 'mac_address']
- resp, body = self.client.list_ports(fields=fields)
- self.assertEqual('200', resp['status'])
+ _, body = self.client.list_ports(fields=fields)
ports = body['ports']
self.assertNotEmpty(ports, "Port list returned is empty")
# Asserting the fields returned are correct
@@ -190,8 +179,7 @@
def test_create_port_binding_ext_attr(self):
post_body = {"network_id": self.network['id'],
"binding:host_id": self.host_id}
- resp, body = self.admin_client.create_port(**post_body)
- self.assertEqual('201', resp['status'])
+ _, body = self.admin_client.create_port(**post_body)
port = body['port']
self.addCleanup(self.admin_client.delete_port, port['id'])
host_id = port['binding:host_id']
@@ -201,13 +189,11 @@
@test.attr(type='smoke')
def test_update_port_binding_ext_attr(self):
post_body = {"network_id": self.network['id']}
- resp, body = self.admin_client.create_port(**post_body)
- self.assertEqual('201', resp['status'])
+ _, body = self.admin_client.create_port(**post_body)
port = body['port']
self.addCleanup(self.admin_client.delete_port, port['id'])
update_body = {"binding:host_id": self.host_id}
- resp, body = self.admin_client.update_port(port['id'], **update_body)
- self.assertEqual('200', resp['status'])
+ _, body = self.admin_client.update_port(port['id'], **update_body)
updated_port = body['port']
host_id = updated_port['binding:host_id']
self.assertIsNotNone(host_id)
@@ -217,21 +203,18 @@
def test_list_ports_binding_ext_attr(self):
# Create a new port
post_body = {"network_id": self.network['id']}
- resp, body = self.admin_client.create_port(**post_body)
- self.assertEqual('201', resp['status'])
+ _, body = self.admin_client.create_port(**post_body)
port = body['port']
self.addCleanup(self.admin_client.delete_port, port['id'])
        # Update the port's binding attributes so that it is now 'bound'
# to a host
update_body = {"binding:host_id": self.host_id}
- resp, _ = self.admin_client.update_port(port['id'], **update_body)
- self.assertEqual('200', resp['status'])
+ self.admin_client.update_port(port['id'], **update_body)
# List all ports, ensure new port is part of list and its binding
# attributes are set and accurate
- resp, body = self.admin_client.list_ports()
- self.assertEqual('200', resp['status'])
+ _, body = self.admin_client.list_ports()
ports_list = body['ports']
pids_list = [p['id'] for p in ports_list]
self.assertIn(port['id'], pids_list)
@@ -243,13 +226,10 @@
@test.attr(type='smoke')
def test_show_port_binding_ext_attr(self):
- resp, body = self.admin_client.create_port(
- network_id=self.network['id'])
- self.assertEqual('201', resp['status'])
+ _, body = self.admin_client.create_port(network_id=self.network['id'])
port = body['port']
self.addCleanup(self.admin_client.delete_port, port['id'])
- resp, body = self.admin_client.show_port(port['id'])
- self.assertEqual('200', resp['status'])
+ _, body = self.admin_client.show_port(port['id'])
show_port = body['port']
self.assertEqual(port['binding:host_id'],
show_port['binding:host_id'])
diff --git a/tempest/api/network/test_routers.py b/tempest/api/network/test_routers.py
index 878335d..bcd8113 100644
--- a/tempest/api/network/test_routers.py
+++ b/tempest/api/network/test_routers.py
@@ -54,11 +54,10 @@
# NOTE(salv-orlando): Do not invoke self.create_router
# as we need to check the response code
name = data_utils.rand_name('router-')
- resp, create_body = self.client.create_router(
+ _, create_body = self.client.create_router(
name, external_gateway_info={
"network_id": CONF.network.public_network_id},
admin_state_up=False)
- self.assertEqual('201', resp['status'])
self.addCleanup(self._delete_router, create_body['router']['id'])
self.assertEqual(create_body['router']['name'], name)
self.assertEqual(
@@ -66,26 +65,22 @@
CONF.network.public_network_id)
self.assertEqual(create_body['router']['admin_state_up'], False)
# Show details of the created router
- resp, show_body = self.client.show_router(
- create_body['router']['id'])
- self.assertEqual('200', resp['status'])
+ _, show_body = self.client.show_router(create_body['router']['id'])
self.assertEqual(show_body['router']['name'], name)
self.assertEqual(
show_body['router']['external_gateway_info']['network_id'],
CONF.network.public_network_id)
self.assertEqual(show_body['router']['admin_state_up'], False)
# List routers and verify if created router is there in response
- resp, list_body = self.client.list_routers()
- self.assertEqual('200', resp['status'])
+ _, list_body = self.client.list_routers()
routers_list = list()
for router in list_body['routers']:
routers_list.append(router['id'])
self.assertIn(create_body['router']['id'], routers_list)
# Update the name of router and verify if it is updated
updated_name = 'updated ' + name
- resp, update_body = self.client.update_router(
- create_body['router']['id'], name=updated_name)
- self.assertEqual('200', resp['status'])
+ _, update_body = self.client.update_router(create_body['router']['id'],
+ name=updated_name)
self.assertEqual(update_body['router']['name'], updated_name)
resp, show_body = self.client.show_router(
create_body['router']['id'])
@@ -97,15 +92,13 @@
test_tenant = data_utils.rand_name('test_tenant_')
test_description = data_utils.rand_name('desc_')
_, tenant = self.identity_admin_client.create_tenant(
- name=test_tenant,
- description=test_description)
+ name=test_tenant, description=test_description)
tenant_id = tenant['id']
self.addCleanup(self.identity_admin_client.delete_tenant, tenant_id)
name = data_utils.rand_name('router-')
- resp, create_body = self.admin_client.create_router(
- name, tenant_id=tenant_id)
- self.assertEqual('201', resp['status'])
+ _, create_body = self.admin_client.create_router(name,
+ tenant_id=tenant_id)
self.addCleanup(self.admin_client.delete_router,
create_body['router']['id'])
self.assertEqual(tenant_id, create_body['router']['tenant_id'])
@@ -131,9 +124,8 @@
external_gateway_info = {
'network_id': CONF.network.public_network_id,
'enable_snat': enable_snat}
- resp, create_body = self.admin_client.create_router(
+ _, create_body = self.admin_client.create_router(
name, external_gateway_info=external_gateway_info)
- self.assertEqual('201', resp['status'])
self.addCleanup(self.admin_client.delete_router,
create_body['router']['id'])
# Verify snat attributes after router creation
@@ -146,9 +138,8 @@
subnet = self.create_subnet(network)
router = self._create_router(data_utils.rand_name('router-'))
# Add router interface with subnet id
- resp, interface = self.client.add_router_interface_with_subnet_id(
+ _, interface = self.client.add_router_interface_with_subnet_id(
router['id'], subnet['id'])
- self.assertEqual('200', resp['status'])
self.addCleanup(self._remove_router_interface_with_subnet_id,
router['id'], subnet['id'])
self.assertIn('subnet_id', interface.keys())
@@ -167,9 +158,8 @@
resp, port_body = self.client.create_port(
network_id=network['id'])
# add router interface to port created above
- resp, interface = self.client.add_router_interface_with_port_id(
+ _, interface = self.client.add_router_interface_with_port_id(
router['id'], port_body['port']['id'])
- self.assertEqual('200', resp['status'])
self.addCleanup(self._remove_router_interface_with_port_id,
router['id'], port_body['port']['id'])
self.assertIn('subnet_id', interface.keys())
@@ -181,8 +171,7 @@
router['id'])
def _verify_router_gateway(self, router_id, exp_ext_gw_info=None):
- resp, show_body = self.admin_client.show_router(router_id)
- self.assertEqual('200', resp['status'])
+ _, show_body = self.admin_client.show_router(router_id)
actual_ext_gw_info = show_body['router']['external_gateway_info']
if exp_ext_gw_info is None:
self.assertIsNone(actual_ext_gw_info)
@@ -212,8 +201,7 @@
external_gateway_info={
'network_id': CONF.network.public_network_id})
# Verify operation - router
- resp, show_body = self.client.show_router(router['id'])
- self.assertEqual('200', resp['status'])
+ _, show_body = self.client.show_router(router['id'])
self._verify_router_gateway(
router['id'],
{'network_id': CONF.network.public_network_id})
@@ -297,16 +285,14 @@
cidr = netaddr.IPNetwork(self.subnet['cidr'])
next_hop = str(cidr[2])
destination = str(self.subnet['cidr'])
- resp, extra_route = self.client.update_extra_routes(
- self.router['id'], next_hop, destination)
- self.assertEqual('200', resp['status'])
+ _, extra_route = self.client.update_extra_routes(self.router['id'],
+ next_hop, destination)
self.assertEqual(1, len(extra_route['router']['routes']))
self.assertEqual(destination,
extra_route['router']['routes'][0]['destination'])
self.assertEqual(next_hop,
extra_route['router']['routes'][0]['nexthop'])
- resp, show_body = self.client.show_router(self.router['id'])
- self.assertEqual('200', resp['status'])
+ _, show_body = self.client.show_router(self.router['id'])
self.assertEqual(destination,
show_body['router']['routes'][0]['destination'])
self.assertEqual(next_hop,
@@ -320,12 +306,10 @@
self.router = self._create_router(data_utils.rand_name('router-'))
self.assertFalse(self.router['admin_state_up'])
# Update router admin state
- resp, update_body = self.client.update_router(self.router['id'],
- admin_state_up=True)
- self.assertEqual('200', resp['status'])
+ _, update_body = self.client.update_router(self.router['id'],
+ admin_state_up=True)
self.assertTrue(update_body['router']['admin_state_up'])
- resp, show_body = self.client.show_router(self.router['id'])
- self.assertEqual('200', resp['status'])
+ _, show_body = self.client.show_router(self.router['id'])
self.assertTrue(show_body['router']['admin_state_up'])
@test.attr(type='smoke')
@@ -348,8 +332,7 @@
interface02['port_id'])
def _verify_router_interface(self, router_id, subnet_id, port_id):
- resp, show_port_body = self.client.show_port(port_id)
- self.assertEqual('200', resp['status'])
+ _, show_port_body = self.client.show_port(port_id)
interface_port = show_port_body['port']
self.assertEqual(router_id, interface_port['device_id'])
self.assertEqual(subnet_id,
diff --git a/tempest/api/network/test_security_groups.py b/tempest/api/network/test_security_groups.py
index b98cea1..cea8344 100644
--- a/tempest/api/network/test_security_groups.py
+++ b/tempest/api/network/test_security_groups.py
@@ -33,8 +33,7 @@
@test.attr(type='smoke')
def test_list_security_groups(self):
# Verify that the security group belonging to the tenant exists in the list
- resp, body = self.client.list_security_groups()
- self.assertEqual('200', resp['status'])
+ _, body = self.client.list_security_groups()
security_groups = body['security_groups']
found = None
for n in security_groups:
@@ -48,8 +47,7 @@
group_create_body, name = self._create_security_group()
# List security groups and verify the created group is present in the response
- resp, list_body = self.client.list_security_groups()
- self.assertEqual('200', resp['status'])
+ _, list_body = self.client.list_security_groups()
secgroup_list = list()
for secgroup in list_body['security_groups']:
secgroup_list.append(secgroup['id'])
@@ -57,12 +55,11 @@
# Update the security group
new_name = data_utils.rand_name('security-')
new_description = data_utils.rand_name('security-description')
- resp, update_body = self.client.update_security_group(
+ _, update_body = self.client.update_security_group(
group_create_body['security_group']['id'],
name=new_name,
description=new_description)
# Verify if security group is updated
- self.assertEqual('200', resp['status'])
self.assertEqual(update_body['security_group']['name'], new_name)
self.assertEqual(update_body['security_group']['description'],
new_description)
@@ -80,18 +77,16 @@
# Create rules for each protocol
protocols = ['tcp', 'udp', 'icmp']
for protocol in protocols:
- resp, rule_create_body = self.client.create_security_group_rule(
+ _, rule_create_body = self.client.create_security_group_rule(
security_group_id=group_create_body['security_group']['id'],
protocol=protocol,
direction='ingress'
)
- self.assertEqual('201', resp['status'])
# Show details of the created security rule
- resp, show_rule_body = self.client.show_security_group_rule(
+ _, show_rule_body = self.client.show_security_group_rule(
rule_create_body['security_group_rule']['id']
)
- self.assertEqual('200', resp['status'])
create_dict = rule_create_body['security_group_rule']
for key, value in six.iteritems(create_dict):
self.assertEqual(value,
@@ -99,8 +94,7 @@
"%s does not match." % key)
# List rules and verify created rule is in response
- resp, rule_list_body = self.client.list_security_group_rules()
- self.assertEqual('200', resp['status'])
+ _, rule_list_body = self.client.list_security_group_rules()
rule_list = [rule['id']
for rule in rule_list_body['security_group_rules']]
self.assertIn(rule_create_body['security_group_rule']['id'],
@@ -117,7 +111,7 @@
protocol = 'tcp'
port_range_min = 77
port_range_max = 77
- resp, rule_create_body = self.client.create_security_group_rule(
+ _, rule_create_body = self.client.create_security_group_rule(
security_group_id=group_create_body['security_group']['id'],
direction=direction,
protocol=protocol,
@@ -125,7 +119,6 @@
port_range_max=port_range_max
)
- self.assertEqual('201', resp['status'])
sec_group_rule = rule_create_body['security_group_rule']
self.assertEqual(sec_group_rule['direction'], direction)
diff --git a/tempest/api/network/test_service_type_management.py b/tempest/api/network/test_service_type_management.py
index d272c47..dbb72fb 100644
--- a/tempest/api/network/test_service_type_management.py
+++ b/tempest/api/network/test_service_type_management.py
@@ -26,8 +26,7 @@
@test.attr(type='smoke')
def test_service_provider_list(self):
- resp, body = self.client.list_service_providers()
- self.assertEqual(resp['status'], '200')
+ _, body = self.client.list_service_providers()
self.assertIsInstance(body['service_providers'], list)
diff --git a/tempest/api/network/test_vpnaas_extensions.py b/tempest/api/network/test_vpnaas_extensions.py
index 0cc3f19..09e9640 100644
--- a/tempest/api/network/test_vpnaas_extensions.py
+++ b/tempest/api/network/test_vpnaas_extensions.py
@@ -61,8 +61,7 @@
for ike in all_ike['ikepolicies']:
ike_list.append(ike['id'])
if ike_policy_id in ike_list:
- resp, _ = self.client.delete_ikepolicy(ike_policy_id)
- self.assertEqual(204, resp.status)
+ self.client.delete_ikepolicy(ike_policy_id)
# Asserting that the policy is not found in list after deletion
resp, ikepolicies = self.client.list_ikepolicies()
ike_id_list = list()
@@ -85,8 +84,7 @@
self.assertEqual(value, actual[key])
def _delete_vpn_service(self, vpn_service_id):
- resp, _ = self.client.delete_vpnservice(vpn_service_id)
- self.assertEqual('204', resp['status'])
+ self.client.delete_vpnservice(vpn_service_id)
# Assert that the vpn service is no longer present in the list after deletion
_, body = self.client.list_vpnservices()
vpn_services = [vs['id'] for vs in body['vpnservices']]
@@ -107,9 +105,8 @@
tenant_id = self._get_tenant_id()
# Create IPSec policy for the newly created tenant
name = data_utils.rand_name('ipsec-policy')
- resp, body = (self.admin_client.
- create_ipsecpolicy(name=name, tenant_id=tenant_id))
- self.assertEqual('201', resp['status'])
+ _, body = (self.admin_client.
+ create_ipsecpolicy(name=name, tenant_id=tenant_id))
ipsecpolicy = body['ipsecpolicy']
self.assertIsNotNone(ipsecpolicy['id'])
self.addCleanup(self.admin_client.delete_ipsecpolicy,
@@ -126,13 +123,12 @@
# Create vpn service for the newly created tenant
name = data_utils.rand_name('vpn-service')
- resp, body = self.admin_client.create_vpnservice(
+ _, body = self.admin_client.create_vpnservice(
subnet_id=self.subnet['id'],
router_id=self.router['id'],
name=name,
admin_state_up=True,
tenant_id=tenant_id)
- self.assertEqual('201', resp['status'])
vpnservice = body['vpnservice']
self.assertIsNotNone(vpnservice['id'])
self.addCleanup(self.admin_client.delete_vpnservice, vpnservice['id'])
@@ -148,12 +144,11 @@
# Create IKE policy for the newly created tenant
name = data_utils.rand_name('ike-policy')
- resp, body = (self.admin_client.
- create_ikepolicy(name=name, ike_version="v1",
- encryption_algorithm="aes-128",
- auth_algorithm="sha1",
- tenant_id=tenant_id))
- self.assertEqual('201', resp['status'])
+ _, body = (self.admin_client.
+ create_ikepolicy(name=name, ike_version="v1",
+ encryption_algorithm="aes-128",
+ auth_algorithm="sha1",
+ tenant_id=tenant_id))
ikepolicy = body['ikepolicy']
self.assertIsNotNone(ikepolicy['id'])
self.addCleanup(self.admin_client.delete_ikepolicy, ikepolicy['id'])
@@ -166,8 +161,7 @@
@test.attr(type='smoke')
def test_list_vpn_services(self):
# Verify the VPN service exists in the list of all VPN services
- resp, body = self.client.list_vpnservices()
- self.assertEqual('200', resp['status'])
+ _, body = self.client.list_vpnservices()
vpnservices = body['vpnservices']
self.assertIn(self.vpnservice['id'], [v['id'] for v in vpnservices])
@@ -175,11 +169,10 @@
def test_create_update_delete_vpn_service(self):
# Creates a VPN service and sets up deletion
name = data_utils.rand_name('vpn-service')
- resp, body = self.client.create_vpnservice(subnet_id=self.subnet['id'],
- router_id=self.router['id'],
- name=name,
- admin_state_up=True)
- self.assertEqual('201', resp['status'])
+ _, body = self.client.create_vpnservice(subnet_id=self.subnet['id'],
+ router_id=self.router['id'],
+ name=name,
+ admin_state_up=True)
vpnservice = body['vpnservice']
self.addCleanup(self._delete_vpn_service, vpnservice['id'])
# Assert that the created vpn service is present in the vpn services list
@@ -196,8 +189,7 @@
@test.attr(type='smoke')
def test_show_vpn_service(self):
# Verifies the details of a vpn service
- resp, body = self.client.show_vpnservice(self.vpnservice['id'])
- self.assertEqual('200', resp['status'])
+ _, body = self.client.show_vpnservice(self.vpnservice['id'])
vpnservice = body['vpnservice']
self.assertEqual(self.vpnservice['id'], vpnservice['id'])
self.assertEqual(self.vpnservice['name'], vpnservice['name'])
@@ -213,8 +205,7 @@
@test.attr(type='smoke')
def test_list_ike_policies(self):
# Verify the ike policy exists in the list of all IKE policies
- resp, body = self.client.list_ikepolicies()
- self.assertEqual('200', resp['status'])
+ _, body = self.client.list_ikepolicies()
ikepolicies = body['ikepolicies']
self.assertIn(self.ikepolicy['id'], [i['id'] for i in ikepolicies])
@@ -222,12 +213,11 @@
def test_create_update_delete_ike_policy(self):
# Creates an IKE policy
name = data_utils.rand_name('ike-policy')
- resp, body = (self.client.create_ikepolicy(
- name=name,
- ike_version="v1",
- encryption_algorithm="aes-128",
- auth_algorithm="sha1"))
- self.assertEqual('201', resp['status'])
+ _, body = (self.client.create_ikepolicy(
+ name=name,
+ ike_version="v1",
+ encryption_algorithm="aes-128",
+ auth_algorithm="sha1"))
ikepolicy = body['ikepolicy']
self.assertIsNotNone(ikepolicy['id'])
self.addCleanup(self._delete_ike_policy, ikepolicy['id'])
@@ -239,8 +229,7 @@
'ike_version': "v2",
'pfs': "group14",
'lifetime': {'units': "seconds", 'value': 2000}}
- resp, _ = self.client.update_ikepolicy(ikepolicy['id'], **new_ike)
- self.assertEqual('200', resp['status'])
+ self.client.update_ikepolicy(ikepolicy['id'], **new_ike)
# Confirm that update was successful by verifying using 'show'
_, body = self.client.show_ikepolicy(ikepolicy['id'])
ike_policy = body['ikepolicy']
@@ -249,8 +238,7 @@
self.assertEqual(value, ike_policy[key])
# Verification of ike policy delete
- resp, _ = self.client.delete_ikepolicy(ikepolicy['id'])
- self.assertEqual('204', resp['status'])
+ self.client.delete_ikepolicy(ikepolicy['id'])
_, body = self.client.list_ikepolicies()
ikepolicies = [ikp['id'] for ikp in body['ikepolicies']]
self.assertNotIn(ike_policy['id'], ikepolicies)
@@ -258,8 +246,7 @@
@test.attr(type='smoke')
def test_show_ike_policy(self):
# Verifies the details of an IKE policy
- resp, body = self.client.show_ikepolicy(self.ikepolicy['id'])
- self.assertEqual('200', resp['status'])
+ _, body = self.client.show_ikepolicy(self.ikepolicy['id'])
ikepolicy = body['ikepolicy']
self.assertEqual(self.ikepolicy['id'], ikepolicy['id'])
self.assertEqual(self.ikepolicy['name'], ikepolicy['name'])
@@ -281,8 +268,7 @@
@test.attr(type='smoke')
def test_list_ipsec_policies(self):
# Verify the ipsec policy exists in the list of all ipsec policies
- resp, body = self.client.list_ipsecpolicies()
- self.assertEqual('200', resp['status'])
+ _, body = self.client.list_ipsecpolicies()
ipsecpolicies = body['ipsecpolicies']
self.assertIn(self.ipsecpolicy['id'], [i['id'] for i in ipsecpolicies])
@@ -293,8 +279,7 @@
'pfs': 'group5',
'encryption_algorithm': "aes-128",
'auth_algorithm': 'sha1'}
- resp, resp_body = self.client.create_ipsecpolicy(**ipsec_policy_body)
- self.assertEqual('201', resp['status'])
+ _, resp_body = self.client.create_ipsecpolicy(**ipsec_policy_body)
ipsecpolicy = resp_body['ipsecpolicy']
self.addCleanup(self._delete_ipsec_policy, ipsecpolicy['id'])
self._assertExpected(ipsec_policy_body, ipsecpolicy)
@@ -304,22 +289,19 @@
'name': data_utils.rand_name("New-IPSec"),
'encryption_algorithm': "aes-256",
'lifetime': {'units': "seconds", 'value': '2000'}}
- resp, body = self.client.update_ipsecpolicy(ipsecpolicy['id'],
- **new_ipsec)
- self.assertEqual('200', resp['status'])
+ _, body = self.client.update_ipsecpolicy(ipsecpolicy['id'],
+ **new_ipsec)
updated_ipsec_policy = body['ipsecpolicy']
self._assertExpected(new_ipsec, updated_ipsec_policy)
# Verification of ipsec policy delete
- resp, _ = self.client.delete_ipsecpolicy(ipsecpolicy['id'])
- self.assertEqual('204', resp['status'])
+ self.client.delete_ipsecpolicy(ipsecpolicy['id'])
self.assertRaises(exceptions.NotFound,
self.client.delete_ipsecpolicy, ipsecpolicy['id'])
@test.attr(type='smoke')
def test_show_ipsec_policy(self):
# Verifies the details of an ipsec policy
- resp, body = self.client.show_ipsecpolicy(self.ipsecpolicy['id'])
- self.assertEqual('200', resp['status'])
+ _, body = self.client.show_ipsecpolicy(self.ipsecpolicy['id'])
ipsecpolicy = body['ipsecpolicy']
self._assertExpected(self.ipsecpolicy, ipsecpolicy)
diff --git a/tempest/api/object_storage/test_object_temp_url.py b/tempest/api/object_storage/test_object_temp_url.py
index c597255..264a18a 100644
--- a/tempest/api/object_storage/test_object_temp_url.py
+++ b/tempest/api/object_storage/test_object_temp_url.py
@@ -185,3 +185,20 @@
resp, body = self.object_client.head(url)
self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
self.assertHeaders(resp, 'Object', 'HEAD')
+
+ @test.attr(type='gate')
+ @test.requires_ext(extension='tempurl', service='object')
+ def test_get_object_using_temp_url_with_inline_query_parameter(self):
+ expires = self._get_expiry_date()
+
+ # get a temp URL for the created object
+ url = self._get_temp_url(self.container_name, self.object_name, "GET",
+ expires, self.key)
+ url = url + '&inline'
+
+ # try to get the object using the temp URL within its expiry time
+ resp, body = self.object_client.get(url)
+ self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+ self.assertHeaders(resp, 'Object', 'GET')
+ self.assertEqual(body, self.content)
+ self.assertEqual(resp['content-disposition'], 'inline')
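
The new test above relies on the existing _get_temp_url helper; the sketch below is only an illustration of how such a Swift temp URL is conventionally signed and how the '&inline' query parameter is appended. The account path, object name and secret key are hypothetical.

    import hmac
    import time
    from hashlib import sha1

    key = 'secret-temp-url-key'                    # hypothetical X-Account-Meta-Temp-URL-Key
    path = '/v1/AUTH_demo/my-container/my-object'  # hypothetical object path
    expires = int(time.time()) + 600               # URL valid for ten minutes

    # Swift's tempurl middleware signs "<METHOD>\n<expires>\n<path>" with HMAC-SHA1.
    sig = hmac.new(key.encode(),
                   '\n'.join(['GET', str(expires), path]).encode(),
                   sha1).hexdigest()
    url = '%s?temp_url_sig=%s&temp_url_expires=%s' % (path, sig, expires)
    # As in the test above, '&inline' asks Swift to return the object with
    # "Content-Disposition: inline" rather than as an attachment.
    url += '&inline'
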
diff --git a/tempest/api/orchestration/base.py b/tempest/api/orchestration/base.py
index cfebc2c..d0fb825 100644
--- a/tempest/api/orchestration/base.py
+++ b/tempest/api/orchestration/base.py
@@ -51,7 +51,7 @@
@classmethod
def _get_default_network(cls):
- __, networks = cls.network_client.list_networks()
+ _, networks = cls.network_client.list_networks()
for net in networks['networks']:
if net['name'] == CONF.compute.fixed_network_name:
return net
@@ -86,13 +86,16 @@
pass
for stack_identifier in cls.stacks:
- cls.client.wait_for_stack_status(
- stack_identifier, 'DELETE_COMPLETE')
+ try:
+ cls.client.wait_for_stack_status(
+ stack_identifier, 'DELETE_COMPLETE')
+ except exceptions.NotFound:
+ pass
@classmethod
def _create_keypair(cls, name_start='keypair-heat-'):
kp_name = data_utils.rand_name(name_start)
- __, body = cls.keypairs_client.create_keypair(kp_name)
+ _, body = cls.keypairs_client.create_keypair(kp_name)
cls.keypairs.append(kp_name)
return body
@@ -108,9 +111,9 @@
def _create_image(cls, name_start='image-heat-', container_format='bare',
disk_format='iso'):
image_name = data_utils.rand_name(name_start)
- __, body = cls.images_v2_client.create_image(image_name,
- container_format,
- disk_format)
+ _, body = cls.images_v2_client.create_image(image_name,
+ container_format,
+ disk_format)
image_id = body['id']
cls.images.append(image_id)
return body
@@ -159,8 +162,7 @@
def list_resources(self, stack_identifier):
"""Get a dict mapping of resource names to types."""
- resp, resources = self.client.list_resources(stack_identifier)
- self.assertEqual('200', resp['status'])
+ _, resources = self.client.list_resources(stack_identifier)
self.assertIsInstance(resources, list)
for res in resources:
self.assert_fields_in_dict(res, 'logical_resource_id',
@@ -171,6 +173,5 @@
for r in resources)
def get_stack_output(self, stack_identifier, output_key):
- resp, body = self.client.get_stack(stack_identifier)
- self.assertEqual('200', resp['status'])
+ _, body = self.client.get_stack(stack_identifier)
return self.stack_output(body, output_key)
diff --git a/tempest/api/orchestration/stacks/test_neutron_resources.py b/tempest/api/orchestration/stacks/test_neutron_resources.py
index 26e3ac6..c1e2d59 100644
--- a/tempest/api/orchestration/stacks/test_neutron_resources.py
+++ b/tempest/api/orchestration/stacks/test_neutron_resources.py
@@ -71,11 +71,11 @@
# attempt to log the server console to help with debugging
# the cause of the server not signalling the waitcondition
# to heat.
- resp, body = cls.client.get_resource(cls.stack_identifier,
- 'Server')
+ _, body = cls.client.get_resource(cls.stack_identifier,
+ 'Server')
server_id = body['physical_resource_id']
LOG.debug('Console output for %s', server_id)
- resp, output = cls.servers_client.get_console_output(
+ _, output = cls.servers_client.get_console_output(
server_id, None)
LOG.debug(output)
raise e
@@ -99,22 +99,22 @@
self.assertEqual('CREATE_COMPLETE', resource['resource_status'])
@test.attr(type='slow')
+ @test.services('network')
def test_created_network(self):
"""Verifies created network."""
network_id = self.test_resources.get('Network')['physical_resource_id']
- resp, body = self.network_client.show_network(network_id)
- self.assertEqual('200', resp['status'])
+ _, body = self.network_client.show_network(network_id)
network = body['network']
self.assertIsInstance(network, dict)
self.assertEqual(network_id, network['id'])
self.assertEqual('NewNetwork', network['name'])
@test.attr(type='slow')
+ @test.services('network')
def test_created_subnet(self):
"""Verifies created subnet."""
subnet_id = self.test_resources.get('Subnet')['physical_resource_id']
- resp, body = self.network_client.show_subnet(subnet_id)
- self.assertEqual('200', resp['status'])
+ _, body = self.network_client.show_subnet(subnet_id)
subnet = body['subnet']
network_id = self.test_resources.get('Network')['physical_resource_id']
self.assertEqual(subnet_id, subnet['id'])
@@ -126,11 +126,11 @@
self.assertEqual(str(self.subnet_cidr), subnet['cidr'])
@test.attr(type='slow')
+ @test.services('network')
def test_created_router(self):
"""Verifies created router."""
router_id = self.test_resources.get('Router')['physical_resource_id']
- resp, body = self.network_client.show_router(router_id)
- self.assertEqual('200', resp['status'])
+ _, body = self.network_client.show_router(router_id)
router = body['router']
self.assertEqual('NewRouter', router['name'])
self.assertEqual(self.external_network_id,
@@ -138,13 +138,13 @@
self.assertEqual(True, router['admin_state_up'])
@test.attr(type='slow')
+ @test.services('network')
def test_created_router_interface(self):
"""Verifies created router interface."""
router_id = self.test_resources.get('Router')['physical_resource_id']
network_id = self.test_resources.get('Network')['physical_resource_id']
subnet_id = self.test_resources.get('Subnet')['physical_resource_id']
- resp, body = self.network_client.list_ports()
- self.assertEqual('200', resp['status'])
+ _, body = self.network_client.list_ports()
ports = body['ports']
router_ports = filter(lambda port: port['device_id'] ==
router_id, ports)
@@ -161,11 +161,11 @@
router_interface_ip)
@test.attr(type='slow')
+ @test.services('compute', 'network')
def test_created_server(self):
"""Verifies created sever."""
server_id = self.test_resources.get('Server')['physical_resource_id']
- resp, server = self.servers_client.get_server(server_id)
- self.assertEqual('200', resp['status'])
+ _, server = self.servers_client.get_server(server_id)
self.assertEqual(self.keypair_name, server['key_name'])
self.assertEqual('ACTIVE', server['status'])
network = server['addresses']['NewNetwork'][0]
diff --git a/tempest/api/orchestration/stacks/test_non_empty_stack.py b/tempest/api/orchestration/stacks/test_non_empty_stack.py
index a97c561..72ad5f5 100644
--- a/tempest/api/orchestration/stacks/test_non_empty_stack.py
+++ b/tempest/api/orchestration/stacks/test_non_empty_stack.py
@@ -45,8 +45,7 @@
cls.client.wait_for_stack_status(cls.stack_id, 'CREATE_COMPLETE')
def _list_stacks(self, expected_num=None, **filter_kwargs):
- resp, stacks = self.client.list_stacks(params=filter_kwargs)
- self.assertEqual('200', resp['status'])
+ _, stacks = self.client.list_stacks(params=filter_kwargs)
self.assertIsInstance(stacks, list)
if expected_num is not None:
self.assertEqual(expected_num, len(stacks))
@@ -62,8 +61,7 @@
@test.attr(type='gate')
def test_stack_show(self):
"""Getting details about created stack should be possible."""
- resp, stack = self.client.get_stack(self.stack_name)
- self.assertEqual('200', resp['status'])
+ _, stack = self.client.get_stack(self.stack_name)
self.assertIsInstance(stack, dict)
self.assert_fields_in_dict(stack, 'stack_name', 'id', 'links',
'parameters', 'outputs', 'disable_rollback',
@@ -82,12 +80,10 @@
@test.attr(type='gate')
def test_suspend_resume_stack(self):
"""Suspend and resume a stack."""
- resp, suspend_stack = self.client.suspend_stack(self.stack_identifier)
- self.assertEqual('200', resp['status'])
+ _, suspend_stack = self.client.suspend_stack(self.stack_identifier)
self.client.wait_for_stack_status(self.stack_identifier,
'SUSPEND_COMPLETE')
- resp, resume_stack = self.client.resume_stack(self.stack_identifier)
- self.assertEqual('200', resp['status'])
+ _, resume_stack = self.client.resume_stack(self.stack_identifier)
self.client.wait_for_stack_status(self.stack_identifier,
'RESUME_COMPLETE')
@@ -101,8 +97,8 @@
@test.attr(type='gate')
def test_show_resource(self):
"""Getting details about created resource should be possible."""
- resp, resource = self.client.get_resource(self.stack_identifier,
- self.resource_name)
+ _, resource = self.client.get_resource(self.stack_identifier,
+ self.resource_name)
self.assertIsInstance(resource, dict)
self.assert_fields_in_dict(resource, 'resource_name', 'description',
'links', 'logical_resource_id',
@@ -115,18 +111,16 @@
@test.attr(type='gate')
def test_resource_metadata(self):
"""Getting metadata for created resources should be possible."""
- resp, metadata = self.client.show_resource_metadata(
+ _, metadata = self.client.show_resource_metadata(
self.stack_identifier,
self.resource_name)
- self.assertEqual('200', resp['status'])
self.assertIsInstance(metadata, dict)
self.assertEqual(['Tom', 'Stinky'], metadata.get('kittens', None))
@test.attr(type='gate')
def test_list_events(self):
"""Getting list of created events for the stack should be possible."""
- resp, events = self.client.list_events(self.stack_identifier)
- self.assertEqual('200', resp['status'])
+ _, events = self.client.list_events(self.stack_identifier)
self.assertIsInstance(events, list)
for event in events:
@@ -141,14 +135,13 @@
@test.attr(type='gate')
def test_show_event(self):
"""Getting details about an event should be possible."""
- resp, events = self.client.list_resource_events(self.stack_identifier,
- self.resource_name)
+ _, events = self.client.list_resource_events(self.stack_identifier,
+ self.resource_name)
self.assertNotEqual([], events)
events.sort(key=lambda event: event['event_time'])
event_id = events[0]['id']
- resp, event = self.client.show_event(self.stack_identifier,
- self.resource_name, event_id)
- self.assertEqual('200', resp['status'])
+ _, event = self.client.show_event(self.stack_identifier,
+ self.resource_name, event_id)
self.assertIsInstance(event, dict)
self.assert_fields_in_dict(event, 'resource_name', 'event_time',
'links', 'logical_resource_id',
diff --git a/tempest/api/orchestration/stacks/test_nova_keypair_resources.py b/tempest/api/orchestration/stacks/test_nova_keypair_resources.py
index c6f880b..2f58611 100644
--- a/tempest/api/orchestration/stacks/test_nova_keypair_resources.py
+++ b/tempest/api/orchestration/stacks/test_nova_keypair_resources.py
@@ -70,8 +70,7 @@
@test.attr(type='gate')
def test_stack_keypairs_output(self):
- resp, stack = self.client.get_stack(self.stack_name)
- self.assertEqual('200', resp['status'])
+ _, stack = self.client.get_stack(self.stack_name)
self.assertIsInstance(stack, dict)
output_map = {}
diff --git a/tempest/api/orchestration/stacks/test_stacks.py b/tempest/api/orchestration/stacks/test_stacks.py
index d5e66e8..8023f2c 100644
--- a/tempest/api/orchestration/stacks/test_stacks.py
+++ b/tempest/api/orchestration/stacks/test_stacks.py
@@ -28,8 +28,7 @@
@test.attr(type='smoke')
def test_stack_list_responds(self):
- resp, stacks = self.client.list_stacks()
- self.assertEqual('200', resp['status'])
+ _, stacks = self.client.list_stacks()
self.assertIsInstance(stacks, list)
@test.attr(type='smoke')
@@ -45,23 +44,22 @@
self.client.wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
# check for stack in list
- resp, stacks = self.client.list_stacks()
+ _, stacks = self.client.list_stacks()
list_ids = list([stack['id'] for stack in stacks])
self.assertIn(stack_id, list_ids)
# fetch the stack
- resp, stack = self.client.get_stack(stack_identifier)
+ _, stack = self.client.get_stack(stack_identifier)
self.assertEqual('CREATE_COMPLETE', stack['stack_status'])
# fetch the stack by name
- resp, stack = self.client.get_stack(stack_name)
+ _, stack = self.client.get_stack(stack_name)
self.assertEqual('CREATE_COMPLETE', stack['stack_status'])
# fetch the stack by id
- resp, stack = self.client.get_stack(stack_id)
+ _, stack = self.client.get_stack(stack_id)
self.assertEqual('CREATE_COMPLETE', stack['stack_status'])
# delete the stack
- resp = self.client.delete_stack(stack_identifier)
- self.assertEqual('204', resp[0]['status'])
+ self.client.delete_stack(stack_identifier)
self.client.wait_for_stack_status(stack_identifier, 'DELETE_COMPLETE')
diff --git a/tempest/api/orchestration/stacks/test_swift_resources.py b/tempest/api/orchestration/stacks/test_swift_resources.py
index adab8c3..cbe62a1 100644
--- a/tempest/api/orchestration/stacks/test_swift_resources.py
+++ b/tempest/api/orchestration/stacks/test_swift_resources.py
@@ -61,15 +61,16 @@
self.assertEqual(resource_name, resource['logical_resource_id'])
self.assertEqual('CREATE_COMPLETE', resource['resource_status'])
+ @test.services('object_storage')
def test_created_containers(self):
params = {'format': 'json'}
- resp, container_list = \
+ _, container_list = \
self.account_client.list_account_containers(params=params)
- self.assertEqual('200', resp['status'])
self.assertEqual(2, len(container_list))
for cont in container_list:
self.assertTrue(cont['name'].startswith(self.stack_name))
+ @test.services('object_storage')
def test_acl(self):
acl_headers = ('x-container-meta-web-index', 'x-container-read')
@@ -86,6 +87,7 @@
for h in acl_headers:
self.assertIn(h, headers)
+ @test.services('object_storage')
def test_metadata(self):
swift_basic_template = self.load_template('swift_basic')
metadatas = swift_basic_template['resources']['SwiftContainerWebsite'][
diff --git a/tempest/api/orchestration/stacks/test_templates.py b/tempest/api/orchestration/stacks/test_templates.py
index 74950a9..0d6060d 100644
--- a/tempest/api/orchestration/stacks/test_templates.py
+++ b/tempest/api/orchestration/stacks/test_templates.py
@@ -39,15 +39,13 @@
@test.attr(type='gate')
def test_show_template(self):
"""Getting template used to create the stack."""
- resp, template = self.client.show_template(self.stack_identifier)
- self.assertEqual('200', resp['status'])
+ _, template = self.client.show_template(self.stack_identifier)
@test.attr(type='gate')
def test_validate_template(self):
"""Validating template passing it content."""
- resp, parameters = self.client.validate_template(self.template,
- self.parameters)
- self.assertEqual('200', resp['status'])
+ _, parameters = self.client.validate_template(self.template,
+ self.parameters)
class TemplateAWSTestJSON(TemplateYAMLTestJSON):
diff --git a/tempest/api/orchestration/stacks/test_update.py b/tempest/api/orchestration/stacks/test_update.py
index a9a43b6..791a19b 100644
--- a/tempest/api/orchestration/stacks/test_update.py
+++ b/tempest/api/orchestration/stacks/test_update.py
@@ -40,11 +40,10 @@
def update_stack(self, stack_identifier, template):
stack_name = stack_identifier.split('/')[0]
- resp = self.client.update_stack(
+ self.client.update_stack(
stack_identifier=stack_identifier,
name=stack_name,
template=template)
- self.assertEqual('202', resp[0]['status'])
self.client.wait_for_stack_status(stack_identifier, 'UPDATE_COMPLETE')
@test.attr(type='gate')
diff --git a/tempest/api/orchestration/stacks/test_volumes.py b/tempest/api/orchestration/stacks/test_volumes.py
index f11ac2a..f371370 100644
--- a/tempest/api/orchestration/stacks/test_volumes.py
+++ b/tempest/api/orchestration/stacks/test_volumes.py
@@ -33,8 +33,7 @@
def _cinder_verify(self, volume_id, template):
self.assertIsNotNone(volume_id)
- resp, volume = self.volumes_client.get_volume(volume_id)
- self.assertEqual(200, resp.status)
+ _, volume = self.volumes_client.get_volume(volume_id)
self.assertEqual('available', volume.get('status'))
self.assertEqual(template['resources']['volume']['properties'][
'size'], volume.get('size'))
@@ -55,6 +54,7 @@
'name'], self.get_stack_output(stack_identifier, 'display_name'))
@test.attr(type='gate')
+ @test.services('volume')
def test_cinder_volume_create_delete(self):
"""Create and delete a volume via OS::Cinder::Volume."""
stack_name = data_utils.rand_name('heat')
@@ -83,6 +83,7 @@
self.volumes_client.wait_for_resource_deletion(volume_id)
@test.attr(type='gate')
+ @test.services('volume')
def test_cinder_volume_create_delete_retain(self):
"""Ensure the 'Retain' deletion policy is respected."""
stack_name = data_utils.rand_name('heat')
diff --git a/tempest/api/queuing/base.py b/tempest/api/queuing/base.py
index f4ff7f1..41a02f2 100644
--- a/tempest/api/queuing/base.py
+++ b/tempest/api/queuing/base.py
@@ -26,7 +26,7 @@
class BaseQueuingTest(test.BaseTestCase):
"""
- Base class for the Queuing tests that use the Tempest Marconi REST client
+ Base class for the Queuing tests that use the Tempest Zaqar REST client
It is assumed that the following option is defined in the
[service_available] section of etc/tempest.conf
@@ -37,8 +37,8 @@
@classmethod
def setUpClass(cls):
super(BaseQueuingTest, cls).setUpClass()
- if not CONF.service_available.marconi:
- raise cls.skipException("Marconi support is required")
+ if not CONF.service_available.zaqar:
+ raise cls.skipException("Zaqar support is required")
os = cls.get_client_manager()
cls.queuing_cfg = CONF.queuing
cls.client = os.queuing_client
diff --git a/tempest/api/telemetry/base.py b/tempest/api/telemetry/base.py
index b5b2bb1..8c2f37b 100644
--- a/tempest/api/telemetry/base.py
+++ b/tempest/api/telemetry/base.py
@@ -29,6 +29,7 @@
def setUpClass(cls):
if not CONF.service_available.ceilometer:
raise cls.skipException("Ceilometer support is required")
+ cls.set_network_resources()
super(BaseTelemetryTest, cls).setUpClass()
os = cls.get_client_manager()
cls.telemetry_client = os.telemetry_client
diff --git a/tempest/api_schema/request/compute/flavors.py b/tempest/api_schema/request/compute/flavors.py
index 36e5a19..8fe9e3a 100644
--- a/tempest/api_schema/request/compute/flavors.py
+++ b/tempest/api_schema/request/compute/flavors.py
@@ -30,3 +30,24 @@
}
}
}
+
+common_admin_flavor_create = {
+ "name": "flavor-create",
+ "http-method": "POST",
+ "admin_client": True,
+ "url": "flavors",
+ "default_result_code": 400,
+ "json-schema": {
+ "type": "object",
+ "properties": {
+ "name": {"type": "string"},
+ "ram": {"type": "integer", "minimum": 1},
+ "vcpus": {"type": "integer", "minimum": 1},
+ "disk": {"type": "integer"},
+ "id": {"type": "integer"},
+ "swap": {"type": "integer"},
+ "rxtx_factor": {"type": "integer"},
+ "OS-FLV-EXT-DATA:ephemeral": {"type": "integer"}
+ }
+ }
+}
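
A minimal sketch, not part of the patch, of how the "json-schema" portion of common_admin_flavor_create can be exercised directly with the jsonschema library; the sample bodies are invented for illustration.

    import jsonschema

    flavor_create_schema = {
        "type": "object",
        "properties": {
            "name": {"type": "string"},
            "ram": {"type": "integer", "minimum": 1},
            "vcpus": {"type": "integer", "minimum": 1},
            "disk": {"type": "integer"},
        },
    }

    # Passes: every property respects its declared type and minimum.
    jsonschema.validate({"name": "m1.tiny", "ram": 512, "vcpus": 1, "disk": 1},
                        flavor_create_schema)

    # Fails: "ram" violates "minimum": 1, which is the kind of invalid input
    # that negative tests generated from such a definition are meant to send.
    try:
        jsonschema.validate({"name": "bad", "ram": 0}, flavor_create_schema)
    except jsonschema.ValidationError:
        pass
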
diff --git a/tempest/api_schema/request/compute/servers.py b/tempest/api_schema/request/compute/servers.py
new file mode 100644
index 0000000..731649c
--- /dev/null
+++ b/tempest/api_schema/request/compute/servers.py
@@ -0,0 +1,36 @@
+# (c) 2014 Deutsche Telekom AG
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+common_get_console_output = {
+ "name": "get-console-output",
+ "http-method": "POST",
+ "url": "servers/%s/action",
+ "resources": [
+ {"name": "server", "expected_result": 404}
+ ],
+ "json-schema": {
+ "type": "object",
+ "properties": {
+ "os-getConsoleOutput": {
+ "type": "object",
+ "properties": {
+ "length": {
+ "type": ["integer", "string"],
+ "minimum": 0
+ }
+ }
+ }
+ },
+ "additionalProperties": False
+ }
+}
diff --git a/tempest/api_schema/request/compute/v2/flavors.py b/tempest/api_schema/request/compute/v2/flavors.py
index 08f6c28..bc459ad 100644
--- a/tempest/api_schema/request/compute/v2/flavors.py
+++ b/tempest/api_schema/request/compute/v2/flavors.py
@@ -19,6 +19,8 @@
flavor_list = copy.deepcopy(flavors.common_flavor_list)
+flavor_create = copy.deepcopy(flavors.common_admin_flavor_create)
+
flavor_list["json-schema"]["properties"] = {
"minRam": {
"type": "integer",
diff --git a/tempest/api_schema/request/compute/v2/servers.py b/tempest/api_schema/request/compute/v2/servers.py
new file mode 100644
index 0000000..c9002ed
--- /dev/null
+++ b/tempest/api_schema/request/compute/v2/servers.py
@@ -0,0 +1,18 @@
+# (c) 2014 Deutsche Telekom AG
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy
+
+from tempest.api_schema.request.compute import servers
+
+get_console_output = copy.deepcopy(servers.common_get_console_output)
diff --git a/tempest/api_schema/response/compute/servers.py b/tempest/api_schema/response/compute/servers.py
index d6c2ddb..f9c957b 100644
--- a/tempest/api_schema/response/compute/servers.py
+++ b/tempest/api_schema/response/compute/servers.py
@@ -54,14 +54,15 @@
'id': {'type': 'string'},
'name': {'type': 'string'},
'status': {'type': 'string'},
- 'image': {
- 'type': 'object',
- 'properties': {
- 'id': {'type': 'string'},
- 'links': parameter_types.links
- },
- 'required': ['id', 'links']
- },
+ 'image': {'oneOf': [
+ {'type': 'object',
+ 'properties': {
+ 'id': {'type': 'string'},
+ 'links': parameter_types.links
+ },
+ 'required': ['id', 'links']},
+ {'type': ['string', 'null']}
+ ]},
'flavor': {
'type': 'object',
'properties': {
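
The relaxation of the 'image' field to a oneOf above accepts both shapes a server can report; the snippet below, with a simplified schema and made-up values, shows the two cases side by side.

    import jsonschema

    image_schema = {'oneOf': [
        {'type': 'object',
         'properties': {'id': {'type': 'string'}},
         'required': ['id']},
        {'type': ['string', 'null']}
    ]}

    # Image-backed server: 'image' is an object carrying the image id.
    jsonschema.validate({'id': 'f00f-1234'}, image_schema)
    # Boot-from-volume server: nova reports 'image' as an empty string.
    jsonschema.validate('', image_schema)
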
diff --git a/tempest/cli/simple_read_only/test_nova_manage.py b/tempest/cli/simple_read_only/test_nova_manage.py
index dae0cf8..c27b12e 100644
--- a/tempest/cli/simple_read_only/test_nova_manage.py
+++ b/tempest/cli/simple_read_only/test_nova_manage.py
@@ -79,10 +79,6 @@
def test_flavor_list(self):
self.assertNotEqual("", self.nova_manage('flavor list'))
- def test_db_archive_deleted_rows(self):
- # make sure command doesn't error out
- self.nova_manage('db archive_deleted_rows 50')
-
def test_db_sync(self):
# make sure command doesn't error out
self.nova_manage('db sync')
diff --git a/tempest/cmd/verify_tempest_config.py b/tempest/cmd/verify_tempest_config.py
index 70fd27b..cd696a9 100755
--- a/tempest/cmd/verify_tempest_config.py
+++ b/tempest/cmd/verify_tempest_config.py
@@ -247,7 +247,7 @@
'data_processing': 'sahara',
'baremetal': 'ironic',
'identity': 'keystone',
- 'queuing': 'marconi',
+ 'queuing': 'zaqar',
'database': 'trove'
}
# Get catalog list for endpoints to use for validation
diff --git a/tempest/common/generator/base_generator.py b/tempest/common/generator/base_generator.py
index 57b98f7..0398af1 100644
--- a/tempest/common/generator/base_generator.py
+++ b/tempest/common/generator/base_generator.py
@@ -13,6 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import functools
+
import jsonschema
from tempest.openstack.common import log as logging
@@ -39,6 +41,7 @@
"""
Decorator for simple generators that return one value
"""
+ @functools.wraps(fn)
def wrapped(self, schema):
result = fn(self, schema)
if result is not None:
diff --git a/tempest/common/generator/valid_generator.py b/tempest/common/generator/valid_generator.py
index a99bbc0..0d7b398 100644
--- a/tempest/common/generator/valid_generator.py
+++ b/tempest/common/generator/valid_generator.py
@@ -24,7 +24,7 @@
@base.generator_type("string")
@base.simple_generator
def generate_valid_string(self, schema):
- size = schema.get("minLength", 0)
+ size = schema.get("minLength", 1)
# TODO(dkr mko): handle format and pattern
return "x" * size
diff --git a/tempest/common/isolated_creds.py b/tempest/common/isolated_creds.py
index f711f2f..dca1f86 100644
--- a/tempest/common/isolated_creds.py
+++ b/tempest/common/isolated_creds.py
@@ -13,6 +13,7 @@
# under the License.
import netaddr
+from neutronclient.common import exceptions as n_exc
from tempest import auth
from tempest import clients
@@ -162,9 +163,11 @@
email = data_utils.rand_name(root) + suffix + "@example.com"
user = self._create_user(username, self.password,
tenant, email)
- # NOTE(andrey-mp): user needs this role to create containers in swift
- swift_operator_role = CONF.object_storage.operator_role
- self._assign_user_role(tenant, user, swift_operator_role)
+ if CONF.service_available.swift:
+ # NOTE(andrey-mp): user needs this role to create containers
+ # in swift
+ swift_operator_role = CONF.object_storage.operator_role
+ self._assign_user_role(tenant, user, swift_operator_role)
if admin:
self._assign_user_role(tenant, user, CONF.identity.admin_role)
return self._get_credentials(user, tenant)
@@ -261,7 +264,7 @@
body['subnet']['cidr'] = str(subnet_cidr)
resp_body = self.network_admin_client.create_subnet(body)
break
- except exceptions.BadRequest as e:
+ except (n_exc.BadRequest, exceptions.BadRequest) as e:
if 'overlaps with another subnet' not in str(e):
raise
else:
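
A minimal sketch of the retry pattern above, with hypothetical client and body arguments: the overlapping-CIDR failure can surface as either neutronclient's BadRequest or tempest's own, so both are treated as a signal to try the next candidate CIDR while anything else is re-raised.

    from neutronclient.common import exceptions as n_exc
    from tempest import exceptions

    def try_create_subnet(network_admin_client, body):
        # Returns the created subnet, or None if this CIDR overlaps and the
        # caller should move on to the next candidate.
        try:
            return network_admin_client.create_subnet(body)
        except (n_exc.BadRequest, exceptions.BadRequest) as e:
            if 'overlaps with another subnet' not in str(e):
                raise
            return None
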
diff --git a/tempest/common/xml_utils.py b/tempest/common/xml_utils.py
index b1bf789..7d460a4 100644
--- a/tempest/common/xml_utils.py
+++ b/tempest/common/xml_utils.py
@@ -14,6 +14,7 @@
# under the License.
import collections
+import copy
XMLNS_11 = "http://docs.openstack.org/compute/api/v1.1"
XMLNS_V3 = "http://docs.openstack.org/compute/api/v1.1"
@@ -78,16 +79,19 @@
class Document(Element):
def __init__(self, *args, **kwargs):
- if 'version' not in kwargs:
- kwargs['version'] = '1.0'
- if 'encoding' not in kwargs:
- kwargs['encoding'] = 'UTF-8'
Element.__init__(self, '?xml', *args, **kwargs)
def __str__(self):
- args = " ".join(['%s="%s"' %
- (k, v if v is not None else "")
- for k, v in self._attrs.items()])
+ attrs = copy.copy(self._attrs)
+ # pop the required standard attrs out and render in required
+ # order.
+ vers = attrs.pop('version', '1.0')
+ enc = attrs.pop('encoding', 'UTF-8')
+ args = 'version="%s" encoding="%s"' % (vers, enc)
+ if attrs:
+ args = " ".join([args] + ['%s="%s"' %
+ (k, v if v is not None else "")
+ for k, v in attrs.items()])
string = '<?xml %s?>\n' % args
for element in self._elements:
string += str(element)
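
A standalone sketch of the rendering rule the rewritten __str__ above implements: version and encoding are always emitted first and in that order, with any remaining attributes appended afterwards.

    import copy

    def render_declaration(attrs):
        attrs = copy.copy(attrs)
        vers = attrs.pop('version', '1.0')
        enc = attrs.pop('encoding', 'UTF-8')
        args = 'version="%s" encoding="%s"' % (vers, enc)
        if attrs:
            args = " ".join([args] + ['%s="%s"' % (k, v if v is not None else "")
                                      for k, v in attrs.items()])
        return '<?xml %s?>' % args

    assert render_declaration({}) == '<?xml version="1.0" encoding="UTF-8"?>'
    assert (render_declaration({'encoding': 'UTF-8', 'version': '1.1'}) ==
            '<?xml version="1.1" encoding="UTF-8"?>')
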
diff --git a/tempest/config.py b/tempest/config.py
index 4836c63..1d10a0a 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -855,9 +855,15 @@
default='/opt/stack/new/devstack/files/images/'
'cirros-0.3.1-x86_64-uec',
help='Directory containing image files'),
- cfg.StrOpt('qcow2_img_file',
+ cfg.StrOpt('img_file', deprecated_name='qcow2_img_file',
default='cirros-0.3.1-x86_64-disk.img',
- help='QCOW2 image file name'),
+ help='Image file name'),
+ cfg.StrOpt('img_disk_format',
+ default='qcow2',
+ help='Image disk format'),
+ cfg.StrOpt('img_container_format',
+ default='bare',
+ help='Image container format'),
cfg.StrOpt('ami_img_file',
default='cirros-0.3.1-x86_64-blank.img',
help='AMI image file name'),
@@ -915,9 +921,9 @@
cfg.BoolOpt('trove',
default=False,
help="Whether or not Trove is expected to be available"),
- cfg.BoolOpt('marconi',
+ cfg.BoolOpt('zaqar',
default=False,
- help="Whether or not Marconi is expected to be available"),
+ help="Whether or not Zaqar is expected to be available"),
]
debug_group = cfg.OptGroup(name="debug",
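
A minimal sketch, outside tempest's own CONF wiring, of how deprecated_name keeps the renamed option above backwards compatible: a legacy qcow2_img_file entry in the [scenario] section is still honoured when img_file is read.

    from oslo.config import cfg

    opts = [
        cfg.StrOpt('img_file', deprecated_name='qcow2_img_file',
                   default='cirros-0.3.1-x86_64-disk.img',
                   help='Image file name'),
    ]
    conf = cfg.ConfigOpts()
    conf.register_opts(opts, group='scenario')
    conf(args=[])                   # no CLI arguments; defaults and config files only
    print(conf.scenario.img_file)   # also picks up a legacy qcow2_img_file setting
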
diff --git a/tempest/hacking/checks.py b/tempest/hacking/checks.py
index 93329bc..cef010e 100644
--- a/tempest/hacking/checks.py
+++ b/tempest/hacking/checks.py
@@ -20,7 +20,7 @@
PYTHON_CLIENTS = ['cinder', 'glance', 'keystone', 'nova', 'swift', 'neutron',
'trove', 'ironic', 'savanna', 'heat', 'ceilometer',
- 'marconi', 'sahara']
+ 'zaqar', 'sahara']
PYTHON_CLIENT_RE = re.compile('import (%s)client' % '|'.join(PYTHON_CLIENTS))
TEST_DEFINITION = re.compile(r'^\s*def test.*')
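
A quick illustration of what the updated PYTHON_CLIENT_RE flags: direct imports of the official python-*client modules, now matching zaqarclient rather than the old marconiclient name.

    import re

    PYTHON_CLIENTS = ['cinder', 'glance', 'keystone', 'nova', 'swift', 'neutron',
                      'trove', 'ironic', 'savanna', 'heat', 'ceilometer',
                      'zaqar', 'sahara']
    PYTHON_CLIENT_RE = re.compile('import (%s)client' % '|'.join(PYTHON_CLIENTS))

    assert PYTHON_CLIENT_RE.search('import zaqarclient')
    assert PYTHON_CLIENT_RE.search('import marconiclient') is None
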
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 76d82aa..0f14c94 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -54,16 +54,40 @@
class ScenarioTest(tempest.test.BaseTestCase):
+ """Replaces the OfficialClientTest base class.
+
+ Uses tempest's own clients as opposed to OfficialClients.
+
+ Common differences:
+ - replace resource.attribute with resource['attribute']
+ - replace resource.delete with delete_callable(resource['id'])
+ - replace local waiters with common / rest_client waiters
+ """
@classmethod
def setUpClass(cls):
super(ScenarioTest, cls).setUpClass()
+ # Using tempest client for isolated credentials as well
cls.isolated_creds = isolated_creds.IsolatedCreds(
cls.__name__, tempest_client=True,
network_resources=cls.network_resources)
cls.manager = clients.Manager(
credentials=cls.credentials()
)
+ cls.admin_manager = clients.Manager(cls.admin_credentials())
+ # Clients (in alphabetical order)
+ cls.floating_ips_client = cls.manager.floating_ips_client
+ # Glance image client v1
+ cls.image_client = cls.manager.image_client
+ # Compute image client
+ cls.images_client = cls.manager.images_client
+ cls.keypairs_client = cls.manager.keypairs_client
+ cls.networks_client = cls.admin_manager.networks_client
+ # Nova security groups client
+ cls.security_groups_client = cls.manager.security_groups_client
+ cls.servers_client = cls.manager.servers_client
+ cls.volumes_client = cls.manager.volumes_client
+ cls.snapshots_client = cls.manager.snapshots_client
@classmethod
def _get_credentials(cls, get_creds, ctype):
@@ -83,6 +107,315 @@
return cls._get_credentials(cls.isolated_creds.get_admin_creds,
'identity_admin')
+ # ## Methods to handle sync and async deletes
+
+ def setUp(self):
+ super(ScenarioTest, self).setUp()
+ self.cleanup_waits = []
+ # NOTE(mtreinish) This is safe to do in setUp instead of setUpClass
+ # because scenario tests in the same test class should not share
+ # resources. If resources were shared between test cases then it
+ # should be a single scenario test instead of multiples.
+
+ # NOTE(yfried): this list is cleaned at the end of test_methods and
+ # not at the end of the class
+ self.addCleanup(self._wait_for_cleanups)
+
+ def delete_wrapper(self, delete_thing, thing_id):
+ """Ignores NotFound exceptions for delete operations.
+
+ @param delete_thing: delete method of a resource
+ @param thing_id: id of the resource to be deleted
+ """
+ try:
+ # Tempest clients return dicts, so there is no common delete
+ # method available. Using a callable instead
+ delete_thing(thing_id)
+ except exceptions.NotFound:
+ # If the resource is already missing, mission accomplished.
+ pass
+
+ def addCleanup_with_wait(self, waiter_callable, thing_id, thing_id_param,
+ cleanup_callable, cleanup_args=[],
+ cleanup_kwargs={}, ignore_error=True):
+ """Adds wait for ansyc resource deletion at the end of cleanups
+
+ @param waiter_callable: callable to wait for the resource to delete
+ @param thing_id: the id of the resource to be cleaned-up
+ @param thing_id_param: the name of the id param in the waiter
+ @param cleanup_callable: method to pass to self.addCleanup with the
+ following *cleanup_args and **cleanup_kwargs
+ (usually a delete method).
+ """
+ self.addCleanup(cleanup_callable, *cleanup_args, **cleanup_kwargs)
+ wait_dict = {
+ 'waiter_callable': waiter_callable,
+ thing_id_param: thing_id
+ }
+ self.cleanup_waits.append(wait_dict)
+
+ def _wait_for_cleanups(self):
+ """To handle async delete actions, a list of waits is added
+ which will be iterated over as the last step of clearing the
+ cleanup queue. That way all the delete calls are made up front
+ and the tests won't succeed unless the deletes are eventually
+ successful. This is the same basic approach used in the api tests to
+ limit cleanup execution time except here it is multi-resource,
+ because of the nature of the scenario tests.
+ """
+ for wait in self.cleanup_waits:
+ waiter_callable = wait.pop('waiter_callable')
+ waiter_callable(**wait)
+
+ # ## Test functions library
+ #
+ # The create_[resource] functions only return body and discard the
+ # resp part which is not used in scenario tests
+
+ def create_keypair(self):
+ name = data_utils.rand_name(self.__class__.__name__)
+ # We don't need to create a keypair by pubkey in scenario
+ resp, body = self.keypairs_client.create_keypair(name)
+ self.addCleanup(self.keypairs_client.delete_keypair, name)
+ return body
+
+ def create_server(self, name=None, image=None, flavor=None,
+ wait_on_boot=True, wait_on_delete=True,
+ create_kwargs={}):
+ """Creates VM instance.
+
+ @param image: image from which to create the instance
+ @param wait_on_boot: wait for status ACTIVE before continue
+ @param wait_on_delete: force synchronous delete on cleanup
+ @param create_kwargs: additional details for instance creation
+ @return: server dict
+ """
+ if name is None:
+ name = data_utils.rand_name(self.__class__.__name__)
+ if image is None:
+ image = CONF.compute.image_ref
+ if flavor is None:
+ flavor = CONF.compute.flavor_ref
+
+ fixed_network_name = CONF.compute.fixed_network_name
+ if 'nics' not in create_kwargs and fixed_network_name:
+ _, networks = self.networks_client.list_networks()
+ # If several networks found, set the NetID on which to connect the
+ # server to avoid the following error "Multiple possible networks
+ # found, use a Network ID to be more specific."
+ # See Tempest #1250866
+ if len(networks) > 1:
+ for network in networks:
+ if network['label'] == fixed_network_name:
+ create_kwargs['nics'] = [{'net-id': network['id']}]
+ break
+ # If we didn't find the network we were looking for:
+ else:
+ msg = ("The network on which the NIC of the server must "
+ "be connected can not be found : "
+ "fixed_network_name=%s. Starting instance without "
+ "specifying a network.") % fixed_network_name
+ LOG.info(msg)
+
+ LOG.debug("Creating a server (name: %s, image: %s, flavor: %s)",
+ name, image, flavor)
+ _, server = self.servers_client.create_server(name, image, flavor,
+ **create_kwargs)
+ if wait_on_delete:
+ self.addCleanup(self.servers_client.wait_for_server_termination,
+ server['id'])
+ self.addCleanup_with_wait(
+ waiter_callable=self.servers_client.wait_for_server_termination,
+ thing_id=server['id'], thing_id_param='server_id',
+ cleanup_callable=self.delete_wrapper,
+ cleanup_args=[self.servers_client.delete_server, server['id']])
+ if wait_on_boot:
+ self.servers_client.wait_for_server_status(server_id=server['id'],
+ status='ACTIVE')
+ # The instance retrieved on creation is missing network
+ # details, necessitating retrieval after it becomes active to
+ # ensure correct details.
+ _, server = self.servers_client.get_server(server['id'])
+ self.assertEqual(server['name'], name)
+ return server
+
+ def create_volume(self, size=1, name=None, snapshot_id=None,
+ imageRef=None, volume_type=None, wait_on_delete=True):
+ if name is None:
+ name = data_utils.rand_name(self.__class__.__name__)
+ _, volume = self.volumes_client.create_volume(
+ size=size, display_name=name, snapshot_id=snapshot_id,
+ imageRef=imageRef, volume_type=volume_type)
+ if wait_on_delete:
+ self.addCleanup(self.volumes_client.wait_for_resource_deletion,
+ volume['id'])
+ self.addCleanup_with_wait(
+ waiter_callable=self.volumes_client.wait_for_resource_deletion,
+ thing_id=volume['id'], thing_id_param='id',
+ cleanup_callable=self.delete_wrapper,
+ cleanup_args=[self.volumes_client.delete_volume, volume['id']])
+
+ self.assertEqual(name, volume['display_name'])
+ self.volumes_client.wait_for_volume_status(volume['id'], 'available')
+ # The volume retrieved on creation has a non-up-to-date status.
+ # Retrieval after it becomes active ensures correct details.
+ _, volume = self.volumes_client.get_volume(volume['id'])
+ return volume
+
+ def _create_loginable_secgroup_rule_nova(self, secgroup_id=None):
+ _client = self.security_groups_client
+ if secgroup_id is None:
+ _, sgs = _client.list_security_groups()
+ for sg in sgs:
+ if sg['name'] == 'default':
+ secgroup_id = sg['id']
+
+ # These rules are intended to permit inbound ssh and icmp
+ # traffic from all sources, so no group_id is provided.
+ # Setting a group_id would only permit traffic from ports
+ # belonging to the same security group.
+ rulesets = [
+ {
+ # ssh
+ 'ip_proto': 'tcp',
+ 'from_port': 22,
+ 'to_port': 22,
+ 'cidr': '0.0.0.0/0',
+ },
+ {
+ # ping
+ 'ip_proto': 'icmp',
+ 'from_port': -1,
+ 'to_port': -1,
+ 'cidr': '0.0.0.0/0',
+ }
+ ]
+ rules = list()
+ for ruleset in rulesets:
+ _, sg_rule = _client.create_security_group_rule(secgroup_id,
+ **ruleset)
+ self.addCleanup(self.delete_wrapper,
+ _client.delete_security_group_rule,
+ sg_rule['id'])
+ rules.append(sg_rule)
+ return rules
+
+ def _create_security_group_nova(self):
+ # Create security group
+ sg_name = data_utils.rand_name(self.__class__.__name__)
+ sg_desc = sg_name + " description"
+ _, secgroup = self.security_groups_client.create_security_group(
+ sg_name, sg_desc)
+ self.assertEqual(secgroup['name'], sg_name)
+ self.assertEqual(secgroup['description'], sg_desc)
+ self.addCleanup(self.delete_wrapper,
+ self.security_groups_client.delete_security_group,
+ secgroup['id'])
+
+ # Add rules to the security group
+ self._create_loginable_secgroup_rule_nova(secgroup['id'])
+
+ return secgroup
+
+ def get_remote_client(self, server_or_ip, username=None, private_key=None):
+ if isinstance(server_or_ip, six.string_types):
+ ip = server_or_ip
+ else:
+ network_name_for_ssh = CONF.compute.network_for_ssh
+ ip = server_or_ip.networks[network_name_for_ssh][0]
+ if username is None:
+ username = CONF.scenario.ssh_user
+ if private_key is None:
+ private_key = self.keypair['private_key']
+ linux_client = remote_client.RemoteClient(ip, username,
+ pkey=private_key)
+ try:
+ linux_client.validate_authentication()
+ except exceptions.SSHTimeout:
+ LOG.exception('ssh connection to %s failed' % ip)
+ debug.log_net_debug()
+ raise
+
+ return linux_client
+
+ def _image_create(self, name, fmt, path, properties={}):
+ name = data_utils.rand_name('%s-' % name)
+ image_file = open(path, 'rb')
+ self.addCleanup(image_file.close)
+ params = {
+ 'name': name,
+ 'container_format': fmt,
+ 'disk_format': fmt,
+ 'is_public': 'False',
+ }
+ params.update(properties)
+ _, image = self.image_client.create_image(**params)
+ self.addCleanup(self.image_client.delete_image, image['id'])
+ self.assertEqual("queued", image['status'])
+ self.image_client.update_image(image['id'], data=image_file)
+ return image['id']
+
+ def glance_image_create(self):
+ img_path = CONF.scenario.img_dir + "/" + CONF.scenario.img_file
+ aki_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.aki_img_file
+ ari_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.ari_img_file
+ ami_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.ami_img_file
+ img_container_format = CONF.scenario.img_container_format
+ img_disk_format = CONF.scenario.img_disk_format
+ LOG.debug("paths: img: %s, container_fomat: %s, disk_format: %s, "
+ "ami: %s, ari: %s, aki: %s" %
+ (img_path, img_container_format, img_disk_format,
+ ami_img_path, ari_img_path, aki_img_path))
+ try:
+ self.image = self._image_create('scenario-img',
+ img_container_format,
+ img_path,
+ properties={'disk_format':
+ img_disk_format})
+ except IOError:
+ LOG.debug("A qcow2 image was not found. Try to get a uec image.")
+ kernel = self._image_create('scenario-aki', 'aki', aki_img_path)
+ ramdisk = self._image_create('scenario-ari', 'ari', ari_img_path)
+ properties = {
+ 'properties': {'kernel_id': kernel, 'ramdisk_id': ramdisk}
+ }
+ self.image = self._image_create('scenario-ami', 'ami',
+ path=ami_img_path,
+ properties=properties)
+ LOG.debug("image:%s" % self.image)
+
+ def _log_console_output(self, servers=None):
+ if not servers:
+ _, servers = self.servers_client.list_servers()
+ servers = servers['servers']
+ for server in servers:
+ LOG.debug('Console output for %s', server['id'])
+ LOG.debug(self.servers_client.get_console_output(server['id'],
+ length=None))
+
+ def create_server_snapshot(self, server, name=None):
+ # Glance client
+ _image_client = self.image_client
+ # Compute client
+ _images_client = self.images_client
+ if name is None:
+ name = data_utils.rand_name('scenario-snapshot-')
+ LOG.debug("Creating a snapshot image for server: %s", server['name'])
+ resp, image = _images_client.create_image(server['id'], name)
+ image_id = resp['location'].split('images/')[1]
+ _image_client.wait_for_image_status(image_id, 'active')
+ self.addCleanup_with_wait(
+ waiter_callable=_image_client.wait_for_resource_deletion,
+ thing_id=image_id, thing_id_param='id',
+ cleanup_callable=self.delete_wrapper,
+ cleanup_args=[_image_client.delete_image, image_id])
+ _, snapshot_image = _image_client.get_image_meta(image_id)
+ image_name = snapshot_image['name']
+ self.assertEqual(name, image_name)
+ LOG.debug("Created snapshot image %s for server %s",
+ image_name, server['name'])
+ return snapshot_image
+
class OfficialClientTest(tempest.test.BaseTestCase):
"""
@@ -328,11 +661,25 @@
'cidr': '0.0.0.0/0',
},
{
+ # ssh -6
+ 'ip_protocol': 'tcp',
+ 'from_port': 22,
+ 'to_port': 22,
+ 'cidr': '::/0',
+ },
+ {
# ping
'ip_protocol': 'icmp',
'from_port': -1,
'to_port': -1,
'cidr': '0.0.0.0/0',
+ },
+ {
+ # ping6
+ 'ip_protocol': 'icmp',
+ 'from_port': -1,
+ 'to_port': -1,
+ 'cidr': '::/0',
}
]
rules = list()
@@ -543,19 +890,22 @@
return image.id
def glance_image_create(self):
- qcow2_img_path = (CONF.scenario.img_dir + "/" +
- CONF.scenario.qcow2_img_file)
+ img_path = CONF.scenario.img_dir + "/" + CONF.scenario.img_file
aki_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.aki_img_file
ari_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.ari_img_file
ami_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.ami_img_file
- LOG.debug("paths: img: %s, ami: %s, ari: %s, aki: %s"
- % (qcow2_img_path, ami_img_path, ari_img_path, aki_img_path))
+ img_container_format = CONF.scenario.img_container_format
+ img_disk_format = CONF.scenario.img_disk_format
+ LOG.debug("paths: img: %s, container_fomat: %s, disk_format: %s, "
+ "ami: %s, ari: %s, aki: %s" %
+ (img_path, img_container_format, img_disk_format,
+ ami_img_path, ari_img_path, aki_img_path))
try:
self.image = self._image_create('scenario-img',
- 'bare',
- qcow2_img_path,
+ img_container_format,
+ img_path,
properties={'disk_format':
- 'qcow2'})
+ img_disk_format})
except IOError:
LOG.debug("A qcow2 image was not found. Try to get a uec image.")
kernel = self._image_create('scenario-aki', 'aki', aki_img_path)
@@ -1464,3 +1814,81 @@
self.client.stacks.delete(stack_identifier)
except heat_exceptions.HTTPNotFound:
pass
+
+
+class SwiftScenarioTest(ScenarioTest):
+ """
+ Provide harness to do Swift scenario tests.
+
+ Subclasses implement the tests that use the methods provided by this
+ class.
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ cls.set_network_resources()
+ super(SwiftScenarioTest, cls).setUpClass()
+ if not CONF.service_available.swift:
+ skip_msg = ("%s skipped as swift is not available" %
+ cls.__name__)
+ raise cls.skipException(skip_msg)
+ # Clients for Swift
+ cls.account_client = cls.manager.account_client
+ cls.container_client = cls.manager.container_client
+ cls.object_client = cls.manager.object_client
+
+ def _get_swift_stat(self):
+ """get swift status for our user account."""
+ self.account_client.list_account_containers()
+ LOG.debug('Swift status information obtained successfully')
+
+ def _create_container(self, container_name=None):
+ name = container_name or data_utils.rand_name(
+ 'swift-scenario-container')
+ self.container_client.create_container(name)
+ # look for the container to ensure it is created
+ self._list_and_check_container_objects(name)
+ LOG.debug('Container %s created' % (name))
+ return name
+
+ def _delete_container(self, container_name):
+ self.container_client.delete_container(container_name)
+ LOG.debug('Container %s deleted' % (container_name))
+
+ def _upload_object_to_container(self, container_name, obj_name=None):
+ obj_name = obj_name or data_utils.rand_name('swift-scenario-object')
+ obj_data = data_utils.arbitrary_string()
+ self.object_client.create_object(container_name, obj_name, obj_data)
+ return obj_name, obj_data
+
+ def _delete_object(self, container_name, filename):
+ self.object_client.delete_object(container_name, filename)
+ self._list_and_check_container_objects(container_name,
+ not_present_obj=[filename])
+
+ def _list_and_check_container_objects(self, container_name, present_obj=[],
+ not_present_obj=[]):
+ """
+ List objects for a given container and assert which are present and
+ which are not.
+ """
+ _, object_list = self.container_client.list_container_contents(
+ container_name)
+ if present_obj:
+ for obj in present_obj:
+ self.assertIn(obj, object_list)
+ if not_present_obj:
+ for obj in not_present_obj:
+ self.assertNotIn(obj, object_list)
+
+ def _change_container_acl(self, container_name, acl):
+ metadata_param = {'metadata_prefix': 'x-container-',
+ 'metadata': {'read': acl}}
+ self.container_client.update_container_metadata(container_name,
+ **metadata_param)
+ resp, _ = self.container_client.list_container_metadata(container_name)
+ self.assertEqual(resp['x-container-read'], acl)
+
+ def _download_and_verify(self, container_name, obj_name, expected_data):
+ _, obj = self.object_client.get_object(container_name, obj_name)
+ self.assertEqual(obj, expected_data)
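The snapshot helper above, like the volume-snapshot helper later in this diff, registers its teardown through addCleanup_with_wait, pairing a delete callable with a waiter so that cleanup blocks until the resource is actually gone. A rough sketch of that contract, under the assumption that the helper simply chains the two calls at cleanup time (the real Tempest helper is not shown in this diff):

    def add_cleanup_with_wait_sketch(testcase, waiter_callable, thing_id,
                                     thing_id_param, cleanup_callable,
                                     cleanup_args):
        """Schedule delete-then-wait at cleanup time (sketch only)."""
        def _delete_then_wait():
            # e.g. delete_wrapper(image_client.delete_image, image_id)
            cleanup_callable(*cleanup_args)
            # block until the resource id can no longer be found
            waiter_callable(**{thing_id_param: thing_id})
        testcase.addCleanup(_delete_then_wait)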
diff --git a/tempest/scenario/test_minimum_basic.py b/tempest/scenario/test_minimum_basic.py
index 29fdc74..4bc4a98 100644
--- a/tempest/scenario/test_minimum_basic.py
+++ b/tempest/scenario/test_minimum_basic.py
@@ -13,6 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+from tempest.common import custom_matchers
from tempest.common import debug
from tempest import config
from tempest.openstack.common import log as logging
@@ -24,7 +25,7 @@
LOG = logging.getLogger(__name__)
-class TestMinimumBasicScenario(manager.OfficialClientTest):
+class TestMinimumBasicScenario(manager.ScenarioTest):
"""
This is a basic minimum scenario test.
@@ -38,61 +39,69 @@
"""
def _wait_for_server_status(self, status):
- server_id = self.server.id
- self.status_timeout(
- self.compute_client.servers, server_id, status)
+ server_id = self.server['id']
+ # Raise on error defaults to True, which is consistent with the
+ # behaviour of the original scenario test helper here
+ self.servers_client.wait_for_server_status(server_id, status)
def nova_keypair_add(self):
self.keypair = self.create_keypair()
def nova_boot(self):
- create_kwargs = {'key_name': self.keypair.name}
+ create_kwargs = {'key_name': self.keypair['name']}
self.server = self.create_server(image=self.image,
create_kwargs=create_kwargs)
def nova_list(self):
- servers = self.compute_client.servers.list()
- LOG.debug("server_list:%s" % servers)
- self.assertIn(self.server, servers)
+ _, servers = self.servers_client.list_servers()
+ # The compute client's list_servers response nests the list under 'servers'
+ servers = servers['servers']
+ self.assertIn(self.server['id'], [x['id'] for x in servers])
def nova_show(self):
- got_server = self.compute_client.servers.get(self.server)
- LOG.debug("got server:%s" % got_server)
- self.assertEqual(self.server, got_server)
+ _, got_server = self.servers_client.get_server(self.server['id'])
+ self.assertThat(
+ self.server, custom_matchers.MatchesDictExceptForKeys(
+ got_server, excluded_keys=['OS-EXT-AZ:availability_zone']))
def cinder_create(self):
self.volume = self.create_volume()
def cinder_list(self):
- volumes = self.volume_client.volumes.list()
- self.assertIn(self.volume, volumes)
+ _, volumes = self.volumes_client.list_volumes()
+ self.assertIn(self.volume['id'], [x['id'] for x in volumes])
def cinder_show(self):
- volume = self.volume_client.volumes.get(self.volume.id)
+ _, volume = self.volumes_client.get_volume(self.volume['id'])
self.assertEqual(self.volume, volume)
def nova_volume_attach(self):
- attach_volume_client = self.compute_client.volumes.create_server_volume
- volume = attach_volume_client(self.server.id,
- self.volume.id,
- '/dev/vdb')
- self.assertEqual(self.volume.id, volume.id)
- self.wait_for_volume_status('in-use')
+ volume_device_path = '/dev/' + CONF.compute.volume_device_name
+ _, volume_attachment = self.servers_client.attach_volume(
+ self.server['id'], self.volume['id'], volume_device_path)
+ volume = volume_attachment['volumeAttachment']
+ self.assertEqual(self.volume['id'], volume['id'])
+ self.volumes_client.wait_for_volume_status(volume['id'], 'in-use')
+ # Refresh the volume after the attachment
+ _, self.volume = self.volumes_client.get_volume(volume['id'])
def nova_reboot(self):
- self.server.reboot()
+ self.servers_client.reboot(self.server['id'], 'SOFT')
self._wait_for_server_status('ACTIVE')
def nova_floating_ip_create(self):
- self.floating_ip = self.compute_client.floating_ips.create()
- self.addCleanup(self.delete_wrapper, self.floating_ip)
+ _, self.floating_ip = self.floating_ips_client.create_floating_ip()
+ self.addCleanup(self.delete_wrapper,
+ self.floating_ips_client.delete_floating_ip,
+ self.floating_ip['id'])
def nova_floating_ip_add(self):
- self.server.add_floating_ip(self.floating_ip)
+ self.floating_ips_client.associate_floating_ip_to_server(
+ self.floating_ip['ip'], self.server['id'])
def ssh_to_server(self):
try:
- self.linux_client = self.get_remote_client(self.floating_ip.ip)
+ self.linux_client = self.get_remote_client(self.floating_ip['ip'])
except Exception as e:
LOG.exception('ssh to server failed')
self._log_console_output()
@@ -102,21 +111,24 @@
raise
def check_partitions(self):
+ # NOTE(andreaf) The device name may differ depending on the guest OS
partitions = self.linux_client.get_partitions()
- self.assertEqual(1, partitions.count('vdb'))
+ self.assertEqual(1, partitions.count(CONF.compute.volume_device_name))
def nova_volume_detach(self):
- detach_volume_client = self.compute_client.volumes.delete_server_volume
- detach_volume_client(self.server.id, self.volume.id)
- self.wait_for_volume_status('available')
+ self.servers_client.detach_volume(self.server['id'], self.volume['id'])
+ self.volumes_client.wait_for_volume_status(self.volume['id'],
+ 'available')
- volume = self.volume_client.volumes.get(self.volume.id)
- self.assertEqual('available', volume.status)
+ _, volume = self.volumes_client.get_volume(self.volume['id'])
+ self.assertEqual('available', volume['status'])
def create_and_add_security_group(self):
secgroup = self._create_security_group_nova()
- self.server.add_security_group(secgroup.name)
- self.addCleanup(self.server.remove_security_group, secgroup.name)
+ self.servers_client.add_security_group(self.server['id'],
+ secgroup['name'])
+ self.addCleanup(self.servers_client.remove_security_group,
+ self.server['id'], secgroup['name'])
@test.services('compute', 'volume', 'image', 'network')
def test_minimum_basic_scenario(self):
diff --git a/tempest/scenario/test_server_basic_ops.py b/tempest/scenario/test_server_basic_ops.py
index 38686d9..77e195d 100644
--- a/tempest/scenario/test_server_basic_ops.py
+++ b/tempest/scenario/test_server_basic_ops.py
@@ -26,7 +26,7 @@
load_tests = test_utils.load_tests_input_scenario_utils
-class TestServerBasicOps(manager.OfficialClientTest):
+class TestServerBasicOps(manager.ScenarioTest):
"""
This smoke test case follows this basic set of operations:
@@ -69,9 +69,9 @@
def boot_instance(self):
# Create server with image and flavor from input scenario
- security_groups = [self.security_group.name]
+ security_groups = [self.security_group]
create_kwargs = {
- 'key_name': self.keypair.id,
+ 'key_name': self.keypair['name'],
'security_groups': security_groups
}
self.instance = self.create_server(image=self.image_ref,
@@ -81,16 +81,19 @@
def verify_ssh(self):
if self.run_ssh:
# Obtain a floating IP
- floating_ip = self.compute_client.floating_ips.create()
- self.addCleanup(self.delete_wrapper, floating_ip)
+ _, floating_ip = self.floating_ips_client.create_floating_ip()
+ self.addCleanup(self.delete_wrapper,
+ self.floating_ips_client.delete_floating_ip,
+ floating_ip['id'])
# Attach a floating IP
- self.instance.add_floating_ip(floating_ip)
+ self.floating_ips_client.associate_floating_ip_to_server(
+ floating_ip['ip'], self.instance['id'])
# Check ssh
try:
self.get_remote_client(
- server_or_ip=floating_ip.ip,
+ server_or_ip=floating_ip['ip'],
username=self.image_utils.ssh_user(self.image_ref),
- private_key=self.keypair.private_key)
+ private_key=self.keypair['private_key'])
except Exception:
LOG.exception('ssh to server failed')
self._log_console_output()
@@ -102,4 +105,4 @@
self.security_group = self._create_security_group_nova()
self.boot_instance()
self.verify_ssh()
- self.instance.delete()
+ self.servers_client.delete_server(self.instance['id'])
diff --git a/tempest/scenario/test_snapshot_pattern.py b/tempest/scenario/test_snapshot_pattern.py
index ffdd006..d500065 100644
--- a/tempest/scenario/test_snapshot_pattern.py
+++ b/tempest/scenario/test_snapshot_pattern.py
@@ -25,7 +25,7 @@
LOG = log.getLogger(__name__)
-class TestSnapshotPattern(manager.OfficialClientTest):
+class TestSnapshotPattern(manager.ScenarioTest):
"""
This test is for snapshotting an instance and booting with it.
The following is the scenario outline:
@@ -37,9 +37,9 @@
"""
def _boot_image(self, image_id):
- security_groups = [self.security_group.name]
+ security_groups = [self.security_group]
create_kwargs = {
- 'key_name': self.keypair.name,
+ 'key_name': self.keypair['name'],
'security_groups': security_groups
}
return self.create_server(image=image_id, create_kwargs=create_kwargs)
@@ -66,12 +66,15 @@
self.assertEqual(self.timestamp, got_timestamp)
def _create_floating_ip(self):
- floating_ip = self.compute_client.floating_ips.create()
- self.addCleanup(self.delete_wrapper, floating_ip)
+ _, floating_ip = self.floating_ips_client.create_floating_ip()
+ self.addCleanup(self.delete_wrapper,
+ self.floating_ips_client.delete_floating_ip,
+ floating_ip['id'])
return floating_ip
def _set_floating_ip_to_server(self, server, floating_ip):
- server.add_floating_ip(floating_ip)
+ self.floating_ips_client.associate_floating_ip_to_server(
+ floating_ip['ip'], server['id'])
@testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
'Snapshotting is not available.')
@@ -86,7 +89,7 @@
if CONF.compute.use_floatingip_for_ssh:
fip_for_server = self._create_floating_ip()
self._set_floating_ip_to_server(server, fip_for_server)
- self._write_timestamp(fip_for_server.ip)
+ self._write_timestamp(fip_for_server['ip'])
else:
self._write_timestamp(server)
@@ -94,13 +97,13 @@
snapshot_image = self.create_server_snapshot(server=server)
# boot a second instance from the snapshot
- server_from_snapshot = self._boot_image(snapshot_image.id)
+ server_from_snapshot = self._boot_image(snapshot_image['id'])
# check the existence of the timestamp file in the second instance
if CONF.compute.use_floatingip_for_ssh:
fip_for_snapshot = self._create_floating_ip()
self._set_floating_ip_to_server(server_from_snapshot,
fip_for_snapshot)
- self._check_timestamp(fip_for_snapshot.ip)
+ self._check_timestamp(fip_for_snapshot['ip'])
else:
self._check_timestamp(server_from_snapshot)
diff --git a/tempest/scenario/test_swift_basic_ops.py b/tempest/scenario/test_swift_basic_ops.py
index 86e0867..ad74ec4 100644
--- a/tempest/scenario/test_swift_basic_ops.py
+++ b/tempest/scenario/test_swift_basic_ops.py
@@ -13,8 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-
-from tempest.common.utils import data_utils
+from tempest.common import http
from tempest import config
from tempest.openstack.common import log as logging
from tempest.scenario import manager
@@ -25,80 +24,49 @@
LOG = logging.getLogger(__name__)
-class TestSwiftBasicOps(manager.OfficialClientTest):
+class TestSwiftBasicOps(manager.SwiftScenarioTest):
"""
- Test swift with the follow operations:
+ Test swift basic ops.
* get swift stat.
* create container.
* upload a file to the created container.
* list container's objects and assure that the uploaded file is present.
+ * download the object and check the content
* delete object from container.
* list container's objects and assure that the deleted file is gone.
* delete a container.
* list containers and assure that the deleted container is gone.
+ * change the ACL of the container and check that it takes effect
"""
- @classmethod
- def setUpClass(cls):
- cls.set_network_resources()
- super(TestSwiftBasicOps, cls).setUpClass()
- if not CONF.service_available.swift:
- skip_msg = ("%s skipped as swift is not available" %
- cls.__name__)
- raise cls.skipException(skip_msg)
-
- def _get_swift_stat(self):
- """get swift status for our user account."""
- self.object_storage_client.get_account()
- LOG.debug('Swift status information obtained successfully')
-
- def _create_container(self, container_name=None):
- name = container_name or data_utils.rand_name(
- 'swift-scenario-container')
- self.object_storage_client.put_container(name)
- # look for the container to assure it is created
- self._list_and_check_container_objects(name)
- LOG.debug('Container %s created' % (name))
- return name
-
- def _delete_container(self, container_name):
- self.object_storage_client.delete_container(container_name)
- LOG.debug('Container %s deleted' % (container_name))
-
- def _upload_object_to_container(self, container_name, obj_name=None):
- obj_name = obj_name or data_utils.rand_name('swift-scenario-object')
- self.object_storage_client.put_object(container_name, obj_name,
- data_utils.rand_name('obj_data'),
- content_type='text/plain')
- return obj_name
-
- def _delete_object(self, container_name, filename):
- self.object_storage_client.delete_object(container_name, filename)
- self._list_and_check_container_objects(container_name,
- not_present_obj=[filename])
-
- def _list_and_check_container_objects(self, container_name, present_obj=[],
- not_present_obj=[]):
- """
- List objects for a given container and assert which are present and
- which are not.
- """
- meta, response = self.object_storage_client.get_container(
- container_name)
- # create a list with file name only
- object_list = [obj['name'] for obj in response]
- if present_obj:
- for obj in present_obj:
- self.assertIn(obj, object_list)
- if not_present_obj:
- for obj in not_present_obj:
- self.assertNotIn(obj, object_list)
-
@test.services('object_storage')
def test_swift_basic_ops(self):
self._get_swift_stat()
container_name = self._create_container()
- obj_name = self._upload_object_to_container(container_name)
+ obj_name, obj_data = self._upload_object_to_container(container_name)
self._list_and_check_container_objects(container_name, [obj_name])
+ self._download_and_verify(container_name, obj_name, obj_data)
+ self._delete_object(container_name, obj_name)
+ self._delete_container(container_name)
+
+ @test.services('object_storage')
+ def test_swift_acl_anonymous_download(self):
+ """This test will cover below steps:
+ 1. Create container
+ 2. Upload object to the new container
+ 3. Change the ACL of the container
+ 4. Check that the object can be downloaded by an anonymous user
+ 5. Delete the object and container
+ """
+ container_name = self._create_container()
+ obj_name, _ = self._upload_object_to_container(container_name)
+ obj_url = '%s/%s/%s' % (self.object_client.base_url,
+ container_name, obj_name)
+ http_client = http.ClosingHttp()
+ resp, _ = http_client.request(obj_url, 'GET')
+ self.assertEqual(resp.status, 401)
+ self._change_container_acl(container_name, '.r:*')
+ resp, _ = http_client.request(obj_url, 'GET')
+ self.assertEqual(resp.status, 200)
self._delete_object(container_name, obj_name)
self._delete_container(container_name)
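The ACL test above first expects an anonymous GET on the object to return 401, then sets the container's read ACL to '.r:*' and expects 200. The same check can be reproduced outside the test harness with an ordinary HTTP client; a standalone sketch using httplib2 and a made-up object URL (in the test the URL is built from object_client.base_url, the container name and the object name):

    import httplib2

    # hypothetical object URL, for illustration only
    OBJ_URL = ('http://swift.example.com:8080/v1/AUTH_demo/'
               'demo-container/demo-object')

    http_client = httplib2.Http()
    resp, body = http_client.request(OBJ_URL, 'GET')
    # 401 while the container is private; 200 once X-Container-Read is '.r:*'
    print(resp.status)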
diff --git a/tempest/scenario/test_volume_boot_pattern.py b/tempest/scenario/test_volume_boot_pattern.py
index bf5d1f6..ec8575a 100644
--- a/tempest/scenario/test_volume_boot_pattern.py
+++ b/tempest/scenario/test_volume_boot_pattern.py
@@ -10,8 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-from cinderclient import exceptions as cinder_exc
-
from tempest.common.utils import data_utils
from tempest import config
from tempest.openstack.common import log
@@ -23,7 +21,7 @@
LOG = log.getLogger(__name__)
-class TestVolumeBootPattern(manager.OfficialClientTest):
+class TestVolumeBootPattern(manager.ScenarioTest):
"""
This test case attempts to reproduce the following steps:
@@ -54,28 +52,32 @@
# dev_name=id:type:size:delete_on_terminate
# where type needs to be "snap" if the server is booted
# from a snapshot, size instead can be safely left empty
- bd_map = {
- 'vda': vol_id + ':::0'
- }
- security_groups = [self.security_group.name]
+ bd_map = [{
+ 'device_name': 'vda',
+ 'volume_id': vol_id,
+ 'delete_on_termination': '0'}]
+ self.security_group = self._create_security_group_nova()
+ security_groups = [{'name': self.security_group['name']}]
create_kwargs = {
'block_device_mapping': bd_map,
- 'key_name': keypair.name,
+ 'key_name': keypair['name'],
'security_groups': security_groups
}
return self.create_server(image='', create_kwargs=create_kwargs)
def _create_snapshot_from_volume(self, vol_id):
- volume_snapshots = self.volume_client.volume_snapshots
snap_name = data_utils.rand_name('snapshot')
- snap = volume_snapshots.create(volume_id=vol_id,
- force=True,
- display_name=snap_name)
- self.addCleanup_with_wait(self.volume_client.volume_snapshots, snap.id,
- exc_type=cinder_exc.NotFound)
- self.status_timeout(volume_snapshots,
- snap.id,
- 'available')
+ _, snap = self.snapshots_client.create_snapshot(
+ volume_id=vol_id,
+ force=True,
+ display_name=snap_name)
+ self.addCleanup_with_wait(
+ waiter_callable=self.snapshots_client.wait_for_resource_deletion,
+ thing_id=snap['id'], thing_id_param='id',
+ cleanup_callable=self.delete_wrapper,
+ cleanup_args=[self.snapshots_client.delete_snapshot, snap['id']])
+ self.snapshots_client.wait_for_snapshot_status(snap['id'], 'available')
+ self.assertEqual(snap_name, snap['display_name'])
return snap
def _create_volume_from_snapshot(self, snap_id):
@@ -85,27 +87,26 @@
def _stop_instances(self, instances):
# NOTE(gfidente): two loops so we do not wait for the status twice
for i in instances:
- self.compute_client.servers.stop(i)
+ self.servers_client.stop(i['id'])
for i in instances:
- self.status_timeout(self.compute_client.servers,
- i.id,
- 'SHUTOFF')
+ self.servers_client.wait_for_server_status(i['id'], 'SHUTOFF')
def _detach_volumes(self, volumes):
# NOTE(gfidente): two loops so we do not wait for the status twice
for v in volumes:
- self.volume_client.volumes.detach(v)
+ self.volumes_client.detach_volume(v['id'])
for v in volumes:
- self.status_timeout(self.volume_client.volumes,
- v.id,
- 'available')
+ self.volumes_client.wait_for_volume_status(v['id'], 'available')
def _ssh_to_server(self, server, keypair):
if CONF.compute.use_floatingip_for_ssh:
- floating_ip = self.compute_client.floating_ips.create()
- self.addCleanup(self.delete_wrapper, floating_ip)
- server.add_floating_ip(floating_ip)
- ip = floating_ip.ip
+ _, floating_ip = self.floating_ips_client.create_floating_ip()
+ self.addCleanup(self.delete_wrapper,
+ self.floating_ips_client.delete_floating_ip,
+ floating_ip['id'])
+ self.floating_ips_client.associate_floating_ip_to_server(
+ floating_ip['ip'], server['id'])
+ ip = floating_ip['ip']
else:
network_name_for_ssh = CONF.compute.network_for_ssh
ip = server.networks[network_name_for_ssh][0]
@@ -113,10 +114,10 @@
try:
return self.get_remote_client(
ip,
- private_key=keypair.private_key)
+ private_key=keypair['private_key'])
except Exception:
LOG.exception('ssh to server failed')
- self._log_console_output()
+ self._log_console_output(self)
raise
def _get_content(self, ssh_client):
@@ -129,8 +130,8 @@
return self._get_content(ssh_client)
def _delete_server(self, server):
- self.compute_client.servers.delete(server)
- self.delete_timeout(self.compute_client.servers, server.id)
+ self.servers_client.delete_server(server['id'])
+ self.servers_client.wait_for_server_termination(server['id'])
def _check_content_of_written_file(self, ssh_client, expected):
actual = self._get_content(ssh_client)
@@ -143,7 +144,7 @@
# create an instance from volume
volume_origin = self._create_volume_from_image()
- instance_1st = self._boot_instance_from_volume(volume_origin.id,
+ instance_1st = self._boot_instance_from_volume(volume_origin['id'],
keypair)
# write content to volume on instance
@@ -155,7 +156,7 @@
self._delete_server(instance_1st)
# create a 2nd instance from volume
- instance_2nd = self._boot_instance_from_volume(volume_origin.id,
+ instance_2nd = self._boot_instance_from_volume(volume_origin['id'],
keypair)
# check the content of written file
@@ -164,11 +165,11 @@
self._check_content_of_written_file(ssh_client_for_instance_2nd, text)
# snapshot a volume
- snapshot = self._create_snapshot_from_volume(volume_origin.id)
+ snapshot = self._create_snapshot_from_volume(volume_origin['id'])
# create a 3rd instance from snapshot
- volume = self._create_volume_from_snapshot(snapshot.id)
- instance_from_snapshot = self._boot_instance_from_volume(volume.id,
+ volume = self._create_volume_from_snapshot(snapshot['id'])
+ instance_from_snapshot = self._boot_instance_from_volume(volume['id'],
keypair)
# check the content of written file
@@ -186,10 +187,11 @@
bdms = [{'uuid': vol_id, 'source_type': 'volume',
'destination_type': 'volume', 'boot_index': 0,
'delete_on_termination': False}]
- security_groups = [self.security_group.name]
+ self.security_group = self._create_security_group_nova()
+ security_groups = [{'name': self.security_group['name']}]
create_kwargs = {
'block_device_mapping_v2': bdms,
- 'key_name': keypair.name,
+ 'key_name': keypair['name'],
'security_groups': security_groups
}
return self.create_server(image='', create_kwargs=create_kwargs)
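For reference, the two boot-from-volume payload shapes used in this file, side by side; the volume id is a placeholder:

    VOL_ID = '11111111-2222-3333-4444-555555555555'

    # block_device_mapping (v1 style), as built in _boot_instance_from_volume()
    bd_map = [{
        'device_name': 'vda',
        'volume_id': VOL_ID,
        'delete_on_termination': '0',
    }]
    create_kwargs_v1 = {'block_device_mapping': bd_map}

    # block_device_mapping_v2, as built for the bdm-v2 test path
    bdms = [{
        'uuid': VOL_ID,
        'source_type': 'volume',
        'destination_type': 'volume',
        'boot_index': 0,
        'delete_on_termination': False,
    }]
    create_kwargs_v2 = {'block_device_mapping_v2': bdms}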
diff --git a/tempest/services/baremetal/base.py b/tempest/services/baremetal/base.py
index f98ecff..0b97f74 100644
--- a/tempest/services/baremetal/base.py
+++ b/tempest/services/baremetal/base.py
@@ -119,6 +119,7 @@
uri += "?%s" % urllib.urlencode(kwargs)
resp, body = self.get(uri)
+ self.expected_success(200, resp['status'])
return resp, self.deserialize(body)
@@ -135,6 +136,7 @@
else:
uri = self._get_uri(resource, uuid=uuid, permanent=permanent)
resp, body = self.get(uri)
+ self.expected_success(200, resp['status'])
return resp, self.deserialize(body)
@@ -153,6 +155,7 @@
uri = self._get_uri(resource)
resp, body = self.post(uri, body=body)
+ self.expected_success(201, resp['status'])
return resp, self.deserialize(body)
@@ -168,6 +171,7 @@
uri = self._get_uri(resource, uuid)
resp, body = self.delete(uri)
+ self.expected_success(204, resp['status'])
return resp, body
def _patch_request(self, resource, uuid, patch_object):
@@ -184,6 +188,7 @@
patch_body = json.dumps(patch_object)
resp, body = self.patch(uri, body=patch_body)
+ self.expected_success(200, resp['status'])
return resp, self.deserialize(body)
@handle_errors
@@ -212,4 +217,5 @@
put_body = json.dumps(put_object)
resp, body = self.put(uri, body=put_body)
+ self.expected_success(202, resp['status'])
return resp, body
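The calls added above assert the expected HTTP status of every baremetal response. A rough sketch of the contract they rely on, assuming expected_success simply compares the observed status with the expected one and raises on mismatch (this is not the real Tempest helper):

    def expected_success_sketch(expected_code, read_code):
        """Raise if the observed HTTP status is not the expected one."""
        if int(read_code) != int(expected_code):
            raise AssertionError('Unexpected status code: got %s, expected %s'
                                 % (read_code, expected_code))

    # the baremetal client passes resp['status'] (a string); REST clients
    # elsewhere in this diff pass resp.status (an int) - both shapes work here
    expected_success_sketch(200, '200')
    expected_success_sketch(204, 204)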
diff --git a/tempest/services/compute/json/servers_client.py b/tempest/services/compute/json/servers_client.py
index f44be29..88b68d3 100644
--- a/tempest/services/compute/json/servers_client.py
+++ b/tempest/services/compute/json/servers_client.py
@@ -58,6 +58,7 @@
disk_config: Determines if user or admin controls disk configuration.
return_reservation_id: Enable/Disable the return of reservation id
block_device_mapping: Block device mapping for the server.
+ block_device_mapping_v2: Block device mapping V2 for the server.
"""
post_body = {
'name': name,
@@ -70,7 +71,8 @@
'availability_zone', 'accessIPv4', 'accessIPv6',
'min_count', 'max_count', ('metadata', 'meta'),
('OS-DCF:diskConfig', 'disk_config'),
- 'return_reservation_id', 'block_device_mapping']:
+ 'return_reservation_id', 'block_device_mapping',
+ 'block_device_mapping_v2']:
if isinstance(option, tuple):
post_param = option[0]
key = option[1]
@@ -80,6 +82,7 @@
value = kwargs.get(key)
if value is not None:
post_body[post_param] = value
+
post_body = {'server': post_body}
if 'sched_hints' in kwargs:
diff --git a/tempest/services/identity/v3/json/identity_client.py b/tempest/services/identity/v3/json/identity_client.py
index d57b931..0522f37 100644
--- a/tempest/services/identity/v3/json/identity_client.py
+++ b/tempest/services/identity/v3/json/identity_client.py
@@ -525,7 +525,7 @@
def __init__(self):
super(V3TokenClientJSON, self).__init__(None)
auth_url = CONF.identity.uri_v3
- if not auth_url and CONF.identity_feature_enabled.api_v3:
+ if not auth_url:
raise exceptions.InvalidConfiguration('you must specify a v3 uri '
'if using the v3 identity '
'api')
diff --git a/tempest/services/identity/v3/xml/identity_client.py b/tempest/services/identity/v3/xml/identity_client.py
index c2bd77e..5b761b3 100644
--- a/tempest/services/identity/v3/xml/identity_client.py
+++ b/tempest/services/identity/v3/xml/identity_client.py
@@ -520,7 +520,7 @@
def __init__(self):
super(V3TokenClientXML, self).__init__(None)
auth_url = CONF.identity.uri_v3
- if not auth_url and CONF.identity_feature_enabled.api_v3:
+ if not auth_url:
raise exceptions.InvalidConfiguration('you must specify a v3 uri '
'if using the v3 identity '
'api')
diff --git a/tempest/services/network/json/network_client.py b/tempest/services/network/json/network_client.py
index 2e28bfe..16a4f5c 100644
--- a/tempest/services/network/json/network_client.py
+++ b/tempest/services/network/json/network_client.py
@@ -54,12 +54,14 @@
body = json.dumps(put_body)
uri = '%s/quotas/%s' % (self.uri_prefix, tenant_id)
resp, body = self.put(uri, body)
+ self.rest_client.expected_success(200, resp.status)
body = json.loads(body)
return resp, body['quota']
def reset_quotas(self, tenant_id):
uri = '%s/quotas/%s' % (self.uri_prefix, tenant_id)
resp, body = self.delete(uri)
+ self.rest_client.expected_success(204, resp.status)
return resp, body
def create_router(self, name, admin_state_up=True, **kwargs):
@@ -69,12 +71,14 @@
body = json.dumps(post_body)
uri = '%s/routers' % (self.uri_prefix)
resp, body = self.post(uri, body)
+ self.rest_client.expected_success(201, resp.status)
body = json.loads(body)
return resp, body
def _update_router(self, router_id, set_enable_snat, **kwargs):
uri = '%s/routers/%s' % (self.uri_prefix, router_id)
resp, body = self.get(uri)
+ self.rest_client.expected_success(200, resp.status)
body = json.loads(body)
update_body = {}
update_body['name'] = kwargs.get('name', body['router']['name'])
@@ -88,6 +92,7 @@
update_body = dict(router=update_body)
update_body = json.dumps(update_body)
resp, body = self.put(uri, update_body)
+ self.rest_client.expected_success(200, resp.status)
body = json.loads(body)
return resp, body
@@ -114,6 +119,7 @@
update_body = {"subnet_id": subnet_id}
update_body = json.dumps(update_body)
resp, body = self.put(uri, update_body)
+ self.rest_client.expected_success(200, resp.status)
body = json.loads(body)
return resp, body
@@ -123,6 +129,7 @@
update_body = {"port_id": port_id}
update_body = json.dumps(update_body)
resp, body = self.put(uri, update_body)
+ self.rest_client.expected_success(200, resp.status)
body = json.loads(body)
return resp, body
@@ -132,6 +139,7 @@
update_body = {"subnet_id": subnet_id}
update_body = json.dumps(update_body)
resp, body = self.put(uri, update_body)
+ self.rest_client.expected_success(200, resp.status)
body = json.loads(body)
return resp, body
@@ -141,6 +149,7 @@
update_body = {"port_id": port_id}
update_body = json.dumps(update_body)
resp, body = self.put(uri, update_body)
+ self.rest_client.expected_success(200, resp.status)
body = json.loads(body)
return resp, body
@@ -155,6 +164,7 @@
uri = '%s/lb/pools/%s/health_monitors' % (self.uri_prefix,
pool_id)
resp, body = self.post(uri, body)
+ self.rest_client.expected_success(201, resp.status)
body = json.loads(body)
return resp, body
@@ -163,11 +173,13 @@
uri = '%s/lb/pools/%s/health_monitors/%s' % (self.uri_prefix, pool_id,
health_monitor_id)
resp, body = self.delete(uri)
+ self.rest_client.expected_success(204, resp.status)
return resp, body
def list_router_interfaces(self, uuid):
uri = '%s/ports?device_id=%s' % (self.uri_prefix, uuid)
resp, body = self.get(uri)
+ self.rest_client.expected_success(200, resp.status)
body = json.loads(body)
return resp, body
@@ -180,12 +192,14 @@
agent = {"agent": agent_info}
body = json.dumps(agent)
resp, body = self.put(uri, body)
+ self.rest_client.expected_success(200, resp.status)
body = json.loads(body)
return resp, body
def list_pools_hosted_by_one_lbaas_agent(self, agent_id):
uri = '%s/agents/%s/loadbalancer-pools' % (self.uri_prefix, agent_id)
resp, body = self.get(uri)
+ self.rest_client.expected_success(200, resp.status)
body = json.loads(body)
return resp, body
@@ -193,18 +207,21 @@
uri = ('%s/lb/pools/%s/loadbalancer-agent' %
(self.uri_prefix, pool_id))
resp, body = self.get(uri)
+ self.rest_client.expected_success(200, resp.status)
body = json.loads(body)
return resp, body
def list_routers_on_l3_agent(self, agent_id):
uri = '%s/agents/%s/l3-routers' % (self.uri_prefix, agent_id)
resp, body = self.get(uri)
+ self.rest_client.expected_success(200, resp.status)
body = json.loads(body)
return resp, body
def list_l3_agents_hosting_router(self, router_id):
uri = '%s/routers/%s/l3-agents' % (self.uri_prefix, router_id)
resp, body = self.get(uri)
+ self.rest_client.expected_success(200, resp.status)
body = json.loads(body)
return resp, body
@@ -213,6 +230,7 @@
post_body = {"router_id": router_id}
body = json.dumps(post_body)
resp, body = self.post(uri, body)
+ self.rest_client.expected_success(201, resp.status)
body = json.loads(body)
return resp, body
@@ -220,17 +238,20 @@
uri = '%s/agents/%s/l3-routers/%s' % (
self.uri_prefix, agent_id, router_id)
resp, body = self.delete(uri)
+ self.rest_client.expected_success(204, resp.status)
return resp, body
def list_dhcp_agent_hosting_network(self, network_id):
uri = '%s/networks/%s/dhcp-agents' % (self.uri_prefix, network_id)
resp, body = self.get(uri)
+ self.rest_client.expected_success(200, resp.status)
body = json.loads(body)
return resp, body
def list_networks_hosted_by_one_dhcp_agent(self, agent_id):
uri = '%s/agents/%s/dhcp-networks' % (self.uri_prefix, agent_id)
resp, body = self.get(uri)
+ self.rest_client.expected_success(200, resp.status)
body = json.loads(body)
return resp, body
@@ -238,6 +259,7 @@
uri = '%s/agents/%s/dhcp-networks/%s' % (self.uri_prefix, agent_id,
network_id)
resp, body = self.delete(uri)
+ self.rest_client.expected_success(204, resp.status)
return resp, body
def create_ikepolicy(self, name, **kwargs):
@@ -251,6 +273,7 @@
body = json.dumps(post_body)
uri = '%s/vpn/ikepolicies' % (self.uri_prefix)
resp, body = self.post(uri, body)
+ self.rest_client.expected_success(201, resp.status)
body = json.loads(body)
return resp, body
@@ -264,6 +287,7 @@
}
body = json.dumps(put_body)
resp, body = self.put(uri, body)
+ self.rest_client.expected_success(200, resp.status)
body = json.loads(body)
return resp, body
@@ -277,12 +301,14 @@
}
body = json.dumps(put_body)
resp, body = self.put(uri, body)
+ self.rest_client.expected_success(200, resp.status)
body = json.loads(body)
return resp, body
def list_lb_pool_stats(self, pool_id):
uri = '%s/lb/pools/%s/stats' % (self.uri_prefix, pool_id)
resp, body = self.get(uri)
+ self.rest_client.expected_success(200, resp.status)
body = json.loads(body)
return resp, body
@@ -291,5 +317,6 @@
body = json.dumps(post_body)
uri = '%s/agents/%s/dhcp-networks' % (self.uri_prefix, agent_id)
resp, body = self.post(uri, body)
+ self.rest_client.expected_success(201, resp.status)
body = json.loads(body)
return resp, body
diff --git a/tempest/services/network/network_client_base.py b/tempest/services/network/network_client_base.py
index 4ee8302..94ba5aa 100644
--- a/tempest/services/network/network_client_base.py
+++ b/tempest/services/network/network_client_base.py
@@ -111,6 +111,7 @@
uri += '?' + urllib.urlencode(filters, doseq=1)
resp, body = self.get(uri)
result = {plural_name: self.deserialize_list(body)}
+ self.rest_client.expected_success(200, resp.status)
return resp, result
return _list
@@ -119,7 +120,9 @@
def _delete(resource_id):
plural = self.pluralize(resource_name)
uri = '%s/%s' % (self.get_uri(plural), resource_id)
- return self.delete(uri)
+ resp, body = self.delete(uri)
+ self.rest_client.expected_success(204, resp.status)
+ return resp, body
return _delete
@@ -134,6 +137,7 @@
uri += '?' + urllib.urlencode(fields, doseq=1)
resp, body = self.get(uri)
body = self.deserialize_single(body)
+ self.rest_client.expected_success(200, resp.status)
return resp, body
return _show
@@ -145,6 +149,7 @@
post_data = self.serialize({resource_name: kwargs})
resp, body = self.post(uri, post_data)
body = self.deserialize_single(body)
+ self.rest_client.expected_success(201, resp.status)
return resp, body
return _create
@@ -156,6 +161,7 @@
post_data = self.serialize({resource_name: kwargs})
resp, body = self.put(uri, post_data)
body = self.deserialize_single(body)
+ self.rest_client.expected_success(200, resp.status)
return resp, body
return _update
@@ -174,15 +180,14 @@
raise AttributeError(name)
# Common methods that are hard to automate
- def create_bulk_network(self, count, names):
- network_list = list()
- for i in range(count):
- network_list.append({'name': names[i]})
+ def create_bulk_network(self, names):
+ network_list = [{'name': name} for name in names]
post_data = {'networks': network_list}
body = self.serialize_list(post_data, "networks", "network")
uri = self.get_uri("networks")
resp, body = self.post(uri, body)
body = {'networks': self.deserialize_list(body)}
+ self.rest_client.expected_success(201, resp.status)
return resp, body
def create_bulk_subnet(self, subnet_list):
@@ -191,6 +196,7 @@
uri = self.get_uri('subnets')
resp, body = self.post(uri, body)
body = {'subnets': self.deserialize_list(body)}
+ self.rest_client.expected_success(201, resp.status)
return resp, body
def create_bulk_port(self, port_list):
@@ -199,6 +205,7 @@
uri = self.get_uri('ports')
resp, body = self.post(uri, body)
body = {'ports': self.deserialize_list(body)}
+ self.rest_client.expected_success(201, resp.status)
return resp, body
def wait_for_resource_deletion(self, resource_type, id):
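The bulk-network helper above drops the redundant count argument and derives the payload from the name list alone. A before/after usage sketch with a hypothetical client instance:

    names = ['net-a', 'net-b', 'net-c']

    # old call: client.create_bulk_network(3, names)
    # new call: client.create_bulk_network(names)

    # what the client now builds internally before serializing the request
    network_list = [{'name': name} for name in names]
    post_data = {'networks': network_list}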
diff --git a/tempest/services/network/xml/network_client.py b/tempest/services/network/xml/network_client.py
index ea9dc77..17b1f8e 100644
--- a/tempest/services/network/xml/network_client.py
+++ b/tempest/services/network/xml/network_client.py
@@ -103,17 +103,21 @@
post_body.append(p1)
resp, body = self.post(uri, str(common.Document(post_body)))
body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ self.rest_client.expected_success(201, resp.status)
return resp, body
def disassociate_health_monitor_with_pool(self, health_monitor_id,
pool_id):
uri = '%s/lb/pools/%s/health_monitors/%s' % (self.uri_prefix, pool_id,
health_monitor_id)
- return self.delete(uri)
+ resp, body = self.delete(uri)
+ self.rest_client.expected_success(204, resp.status)
+ return resp, body
def show_extension_details(self, ext_alias):
uri = '%s/extensions/%s' % (self.uri_prefix, str(ext_alias))
resp, body = self.get(uri)
+ self.rest_client.expected_success(200, resp.status)
body = _root_tag_fetcher_and_xml_to_json_parse(body)
return resp, body
@@ -123,6 +127,7 @@
router.append(common.Element("name", name))
common.deep_dict_to_xml(router, kwargs)
resp, body = self.post(uri, str(common.Document(router)))
+ self.rest_client.expected_success(201, resp.status)
body = _root_tag_fetcher_and_xml_to_json_parse(body)
return resp, body
@@ -132,6 +137,7 @@
for element, content in kwargs.iteritems():
router.append(common.Element(element, content))
resp, body = self.put(uri, str(common.Document(router)))
+ self.rest_client.expected_success(200, resp.status)
body = _root_tag_fetcher_and_xml_to_json_parse(body)
return resp, body
@@ -140,6 +146,7 @@
router_id)
subnet = common.Element("subnet_id", subnet_id)
resp, body = self.put(uri, str(common.Document(subnet)))
+ self.rest_client.expected_success(200, resp.status)
body = _root_tag_fetcher_and_xml_to_json_parse(body)
return resp, body
@@ -148,6 +155,7 @@
router_id)
port = common.Element("port_id", port_id)
resp, body = self.put(uri, str(common.Document(port)))
+ self.rest_client.expected_success(200, resp.status)
body = _root_tag_fetcher_and_xml_to_json_parse(body)
return resp, body
@@ -156,6 +164,7 @@
router_id)
subnet = common.Element("subnet_id", subnet_id)
resp, body = self.put(uri, str(common.Document(subnet)))
+ self.rest_client.expected_success(200, resp.status)
body = _root_tag_fetcher_and_xml_to_json_parse(body)
return resp, body
@@ -164,12 +173,14 @@
router_id)
port = common.Element("port_id", port_id)
resp, body = self.put(uri, str(common.Document(port)))
+ self.rest_client.expected_success(200, resp.status)
body = _root_tag_fetcher_and_xml_to_json_parse(body)
return resp, body
def list_router_interfaces(self, uuid):
uri = '%s/ports?device_id=%s' % (self.uri_prefix, uuid)
resp, body = self.get(uri)
+ self.rest_client.expected_success(200, resp.status)
ports = common.parse_array(etree.fromstring(body), self.PLURALS)
ports = {"ports": ports}
return resp, ports
@@ -181,12 +192,14 @@
p = common.Element(key, value)
agent.append(p)
resp, body = self.put(uri, str(common.Document(agent)))
+ self.rest_client.expected_success(200, resp.status)
body = _root_tag_fetcher_and_xml_to_json_parse(body)
return resp, body
def list_pools_hosted_by_one_lbaas_agent(self, agent_id):
uri = '%s/agents/%s/loadbalancer-pools' % (self.uri_prefix, agent_id)
resp, body = self.get(uri)
+ self.rest_client.expected_success(200, resp.status)
pools = common.parse_array(etree.fromstring(body))
body = {'pools': pools}
return resp, body
@@ -195,12 +208,14 @@
uri = ('%s/lb/pools/%s/loadbalancer-agent' %
(self.uri_prefix, pool_id))
resp, body = self.get(uri)
+ self.rest_client.expected_success(200, resp.status)
body = _root_tag_fetcher_and_xml_to_json_parse(body)
return resp, body
def list_routers_on_l3_agent(self, agent_id):
uri = '%s/agents/%s/l3-routers' % (self.uri_prefix, agent_id)
resp, body = self.get(uri)
+ self.rest_client.expected_success(200, resp.status)
routers = common.parse_array(etree.fromstring(body))
body = {'routers': routers}
return resp, body
@@ -208,6 +223,7 @@
def list_l3_agents_hosting_router(self, router_id):
uri = '%s/routers/%s/l3-agents' % (self.uri_prefix, router_id)
resp, body = self.get(uri)
+ self.rest_client.expected_success(200, resp.status)
agents = common.parse_array(etree.fromstring(body))
body = {'agents': agents}
return resp, body
@@ -216,6 +232,7 @@
uri = '%s/agents/%s/l3-routers' % (self.uri_prefix, agent_id)
router = (common.Element("router_id", router_id))
resp, body = self.post(uri, str(common.Document(router)))
+ self.rest_client.expected_success(201, resp.status)
body = _root_tag_fetcher_and_xml_to_json_parse(body)
return resp, body
@@ -223,11 +240,13 @@
uri = '%s/agents/%s/l3-routers/%s' % (
self.uri_prefix, agent_id, router_id)
resp, body = self.delete(uri)
+ self.rest_client.expected_success(204, resp.status)
return resp, body
def list_dhcp_agent_hosting_network(self, network_id):
uri = '%s/networks/%s/dhcp-agents' % (self.uri_prefix, network_id)
resp, body = self.get(uri)
+ self.rest_client.expected_success(200, resp.status)
agents = common.parse_array(etree.fromstring(body))
body = {'agents': agents}
return resp, body
@@ -235,6 +254,7 @@
def list_networks_hosted_by_one_dhcp_agent(self, agent_id):
uri = '%s/agents/%s/dhcp-networks' % (self.uri_prefix, agent_id)
resp, body = self.get(uri)
+ self.rest_client.expected_success(200, resp.status)
networks = common.parse_array(etree.fromstring(body))
body = {'networks': networks}
return resp, body
@@ -243,11 +263,13 @@
uri = '%s/agents/%s/dhcp-networks/%s' % (self.uri_prefix, agent_id,
network_id)
resp, body = self.delete(uri)
+ self.rest_client.expected_success(204, resp.status)
return resp, body
def list_lb_pool_stats(self, pool_id):
uri = '%s/lb/pools/%s/stats' % (self.uri_prefix, pool_id)
resp, body = self.get(uri)
+ self.rest_client.expected_success(200, resp.status)
body = _root_tag_fetcher_and_xml_to_json_parse(body)
return resp, body
@@ -255,6 +277,7 @@
uri = '%s/agents/%s/dhcp-networks' % (self.uri_prefix, agent_id)
network = common.Element("network_id", network_id)
resp, body = self.post(uri, str(common.Document(network)))
+ self.rest_client.expected_success(201, resp.status)
body = _root_tag_fetcher_and_xml_to_json_parse(body)
return resp, body
diff --git a/tempest/services/orchestration/json/orchestration_client.py b/tempest/services/orchestration/json/orchestration_client.py
index 46b0ec4..dd166dd 100644
--- a/tempest/services/orchestration/json/orchestration_client.py
+++ b/tempest/services/orchestration/json/orchestration_client.py
@@ -41,6 +41,7 @@
uri += '?%s' % urllib.urlencode(params)
resp, body = self.get(uri)
+ self.expected_success(200, resp.status)
body = json.loads(body)
return resp, body['stacks']
@@ -58,6 +59,7 @@
files)
uri = 'stacks'
resp, body = self.post(uri, headers=headers, body=body)
+ self.expected_success(201, resp.status)
return resp, body
def update_stack(self, stack_identifier, name, disable_rollback=True,
@@ -74,6 +76,7 @@
uri = "stacks/%s" % stack_identifier
resp, body = self.put(uri, headers=headers, body=body)
+ self.expected_success(202, resp.status)
return resp, body
def _prepare_update_create(self, name, disable_rollback=True,
@@ -106,6 +109,7 @@
"""Returns the details of a single stack."""
url = "stacks/%s" % stack_identifier
resp, body = self.get(url)
+ self.expected_success(200, resp.status)
body = json.loads(body)
return resp, body['stack']
@@ -114,6 +118,7 @@
url = 'stacks/%s/actions' % stack_identifier
body = {'suspend': None}
resp, body = self.post(url, json.dumps(body))
+ self.expected_success(200, resp.status)
return resp, body
def resume_stack(self, stack_identifier):
@@ -121,12 +126,14 @@
url = 'stacks/%s/actions' % stack_identifier
body = {'resume': None}
resp, body = self.post(url, json.dumps(body))
+ self.expected_success(200, resp.status)
return resp, body
def list_resources(self, stack_identifier):
"""Returns the details of a single resource."""
url = "stacks/%s/resources" % stack_identifier
resp, body = self.get(url)
+ self.expected_success(200, resp.status)
body = json.loads(body)
return resp, body['resources']
@@ -134,12 +141,15 @@
"""Returns the details of a single resource."""
url = "stacks/%s/resources/%s" % (stack_identifier, resource_name)
resp, body = self.get(url)
+ self.expected_success(200, resp.status)
body = json.loads(body)
return resp, body['resource']
def delete_stack(self, stack_identifier):
"""Deletes the specified Stack."""
- return self.delete("stacks/%s" % str(stack_identifier))
+ resp, _ = self.delete("stacks/%s" % str(stack_identifier))
+ self.expected_success(204, resp.status)
+ return resp
def wait_for_resource_status(self, stack_identifier, resource_name,
status, failure_pattern='^.*_FAILED$'):
@@ -208,6 +218,7 @@
url = ('stacks/{stack_identifier}/resources/{resource_name}'
'/metadata'.format(**locals()))
resp, body = self.get(url)
+ self.expected_success(200, resp.status)
body = json.loads(body)
return resp, body['metadata']
@@ -215,6 +226,7 @@
"""Returns list of all events for a stack."""
url = 'stacks/{stack_identifier}/events'.format(**locals())
resp, body = self.get(url)
+ self.expected_success(200, resp.status)
body = json.loads(body)
return resp, body['events']
@@ -223,6 +235,7 @@
url = ('stacks/{stack_identifier}/resources/{resource_name}'
'/events'.format(**locals()))
resp, body = self.get(url)
+ self.expected_success(200, resp.status)
body = json.loads(body)
return resp, body['events']
@@ -231,6 +244,7 @@
url = ('stacks/{stack_identifier}/resources/{resource_name}/events'
'/{event_id}'.format(**locals()))
resp, body = self.get(url)
+ self.expected_success(200, resp.status)
body = json.loads(body)
return resp, body['event']
@@ -238,6 +252,7 @@
"""Returns the template for the stack."""
url = ('stacks/{stack_identifier}/template'.format(**locals()))
resp, body = self.get(url)
+ self.expected_success(200, resp.status)
body = json.loads(body)
return resp, body
@@ -245,6 +260,7 @@
"""Returns the validation request result."""
post_body = json.dumps(post_body)
resp, body = self.post('validate', post_body)
+ self.expected_success(200, resp.status)
body = json.loads(body)
return resp, body
@@ -271,7 +287,7 @@
url = 'software_configs'
resp, body = self.post(url, headers=headers, body=body)
- self.expected_success(200, resp)
+ self.expected_success(200, resp.status)
body = json.loads(body)
return body
@@ -279,7 +295,7 @@
"""Returns a software configuration resource."""
url = 'software_configs/%s' % str(conf_id)
resp, body = self.get(url)
- self.expected_success(200, resp)
+ self.expected_success(200, resp.status)
body = json.loads(body)
return body
@@ -287,7 +303,7 @@
"""Deletes a specific software configuration."""
url = 'software_configs/%s' % str(conf_id)
resp, _ = self.delete(url)
- self.expected_success(204, resp)
+ self.expected_success(204, resp.status)
def create_software_deploy(self, server_id=None, config_id=None,
action=None, status=None,
@@ -300,7 +316,7 @@
url = 'software_deployments'
resp, body = self.post(url, headers=headers, body=body)
- self.expected_success(200, resp)
+ self.expected_success(200, resp.status)
body = json.loads(body)
return body
@@ -315,7 +331,7 @@
url = 'software_deployments/%s' % str(deploy_id)
resp, body = self.put(url, headers=headers, body=body)
- self.expected_success(200, resp)
+ self.expected_success(200, resp.status)
body = json.loads(body)
return body
@@ -323,7 +339,7 @@
"""Returns a list of all deployments."""
url = 'software_deployments'
resp, body = self.get(url)
- self.expected_success(200, resp)
+ self.expected_success(200, resp.status)
body = json.loads(body)
return body
@@ -331,7 +347,7 @@
"""Returns a specific software deployment."""
url = 'software_deployments/%s' % str(deploy_id)
resp, body = self.get(url)
- self.expected_success(200, resp)
+ self.expected_success(200, resp.status)
body = json.loads(body)
return body
@@ -339,7 +355,7 @@
"""Return a config metadata for a specific server."""
url = 'software_deployments/metadata/%s' % server_id
resp, body = self.get(url)
- self.expected_success(200, resp)
+ self.expected_success(200, resp.status)
body = json.loads(body)
return body
@@ -347,7 +363,7 @@
"""Deletes a specific software deployment."""
url = 'software_deployments/%s' % str(deploy_id)
resp, _ = self.delete(url)
- self.expected_success(204, resp)
+ self.expected_success(204, resp.status)
def _prep_software_config_create(self, name=None, conf=None, group=None,
inputs=None, outputs=None, options=None):
diff --git a/tempest/stress/stressaction.py b/tempest/stress/stressaction.py
index f6770ab..286e022 100644
--- a/tempest/stress/stressaction.py
+++ b/tempest/stress/stressaction.py
@@ -12,12 +12,16 @@
# License for the specific language governing permissions and limitations
# under the License.
+import abc
import signal
import sys
+import six
+
from tempest.openstack.common import log as logging
+@six.add_metaclass(abc.ABCMeta)
class StressAction(object):
def __init__(self, manager, max_runs=None, stop_on_error=False):
@@ -83,6 +87,7 @@
self.tearDown()
sys.exit(1)
+ @abc.abstractmethod
def run(self):
"""This method is where the stress test code runs."""
- raise NotImplemented()
+ return
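With run() now declared abstract, any concrete stress action must override it, and instantiating a subclass that does not will raise TypeError. A minimal sketch of that contract using a stand-in base class rather than the real StressAction:

    import abc

    import six


    @six.add_metaclass(abc.ABCMeta)
    class StressActionSketch(object):
        @abc.abstractmethod
        def run(self):
            """Subclasses put the stress-test body here."""


    class NoopAction(StressActionSketch):
        def run(self):
            return None


    NoopAction().run()        # fine, run() is implemented
    # StressActionSketch()    # would raise TypeError: abstract method run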
diff --git a/tempest/test.py b/tempest/test.py
index 59da2f9..f34933e 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -70,7 +70,7 @@
"""A decorator used to wrap the setUpClass for cleaning up resources
when setUpClass failed.
"""
-
+ @functools.wraps(f)
def decorator(cls):
try:
f(cls)
@@ -399,25 +399,6 @@
cls.admin_client = os_admin.negative_client
@staticmethod
- def load_schema(file_or_dict):
- """
- Loads a schema from a file_or_dict on a specified location.
-
- :param file_or_dict: just a dict or filename
- """
- # NOTE(mkoderer): we will get rid of this function when all test are
- # ported to dicts
- if isinstance(file_or_dict, dict):
- return file_or_dict
-
- # NOTE(mkoderer): must be extended for xml support
- fn = os.path.join(
- os.path.abspath(os.path.dirname(os.path.dirname(__file__))),
- "etc", "schemas", file_or_dict)
- LOG.debug("Open schema file: %s" % (fn))
- return json.load(open(fn))
-
- @staticmethod
def load_tests(*args):
"""
Wrapper for testscenarios to set the mandatory scenarios variable
@@ -460,7 +441,6 @@
the data is used to generate query strings appended to the url,
otherwise for the body of the http call.
"""
- description = NegativeAutoTest.load_schema(description)
LOG.debug(description)
generator = importutils.import_class(
CONF.negative.test_generator)()
@@ -514,7 +494,6 @@
otherwise for the body of the http call.
"""
- description = NegativeAutoTest.load_schema(description)
LOG.info("Executing %s" % description["name"])
LOG.debug(description)
method = description["http-method"]
@@ -604,8 +583,6 @@
"""
@attr(type=['negative', 'gate'])
def generic_test(self):
- if hasattr(self, '_schema_file'):
- self.execute(self._schema_file)
if hasattr(self, '_schema'):
self.execute(self._schema)
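The @functools.wraps(f) added to the safe_setup decorator earlier in this file keeps the wrapped setUpClass's name and docstring intact, which matters for test reporting. In miniature, with a throwaway decorator standing in for safe_setup:

    import functools

    def wrapped_sketch(f):
        @functools.wraps(f)          # preserves f.__name__ and f.__doc__
        def decorator(*args, **kwargs):
            return f(*args, **kwargs)
        return decorator

    @wrapped_sketch
    def setUpClass():
        """Example setup."""

    print(setUpClass.__name__)       # 'setUpClass' rather than 'decorator'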
diff --git a/tempest/tests/negative/test_negative_auto_test.py b/tempest/tests/negative/test_negative_auto_test.py
index edff3a8..dddd083 100644
--- a/tempest/tests/negative/test_negative_auto_test.py
+++ b/tempest/tests/negative/test_negative_auto_test.py
@@ -13,10 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
-
-import mock
-
from tempest import config
import tempest.test as test
from tempest.tests import base
@@ -58,11 +54,9 @@
for entry in entries:
self.assertIsNotNone(entry[1]['resource'])
- @mock.patch('tempest.test.NegativeAutoTest.load_schema')
- def test_generate_scenario(self, open_mock):
- open_mock.return_value = self.fake_input_desc
+ def test_generate_scenario(self):
scenarios = test.NegativeAutoTest.\
- generate_scenario(None)
+ generate_scenario(self.fake_input_desc)
self.assertIsInstance(scenarios, list)
for scenario in scenarios:
@@ -72,13 +66,3 @@
self._check_prop_entries(scenarios, "prop_minRam")
self._check_prop_entries(scenarios, "prop_minDisk")
self._check_resource_entries(scenarios, "inv_res")
-
- def test_load_schema(self):
- json_schema = json.dumps(self.fake_input_desc)
- with mock.patch('tempest.test.open',
- mock.mock_open(read_data=json_schema),
- create=True):
- return_file = test.NegativeAutoTest.load_schema('filename')
- self.assertEqual(return_file, self.fake_input_desc)
- return_dict = test.NegativeAutoTest.load_schema(self.fake_input_desc)
- self.assertEqual(return_file, return_dict)
diff --git a/tempest/tests/test_decorators.py b/tempest/tests/test_decorators.py
index 6b678f7..12104ec 100644
--- a/tempest/tests/test_decorators.py
+++ b/tempest/tests/test_decorators.py
@@ -237,7 +237,7 @@
class TestSimpleNegativeDecorator(BaseDecoratorsTest):
@test.SimpleNegativeAutoTest
class FakeNegativeJSONTest(test.NegativeAutoTest):
- _schema_file = 'fake/schemas/file.json'
+ _schema = {}
def test_testfunc_exist(self):
self.assertIn("test_fake_negative", dir(self.FakeNegativeJSONTest))
@@ -247,4 +247,4 @@
obj = self.FakeNegativeJSONTest("test_fake_negative")
self.assertIn("test_fake_negative", dir(obj))
obj.test_fake_negative()
- mock.assert_called_once_with(self.FakeNegativeJSONTest._schema_file)
+ mock.assert_called_once_with(self.FakeNegativeJSONTest._schema)
diff --git a/tempest/tests/test_xml_utils.py b/tempest/tests/test_xml_utils.py
new file mode 100644
index 0000000..53e31c4
--- /dev/null
+++ b/tempest/tests/test_xml_utils.py
@@ -0,0 +1,35 @@
+#
+# Copyright 2014 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.common import xml_utils
+from tempest.tests import base
+
+
+class TestDocumentXML(base.TestCase):
+ def test_xml_document_ordering_version_encoding(self):
+ expected = '<?xml version="1.0" encoding="UTF-8"?>'
+ xml_out = str(xml_utils.Document())
+ self.assertEqual(expected, xml_out.strip())
+
+ xml_out = str(xml_utils.Document(encoding='UTF-8', version='1.0'))
+ self.assertEqual(expected, xml_out.strip())
+
+ xml_out = str(xml_utils.Document(version='1.0', encoding='UTF-8'))
+ self.assertEqual(expected, xml_out.strip())
+
+ def test_xml_document_additional_attrs(self):
+ expected = '<?xml version="1.0" encoding="UTF-8" foo="bar"?>'
+ xml_out = str(xml_utils.Document(foo='bar'))
+ self.assertEqual(expected, xml_out.strip())
diff --git a/tox.ini b/tox.ini
index edcb901..a071d4b 100644
--- a/tox.ini
+++ b/tox.ini
@@ -32,7 +32,6 @@
[testenv:all]
sitepackages = True
-setenv = VIRTUAL_ENV={envdir}
commands =
bash tools/pretty_tox.sh '{posargs}'