Merge "Verify list_virtual_interfaces attributes of API"
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index 2fdbb7e..70c791b 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -101,14 +101,32 @@
# Options defined in tempest.config
#
-# Catalog type of the baremetal provisioning service. (string
+# Catalog type of the baremetal provisioning service (string
# value)
#catalog_type=baremetal
+# Whether the Ironic nova-compute driver is enabled (boolean
+# value)
+#driver_enabled=false
+
# The endpoint type to use for the baremetal provisioning
-# service. (string value)
+# service (string value)
#endpoint_type=publicURL
+# Timeout for Ironic node to completely provision (integer
+# value)
+#active_timeout=300
+
+# Timeout for association of Nova instance and Ironic node
+# (integer value)
+#association_timeout=10
+
+# Timeout for Ironic power transitions. (integer value)
+#power_timeout=20
+
+# Timeout for unprovisioning an Ironic node. (integer value)
+#unprovision_timeout=20
+
[boto]
@@ -193,7 +211,7 @@
# admin credentials are known. (boolean value)
#allow_tenant_isolation=false
-# Valid secondary image reference to be used in tests. (string
+# Valid primary image reference to be used in tests. (string
# value)
#image_ref={$IMAGE_ID}
@@ -734,7 +752,7 @@
# Timeout in seconds to wait for a stack to build. (integer
# value)
-#build_timeout=600
+#build_timeout=1200
# Instance type for tests. Needs to be big enough for a full
# OS plus the test workload (string value)
@@ -960,6 +978,10 @@
# value)
#disk_format=raw
+# Default size in GB for volumes created by volumes tests
+# (integer value)
+#volume_size=1
+
[volume-feature-enabled]
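The new [baremetal] options above are consumed by tests through tempest's oslo.config CONF object. A minimal, purely illustrative sketch of waiting on an Ironic node with the configured active_timeout (the wait helper, the show_node call shape, and the TimeoutException usage are assumptions based on the existing tempest clients, not part of this change):

import time

from tempest import config
from tempest import exceptions

CONF = config.CONF


def wait_for_provision_state(client, node_id, target_state):
    # Poll the node until it reaches target_state or the [baremetal]
    # active_timeout added above elapses.
    start = time.time()
    while time.time() - start < CONF.baremetal.active_timeout:
        _, node = client.show_node(node_id)
        if node['provision_state'] == target_state:
            return node
        time.sleep(1)
    raise exceptions.TimeoutException(
        'Node %s did not reach state %s within %ss'
        % (node_id, target_state, CONF.baremetal.active_timeout))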
diff --git a/requirements.txt b/requirements.txt
index 3521df0..e97eece 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,4 @@
-pbr>=0.6,<1.0
+pbr>=0.6,!=0.7,<1.0
anyjson>=0.3.3
httplib2>=0.7.5
jsonschema>=2.0.0,<3.0.0
@@ -13,10 +13,11 @@
python-neutronclient>=2.3.4,<3
python-cinderclient>=1.0.6
python-heatclient>=0.2.3
+python-ironicclient
python-saharaclient>=0.6.0
python-swiftclient>=1.6
testresources>=0.2.4
-keyring>=1.6.1,<2.0,>=2.1
+keyring>=2.1
testrepository>=0.0.18
oslo.config>=1.2.0
six>=1.5.2
diff --git a/tempest/api/baremetal/base.py b/tempest/api/baremetal/base.py
index 2e745f8..021adaf 100644
--- a/tempest/api/baremetal/base.py
+++ b/tempest/api/baremetal/base.py
@@ -106,17 +106,20 @@
@classmethod
@creates('port')
- def create_port(cls, node_id, address=None):
+ def create_port(cls, node_id, address, extra=None, uuid=None):
"""
Wrapper utility for creating test ports.
- :param address: MAC address of the port. If not supplied, a random
- value will be generated.
+ :param address: MAC address of the port.
+ :param extra: Metadata of the port. If not supplied, an empty
+ dictionary will be created.
+ :param uuid: UUID of the port.
:return: Created port.
"""
- address = address or data_utils.rand_mac_address()
- resp, body = cls.client.create_port(address=address, node_id=node_id)
+ extra = extra or {}
+ resp, body = cls.client.create_port(address=address, node_id=node_id,
+ extra=extra, uuid=uuid)
return {'port': body, 'response': resp}
@@ -170,3 +173,12 @@
cls.created_objects['port'].remove(port_id)
return resp
+
+ def validate_self_link(self, resource, uuid, link):
+ """Check whether the given self link formatted correctly."""
+ expected_link = "{base}/{pref}/{res}/{uuid}".format(
+ base=self.client.base_url,
+ pref=self.client.uri_prefix,
+ res=resource,
+ uuid=uuid)
+ self.assertEqual(expected_link, link)
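With address now mandatory and extra/uuid optional, callers of create_port must supply a MAC themselves, and self links can be checked with validate_self_link. A minimal sketch of a test built on the updated helpers (the chassis/node setup mirrors the existing port tests; this test itself is illustrative and not part of the change):

from tempest.api.baremetal import base
from tempest.common.utils import data_utils
from tempest import test


class TestPortSelfLink(base.BaseBaremetalTest):

    @test.attr(type='smoke')
    def test_port_self_link(self):
        chassis = self.create_chassis()['chassis']
        node = self.create_node(chassis['uuid'])['node']
        # address is now a required argument of create_port
        port = self.create_port(node_id=node['uuid'],
                                address=data_utils.rand_mac_address(),
                                extra={'key': 'value'})['port']
        resp, body = self.client.show_port(port['uuid'])
        self.assertEqual(200, resp.status)
        # the self link should be {base_url}/{uri_prefix}/ports/{uuid}
        self.validate_self_link('ports', body['uuid'],
                                body['links'][0]['href'])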
diff --git a/tempest/api/baremetal/test_drivers.py b/tempest/api/baremetal/test_drivers.py
new file mode 100644
index 0000000..445ca60
--- /dev/null
+++ b/tempest/api/baremetal/test_drivers.py
@@ -0,0 +1,26 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.baremetal import base
+from tempest import test
+
+
+class TestDrivers(base.BaseBaremetalTest):
+ """Tests for drivers."""
+
+ @test.attr(type="smoke")
+ def test_list_drivers(self):
+ resp, drivers = self.client.list_drivers()
+ self.assertEqual('200', resp['status'])
+ self.assertIn('fake', [d['name'] for d in drivers['drivers']])
diff --git a/tempest/api/baremetal/test_ports.py b/tempest/api/baremetal/test_ports.py
index fb2acc7..8b76811 100644
--- a/tempest/api/baremetal/test_ports.py
+++ b/tempest/api/baremetal/test_ports.py
@@ -30,54 +30,268 @@
node_id = self.node['uuid']
address = data_utils.rand_mac_address()
- port = self.create_port(node_id=node_id, address=address)['port']
+ result = self.create_port(node_id=node_id, address=address)
- self.assertEqual(port['address'], address)
- self.assertEqual(port['node_uuid'], node_id)
+ port = result['port']
+
+ resp, body = self.client.show_port(port['uuid'])
+
+ self.assertEqual(200, resp.status)
+ self.assertEqual(port['uuid'], body['uuid'])
+ self.assertEqual(address, body['address'])
+ self.assertEqual({}, body['extra'])
+ self.assertEqual(node_id, body['node_uuid'])
+
+ @test.attr(type='smoke')
+ def test_create_port_specifying_uuid(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+ uuid = data_utils.rand_uuid()
+
+ self.create_port(node_id=node_id, address=address, uuid=uuid)
+
+ resp, body = self.client.show_port(uuid)
+
+ self.assertEqual(200, resp.status)
+ self.assertEqual(uuid, body['uuid'])
+ self.assertEqual(address, body['address'])
+ self.assertEqual({}, body['extra'])
+ self.assertEqual(node_id, body['node_uuid'])
+
+ @test.attr(type='smoke')
+ def test_create_port_with_extra(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+ extra = {'key': 'value'}
+
+ result = self.create_port(node_id=node_id, address=address,
+ extra=extra)
+ port = result['port']
+
+ resp, body = self.client.show_port(port['uuid'])
+
+ self.assertEqual(200, resp.status)
+ self.assertEqual(port['uuid'], body['uuid'])
+ self.assertEqual(address, body['address'])
+ self.assertEqual(extra, body['extra'])
+ self.assertEqual(node_id, body['node_uuid'])
@test.attr(type='smoke')
def test_delete_port(self):
node_id = self.node['uuid']
- port_id = self.create_port(node_id=node_id)['port']['uuid']
+ address = data_utils.rand_mac_address()
+ port_id = self.create_port(node_id=node_id, address=address)['port'][
+ 'uuid']
resp = self.delete_port(port_id)
- self.assertEqual(resp['status'], '204')
+ self.assertEqual(204, resp.status)
self.assertRaises(exc.NotFound, self.client.show_port, port_id)
@test.attr(type='smoke')
def test_show_port(self):
node_id = self.node['uuid']
address = data_utils.rand_mac_address()
+ extra = {'key': 'value'}
- port_id = self.create_port(node_id=node_id,
- address=address)['port']['uuid']
+ port_id = self.create_port(node_id=node_id, address=address,
+ extra=extra)['port']['uuid']
resp, port = self.client.show_port(port_id)
- self.assertEqual(port['uuid'], port_id)
- self.assertEqual(port['address'], address)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(port_id, port['uuid'])
+ self.assertEqual(address, port['address'])
+ self.assertEqual(extra, port['extra'])
+
+ @test.attr(type='smoke')
+ def test_show_port_with_links(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+
+ port_id = self.create_port(node_id=node_id, address=address)['port'][
+ 'uuid']
+
+ resp, body = self.client.show_port(port_id)
+
+ self.assertEqual(200, resp.status)
+ self.assertIn('links', body.keys())
+ self.assertEqual(2, len(body['links']))
+ self.assertIn(port_id, body['links'][0]['href'])
@test.attr(type='smoke')
def test_list_ports(self):
node_id = self.node['uuid']
- uuids = [self.create_port(node_id=node_id)['port']['uuid']
- for i in range(0, 5)]
+ uuids = [self.create_port(node_id=node_id,
+ address=data_utils.rand_mac_address())
+ ['port']['uuid'] for i in xrange(5)]
resp, body = self.client.list_ports()
+ self.assertEqual(200, resp.status)
loaded_uuids = [p['uuid'] for p in body['ports']]
- for u in uuids:
- self.assertIn(u, loaded_uuids)
+ for uuid in uuids:
+ self.assertIn(uuid, loaded_uuids)
+
+ # Verify self links.
+ for port in body['ports']:
+ self.validate_self_link('ports', port['uuid'],
+ port['links'][0]['href'])
@test.attr(type='smoke')
- def test_update_port(self):
+ def test_list_with_limit(self):
node_id = self.node['uuid']
- port_id = self.create_port(node_id=node_id)['port']['uuid']
+
+ for i in xrange(5):
+ self.create_port(node_id=node_id,
+ address=data_utils.rand_mac_address())
+
+ resp, body = self.client.list_ports(limit=3)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(3, len(body['ports']))
+
+ next_marker = body['ports'][-1]['uuid']
+ self.assertIn(next_marker, body['next'])
+
+ def test_list_ports_details(self):
+ node_id = self.node['uuid']
+
+ uuids = [
+ self.create_port(node_id=node_id,
+ address=data_utils.rand_mac_address())
+ ['port']['uuid'] for i in range(0, 5)]
+
+ resp, body = self.client.list_ports_detail()
+ self.assertEqual(200, resp.status)
+
+ ports_dict = {port['uuid']: port for port in body['ports']
+ if port['uuid'] in uuids}
+
+ for uuid in uuids:
+ self.assertIn(uuid, ports_dict)
+ port = ports_dict[uuid]
+ self.assertIn('extra', port)
+ self.assertIn('node_uuid', port)
+ # never expose the node_id
+ self.assertNotIn('node_id', port)
+ # Verify self link.
+ self.validate_self_link('ports', port['uuid'],
+ port['links'][0]['href'])
+
+ @test.attr(type='smoke')
+ def test_update_port_replace(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+ extra = {'key1': 'value1', 'key2': 'value2', 'key3': 'value3'}
+
+ port_id = self.create_port(node_id=node_id, address=address,
+ extra=extra)['port']['uuid']
new_address = data_utils.rand_mac_address()
- self.client.update_port(port_id, address=new_address)
+ new_extra = {'key1': 'new-value1', 'key2': 'new-value2',
+ 'key3': 'new-value3'}
+
+ patch = [{'path': '/address',
+ 'op': 'replace',
+ 'value': new_address},
+ {'path': '/extra/key1',
+ 'op': 'replace',
+ 'value': new_extra['key1']},
+ {'path': '/extra/key2',
+ 'op': 'replace',
+ 'value': new_extra['key2']},
+ {'path': '/extra/key3',
+ 'op': 'replace',
+ 'value': new_extra['key3']}]
+
+ self.client.update_port(port_id, patch)
resp, body = self.client.show_port(port_id)
- self.assertEqual(body['address'], new_address)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(new_address, body['address'])
+ self.assertEqual(new_extra, body['extra'])
+
+ @test.attr(type='smoke')
+ def test_update_port_remove(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+ extra = {'key1': 'value1', 'key2': 'value2', 'key3': 'value3'}
+
+ port_id = self.create_port(node_id=node_id, address=address,
+ extra=extra)['port']['uuid']
+
+ # Removing one item from the collection
+ resp, _ = self.client.update_port(port_id, [{'path': '/extra/key2',
+ 'op': 'remove'}])
+ self.assertEqual(200, resp.status)
+ extra.pop('key2')
+ resp, body = self.client.show_port(port_id)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(extra, body['extra'])
+
+ # Removing the collection
+ resp, _ = self.client.update_port(port_id, [{'path': '/extra',
+ 'op': 'remove'}])
+ self.assertEqual(200, resp.status)
+ resp, body = self.client.show_port(port_id)
+ self.assertEqual(200, resp.status)
+ self.assertEqual({}, body['extra'])
+
+ # Assert nothing else was changed
+ self.assertEqual(node_id, body['node_uuid'])
+ self.assertEqual(address, body['address'])
+
+ @test.attr(type='smoke')
+ def test_update_port_add(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+
+ port_id = self.create_port(node_id=node_id, address=address)['port'][
+ 'uuid']
+
+ extra = {'key1': 'value1', 'key2': 'value2'}
+
+ patch = [{'path': '/extra/key1',
+ 'op': 'add',
+ 'value': extra['key1']},
+ {'path': '/extra/key2',
+ 'op': 'add',
+ 'value': extra['key2']}]
+
+ self.client.update_port(port_id, patch)
+
+ resp, body = self.client.show_port(port_id)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(extra, body['extra'])
+
+ @test.attr(type='smoke')
+ def test_update_port_mixed_ops(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+ extra = {'key1': 'value1', 'key2': 'value2'}
+
+ port_id = self.create_port(node_id=node_id, address=address,
+ extra=extra)['port']['uuid']
+
+ new_address = data_utils.rand_mac_address()
+ new_extra = {'key1': 'new-value1', 'key3': 'new-value3'}
+
+ patch = [{'path': '/address',
+ 'op': 'replace',
+ 'value': new_address},
+ {'path': '/extra/key1',
+ 'op': 'replace',
+ 'value': new_extra['key1']},
+ {'path': '/extra/key2',
+ 'op': 'remove'},
+ {'path': '/extra/key3',
+ 'op': 'add',
+ 'value': new_extra['key3']}]
+
+ self.client.update_port(port_id, patch)
+
+ resp, body = self.client.show_port(port_id)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(new_address, body['address'])
+ self.assertEqual(new_extra, body['extra'])
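The update_port calls above take a JSON-patch style document: a list of dicts with 'path', 'op' ('add', 'replace' or 'remove') and, except for 'remove', a 'value'. A small illustrative helper for building such patches against the /extra collection (the helper name and its use are assumptions, not part of this change):

def make_extra_patch(extra, op='replace'):
    # One patch entry per key under /extra; 'remove' entries carry no
    # value, matching the operations exercised by the tests above.
    patch = []
    for key, value in extra.items():
        entry = {'path': '/extra/%s' % key, 'op': op}
        if op != 'remove':
            entry['value'] = value
        patch.append(entry)
    return patch


# e.g. self.client.update_port(port_id, make_extra_patch({'key1': 'v1'}))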
diff --git a/tempest/api/baremetal/test_ports_negative.py b/tempest/api/baremetal/test_ports_negative.py
index 6cb8812..4cbe00e 100644
--- a/tempest/api/baremetal/test_ports_negative.py
+++ b/tempest/api/baremetal/test_ports_negative.py
@@ -25,16 +25,346 @@
chassis = self.create_chassis()['chassis']
self.node = self.create_node(chassis['uuid'])['node']
- @test.attr(type='negative')
- def test_create_port_invalid_mac(self):
+ @test.attr(type=['negative', 'smoke'])
+ def test_create_port_malformed_mac(self):
node_id = self.node['uuid']
- address = 'not an uuid'
+ address = 'malformed:mac'
self.assertRaises(exc.BadRequest,
self.create_port, node_id=node_id, address=address)
- @test.attr(type='negative')
- def test_create_port_wrong_node_id(self):
- node_id = str(data_utils.rand_uuid())
+ @test.attr(type=['negative', 'smoke'])
+ def test_create_port_malformed_extra(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+ extra = {'key': 0.123}
+ self.assertRaises(exc.BadRequest,
+ self.create_port, node_id=node_id,
+ address=address, extra=extra)
- self.assertRaises(exc.BadRequest, self.create_port, node_id=node_id)
+ @test.attr(type=['negative', 'smoke'])
+ def test_create_port_nonexistent_node_id(self):
+ node_id = str(data_utils.rand_uuid())
+ address = data_utils.rand_mac_address()
+ self.assertRaises(exc.BadRequest, self.create_port, node_id=node_id,
+ address=address)
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_show_port_malformed_uuid(self):
+ self.assertRaises(exc.BadRequest, self.client.show_port,
+ 'malformed:uuid')
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_show_port_nonexistent_uuid(self):
+ self.assertRaises(exc.NotFound, self.client.show_port,
+ data_utils.rand_uuid())
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_show_port_by_mac_not_allowed(self):
+ self.assertRaises(exc.BadRequest, self.client.show_port,
+ data_utils.rand_mac_address())
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_create_port_duplicated_port_uuid(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+ uuid = data_utils.rand_uuid()
+
+ self.create_port(node_id=node_id, address=address, uuid=uuid)
+ self.assertRaises(exc.Conflict, self.create_port, node_id=node_id,
+ address=address, uuid=uuid)
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_create_port_no_mandatory_field_node_id(self):
+ address = data_utils.rand_mac_address()
+
+ self.assertRaises(exc.BadRequest, self.create_port, node_id=None,
+ address=address)
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_create_port_no_mandatory_field_mac(self):
+ node_id = self.node['uuid']
+
+ self.assertRaises(exc.BadRequest, self.create_port, node_id=node_id,
+ address=None)
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_create_port_malformed_port_uuid(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+ uuid = 'malformed:uuid'
+
+ self.assertRaises(exc.BadRequest, self.create_port, node_id=node_id,
+ address=address, uuid=uuid)
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_create_port_malformed_node_id(self):
+ address = data_utils.rand_mac_address()
+ self.assertRaises(exc.BadRequest, self.create_port,
+ node_id='malformed:nodeid', address=address)
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_create_port_duplicated_mac(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+ self.create_port(node_id=node_id, address=address)
+ self.assertRaises(exc.Conflict,
+ self.create_port, node_id=node_id,
+ address=address)
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_update_port_by_mac_not_allowed(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+ extra = {'key': 'value'}
+
+ self.create_port(node_id=node_id, address=address, extra=extra)
+
+ patch = [{'path': '/extra/key',
+ 'op': 'replace',
+ 'value': 'new-value'}]
+
+ self.assertRaises(exc.BadRequest,
+ self.client.update_port, address,
+ patch)
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_update_port_nonexistent(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+ extra = {'key': 'value'}
+
+ port_id = self.create_port(node_id=node_id, address=address,
+ extra=extra)['port']['uuid']
+ self.client.delete_port(port_id)
+
+ patch = [{'path': '/extra/key',
+ 'op': 'replace',
+ 'value': 'new-value'}]
+ self.assertRaises(exc.NotFound,
+ self.client.update_port, port_id, patch)
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_update_port_malformed_port_uuid(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+
+ self.create_port(node_id=node_id, address=address)
+
+ new_address = data_utils.rand_mac_address()
+ self.assertRaises(exc.BadRequest, self.client.update_port,
+ uuid='malformed:uuid',
+ patch=[{'path': '/address', 'op': 'replace',
+ 'value': new_address}])
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_update_port_add_malformed_extra(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+
+ port_id = self.create_port(node_id=node_id, address=address)['port'][
+ 'uuid']
+
+ self.assertRaises(exc.BadRequest, self.client.update_port, port_id,
+ [{'path': '/extra/key', 'op': 'add',
+ 'value': 0.123}])
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_update_port_add_whole_malformed_extra(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+
+ port_id = self.create_port(node_id=node_id, address=address)['port'][
+ 'uuid']
+
+ self.assertRaises(exc.BadRequest, self.client.update_port, port_id,
+ [{'path': '/extra',
+ 'op': 'add',
+ 'value': [1, 2, 3, 4, 'a']}])
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_update_port_add_nonexistent_property(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+
+ port_id = self.create_port(node_id=node_id, address=address)['port'][
+ 'uuid']
+ self.assertRaises(exc.BadRequest, self.client.update_port, port_id,
+ [{'path': '/nonexistent', 'op': 'add',
+ 'value': 'value'}])
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_update_port_replace_node_id_with_malformed(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+
+ port_id = self.create_port(node_id=node_id,
+ address=address)['port']['uuid']
+
+ patch = [{'path': '/node_uuid',
+ 'op': 'replace',
+ 'value': 'malformed:node_uuid'}]
+ self.assertRaises(exc.BadRequest,
+ self.client.update_port, port_id, patch)
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_update_port_replace_mac_with_duplicated(self):
+ node_id = self.node['uuid']
+ address1 = data_utils.rand_mac_address()
+ address2 = data_utils.rand_mac_address()
+
+ self.create_port(node_id=node_id, address=address1)
+ port_id = self.create_port(node_id=node_id,
+ address=address2)['port']['uuid']
+ patch = [{'path': '/address',
+ 'op': 'replace',
+ 'value': address1}]
+ self.assertRaises(exc.Conflict,
+ self.client.update_port, port_id, patch)
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_update_port_replace_node_id_with_nonexistent(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+
+ port_id = self.create_port(node_id=node_id,
+ address=address)['port']['uuid']
+
+ patch = [{'path': '/node_uuid',
+ 'op': 'replace',
+ 'value': data_utils.rand_uuid()}]
+ self.assertRaises(exc.BadRequest,
+ self.client.update_port, port_id, patch)
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_update_port_replace_mac_with_malformed(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+
+ port_id = self.create_port(node_id=node_id,
+ address=address)['port']['uuid']
+ patch = [{'path': '/address',
+ 'op': 'replace',
+ 'value': 'malformed:mac'}]
+
+ self.assertRaises(exc.BadRequest,
+ self.client.update_port, port_id, patch)
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_update_port_replace_extra_item_with_malformed(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+ extra = {'key': 'value'}
+
+ port_id = self.create_port(node_id=node_id,
+ address=address,
+ extra=extra)['port']['uuid']
+ patch = [{'path': '/extra/key',
+ 'op': 'replace',
+ 'value': 0.123}]
+
+ self.assertRaises(exc.BadRequest,
+ self.client.update_port, port_id, patch)
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_update_port_replace_whole_extra_with_malformed(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+ extra = {'key': 'value'}
+
+ port_id = self.create_port(node_id=node_id,
+ address=address,
+ extra=extra)['port']['uuid']
+ patch = [{'path': '/extra',
+ 'op': 'replace',
+ 'value': [1, 2, 3, 4, 'a']}]
+
+ self.assertRaises(exc.BadRequest,
+ self.client.update_port, port_id, patch)
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_update_port_replace_nonexistent_property(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+
+ port_id = self.create_port(node_id=node_id,
+ address=address)['port']['uuid']
+
+ patch = [{'path': '/nonexistent', 'op': 'replace', 'value': 'value'}]
+
+ self.assertRaises(exc.BadRequest,
+ self.client.update_port, port_id, patch)
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_update_port_remove_mandatory_field_mac(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+
+ port_id = self.create_port(node_id=node_id, address=address)['port'][
+ 'uuid']
+ self.assertRaises(exc.BadRequest, self.client.update_port, port_id,
+ [{'path': '/address', 'op': 'remove'}])
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_update_port_remove_mandatory_field_port_uuid(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+
+ port_id = self.create_port(node_id=node_id, address=address)['port'][
+ 'uuid']
+ self.assertRaises(exc.BadRequest, self.client.update_port, port_id,
+ [{'path': '/uuid', 'op': 'remove'}])
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_update_port_remove_nonexistent_property(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+
+ port_id = self.create_port(node_id=node_id, address=address)['port'][
+ 'uuid']
+ self.assertRaises(exc.BadRequest, self.client.update_port, port_id,
+ [{'path': '/nonexistent', 'op': 'remove'}])
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_delete_port_by_mac_not_allowed(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+
+ self.create_port(node_id=node_id, address=address)
+ self.assertRaises(exc.BadRequest, self.client.delete_port, address)
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_update_port_mixed_ops_integrity(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+ extra = {'key1': 'value1', 'key2': 'value2'}
+
+ port_id = self.create_port(node_id=node_id, address=address,
+ extra=extra)['port']['uuid']
+
+ new_address = data_utils.rand_mac_address()
+ new_extra = {'key1': 'new-value1', 'key3': 'new-value3'}
+
+ patch = [{'path': '/address',
+ 'op': 'replace',
+ 'value': new_address},
+ {'path': '/extra/key1',
+ 'op': 'replace',
+ 'value': new_extra['key1']},
+ {'path': '/extra/key2',
+ 'op': 'remove'},
+ {'path': '/extra/key3',
+ 'op': 'add',
+ 'value': new_extra['key3']},
+ {'path': '/nonexistent',
+ 'op': 'replace',
+ 'value': 'value'}]
+
+ self.assertRaises(exc.BadRequest, self.client.update_port, port_id,
+ patch)
+
+ # patch should not be applied
+ resp, body = self.client.show_port(port_id)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(address, body['address'])
+ self.assertEqual(extra, body['extra'])
diff --git a/tempest/api/compute/admin/test_migrations.py b/tempest/api/compute/admin/test_migrations.py
new file mode 100644
index 0000000..514f1fa
--- /dev/null
+++ b/tempest/api/compute/admin/test_migrations.py
@@ -0,0 +1,55 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import testtools
+
+from tempest.api.compute import base
+from tempest import config
+from tempest import test
+
+CONF = config.CONF
+
+
+class MigrationsAdminTest(base.BaseV2ComputeAdminTest):
+
+ @classmethod
+ def setUpClass(cls):
+ super(MigrationsAdminTest, cls).setUpClass()
+ cls.client = cls.os_adm.migrations_client
+
+ @test.attr(type='gate')
+ def test_list_migrations(self):
+ # Admin can get the migrations list
+ resp, _ = self.client.list_migrations()
+ self.assertEqual(200, resp.status)
+
+ @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+ 'Resize not available.')
+ @test.attr(type='gate')
+ def test_list_migrations_in_flavor_resize_situation(self):
+ # Admin can get the migrations list which contains the resized server
+ resp, server = self.create_test_server(wait_until="ACTIVE")
+ server_id = server['id']
+
+ resp, _ = self.servers_client.resize(server_id, self.flavor_ref_alt)
+ self.assertEqual(202, resp.status)
+ self.servers_client.wait_for_server_status(server_id, 'VERIFY_RESIZE')
+ self.servers_client.confirm_resize(server_id)
+ self.servers_client.wait_for_server_status(server_id, 'ACTIVE')
+
+ resp, body = self.client.list_migrations()
+ self.assertEqual(200, resp.status)
+
+ instance_uuids = [x['instance_uuid'] for x in body]
+ self.assertIn(server_id, instance_uuids)
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index abd36a6..7631ea5 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -233,6 +233,7 @@
cls.os.instance_usages_audit_log_client
cls.hypervisor_client = cls.os.hypervisor_client
cls.certificates_client = cls.os.certificates_client
+ cls.migrations_client = cls.os.migrations_client
@classmethod
def create_image_from_server(cls, server_id, **kwargs):
@@ -339,6 +340,7 @@
cls.hosts_client = cls.os.hosts_v3_client
cls.quotas_client = cls.os.quotas_v3_client
cls.version_client = cls.os.version_v3_client
+ cls.migrations_client = cls.os.migrations_v3_client
@classmethod
def create_image_from_server(cls, server_id, **kwargs):
@@ -412,3 +414,4 @@
cls.hosts_admin_client = cls.os_adm.hosts_v3_client
cls.quotas_admin_client = cls.os_adm.quotas_v3_client
cls.agents_admin_client = cls.os_adm.agents_v3_client
+ cls.migrations_admin_client = cls.os_adm.migrations_v3_client
diff --git a/tempest/api/compute/servers/test_delete_server.py b/tempest/api/compute/servers/test_delete_server.py
index 451d08f..9e34922 100644
--- a/tempest/api/compute/servers/test_delete_server.py
+++ b/tempest/api/compute/servers/test_delete_server.py
@@ -102,6 +102,26 @@
self.assertEqual('204', resp['status'])
self.client.wait_for_server_termination(server['id'])
+ @test.attr(type='gate')
+ def test_delete_server_while_in_attached_volume(self):
+ # Delete a server while a volume is attached to it
+ volumes_client = self.volumes_extensions_client
+ device = '/dev/%s' % CONF.compute.volume_device_name
+ resp, server = self.create_test_server(wait_until='ACTIVE')
+
+ resp, volume = volumes_client.create_volume(1)
+ self.addCleanup(volumes_client.delete_volume, volume['id'])
+ volumes_client.wait_for_volume_status(volume['id'], 'available')
+ resp, body = self.client.attach_volume(server['id'],
+ volume['id'],
+ device=device)
+ volumes_client.wait_for_volume_status(volume['id'], 'in-use')
+
+ resp, _ = self.client.delete_server(server['id'])
+ self.assertEqual('204', resp['status'])
+ self.client.wait_for_server_termination(server['id'])
+ volumes_client.wait_for_volume_status(volume['id'], 'available')
+
class DeleteServersAdminTestJSON(base.BaseV2ComputeAdminTest):
# NOTE: Server creations of each test class should be under 10
diff --git a/tempest/api/compute/test_authorization.py b/tempest/api/compute/test_authorization.py
index 7f909d7..c87f24e 100644
--- a/tempest/api/compute/test_authorization.py
+++ b/tempest/api/compute/test_authorization.py
@@ -60,7 +60,7 @@
resp, cls.server = cls.client.get_server(server['id'])
name = data_utils.rand_name('image')
- resp, body = cls.client.create_image(server['id'], name)
+ resp, body = cls.images_client.create_image(server['id'], name)
image_id = data_utils.parse_image_id(resp['location'])
cls.images_client.wait_for_image_status(image_id, 'ACTIVE')
resp, cls.image = cls.images_client.get_image(image_id)
diff --git a/tempest/api/compute/v3/admin/test_migrations.py b/tempest/api/compute/v3/admin/test_migrations.py
new file mode 100644
index 0000000..e8bd473
--- /dev/null
+++ b/tempest/api/compute/v3/admin/test_migrations.py
@@ -0,0 +1,50 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import testtools
+
+from tempest.api.compute import base
+from tempest import config
+from tempest import test
+
+CONF = config.CONF
+
+
+class MigrationsAdminV3Test(base.BaseV3ComputeAdminTest):
+
+ @test.attr(type='gate')
+ def test_list_migrations(self):
+ # Admin can get the migrations list
+ resp, _ = self.migrations_admin_client.list_migrations()
+ self.assertEqual(200, resp.status)
+
+ @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+ 'Resize not available.')
+ @test.attr(type='gate')
+ def test_list_migrations_in_flavor_resize_situation(self):
+ # Admin can get the migrations list which contains the resized server
+ resp, server = self.create_test_server(wait_until="ACTIVE")
+ server_id = server['id']
+
+ resp, _ = self.servers_client.resize(server_id, self.flavor_ref_alt)
+ self.assertEqual(202, resp.status)
+ self.servers_client.wait_for_server_status(server_id, 'VERIFY_RESIZE')
+ self.servers_client.confirm_resize(server_id)
+ self.servers_client.wait_for_server_status(server_id, 'ACTIVE')
+
+ resp, body = self.migrations_admin_client.list_migrations()
+ self.assertEqual(200, resp.status)
+
+ instance_uuids = [x['instance_uuid'] for x in body]
+ self.assertIn(server_id, instance_uuids)
diff --git a/tempest/api/compute/v3/servers/test_delete_server.py b/tempest/api/compute/v3/servers/test_delete_server.py
index d694a33..8f85557 100644
--- a/tempest/api/compute/v3/servers/test_delete_server.py
+++ b/tempest/api/compute/v3/servers/test_delete_server.py
@@ -99,6 +99,25 @@
self.assertEqual('204', resp['status'])
self.client.wait_for_server_termination(server['id'])
+ @test.attr(type='gate')
+ def test_delete_server_while_in_attached_volume(self):
+ # Delete a server while a volume is attached to it
+ device = '/dev/%s' % CONF.compute.volume_device_name
+ resp, server = self.create_test_server(wait_until='ACTIVE')
+
+ resp, volume = self.volumes_client.create_volume(1)
+ self.addCleanup(self.volumes_client.delete_volume, volume['id'])
+ self.volumes_client.wait_for_volume_status(volume['id'], 'available')
+ resp, body = self.client.attach_volume(server['id'],
+ volume['id'],
+ device=device)
+ self.volumes_client.wait_for_volume_status(volume['id'], 'in-use')
+
+ resp, _ = self.client.delete_server(server['id'])
+ self.assertEqual('204', resp['status'])
+ self.client.wait_for_server_termination(server['id'])
+ self.volumes_client.wait_for_volume_status(volume['id'], 'available')
+
class DeleteServersAdminV3Test(base.BaseV3ComputeAdminTest):
# NOTE: Server creations of each test class should be under 10
diff --git a/tempest/api/compute/v3/servers/test_instance_actions.py b/tempest/api/compute/v3/servers/test_instance_actions.py
index 7d25100..399541b 100644
--- a/tempest/api/compute/v3/servers/test_instance_actions.py
+++ b/tempest/api/compute/v3/servers/test_instance_actions.py
@@ -27,25 +27,27 @@
cls.resp = resp
cls.server_id = server['id']
+ @test.skip_because(bug="1206032")
@test.attr(type='gate')
- def test_list_instance_actions(self):
+ def test_list_server_actions(self):
# List actions of the provided server
resp, body = self.client.reboot(self.server_id, 'HARD')
self.client.wait_for_server_status(self.server_id, 'ACTIVE')
- resp, body = self.client.list_instance_actions(self.server_id)
+ resp, body = self.client.list_server_actions(self.server_id)
self.assertEqual(200, resp.status)
self.assertTrue(len(body) == 2, str(body))
self.assertTrue(any([i for i in body if i['action'] == 'create']))
self.assertTrue(any([i for i in body if i['action'] == 'reboot']))
+ @test.skip_because(bug="1206032")
@test.attr(type='gate')
@test.skip_because(bug="1281915")
- def test_get_instance_action(self):
+ def test_get_server_action(self):
# Get the action details of the provided server
request_id = self.resp['x-compute-request-id']
- resp, body = self.client.get_instance_action(self.server_id,
- request_id)
+ resp, body = self.client.get_server_action(self.server_id,
+ request_id)
self.assertEqual(200, resp.status)
- self.assertEqual(self.server_id, body['instance_uuid'])
+ self.assertEqual(self.server_id, body['server_uuid'])
self.assertEqual('create', body['action'])
diff --git a/tempest/api/compute/v3/servers/test_instance_actions_negative.py b/tempest/api/compute/v3/servers/test_instance_actions_negative.py
index b0a7050..0b2c6f9 100644
--- a/tempest/api/compute/v3/servers/test_instance_actions_negative.py
+++ b/tempest/api/compute/v3/servers/test_instance_actions_negative.py
@@ -29,15 +29,15 @@
cls.server_id = server['id']
@test.attr(type=['negative', 'gate'])
- def test_list_instance_actions_invalid_server(self):
+ def test_list_server_actions_invalid_server(self):
# List actions of the invalid server id
invalid_server_id = data_utils.rand_uuid()
self.assertRaises(exceptions.NotFound,
- self.client.list_instance_actions, invalid_server_id)
+ self.client.list_server_actions, invalid_server_id)
@test.attr(type=['negative', 'gate'])
- def test_get_instance_action_invalid_request(self):
+ def test_get_server_action_invalid_request(self):
# Get the action details of the provided server with invalid request
invalid_request_id = 'req-' + data_utils.rand_uuid()
- self.assertRaises(exceptions.NotFound, self.client.get_instance_action,
+ self.assertRaises(exceptions.NotFound, self.client.get_server_action,
self.server_id, invalid_request_id)
diff --git a/tempest/api/identity/admin/v3/test_roles.py b/tempest/api/identity/admin/v3/test_roles.py
index 24c7b83..90dccca 100644
--- a/tempest/api/identity/admin/v3/test_roles.py
+++ b/tempest/api/identity/admin/v3/test_roles.py
@@ -73,7 +73,7 @@
self.assertIn(role_id, fetched_role_ids)
@test.attr(type='smoke')
- def test_role_create_update_get(self):
+ def test_role_create_update_get_list(self):
r_name = data_utils.rand_name('Role-')
resp, role = self.client.create_role(r_name)
self.addCleanup(self.client.delete_role, role['id'])
@@ -94,6 +94,10 @@
self.assertEqual(new_name, new_role['name'])
self.assertEqual(updated_role['id'], new_role['id'])
+ resp, roles = self.client.list_roles()
+ self.assertEqual(resp['status'], '200')
+ self.assertIn(role['id'], [r['id'] for r in roles])
+
@test.attr(type='smoke')
def test_grant_list_revoke_role_to_user_on_project(self):
resp, _ = self.client.assign_user_role_on_project(
diff --git a/tempest/api/network/admin/test_agent_management.py b/tempest/api/network/admin/test_agent_management.py
index 342bc6a..b848994 100644
--- a/tempest/api/network/admin/test_agent_management.py
+++ b/tempest/api/network/admin/test_agent_management.py
@@ -37,8 +37,10 @@
agents = body['agents']
# Heartbeats must be excluded from comparison
self.agent.pop('heartbeat_timestamp', None)
+ self.agent.pop('configurations', None)
for agent in agents:
agent.pop('heartbeat_timestamp', None)
+ agent.pop('configurations', None)
self.assertIn(self.agent, agents)
@test.attr(type=['smoke'])
diff --git a/tempest/api/network/admin/test_dhcp_agent_scheduler.py b/tempest/api/network/admin/test_dhcp_agent_scheduler.py
index 13ae1c0..25e1cc0 100644
--- a/tempest/api/network/admin/test_dhcp_agent_scheduler.py
+++ b/tempest/api/network/admin/test_dhcp_agent_scheduler.py
@@ -20,6 +20,7 @@
_interface = 'json'
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(DHCPAgentSchedulersTestJSON, cls).setUpClass()
if not test.is_extension_enabled('dhcp_agent_scheduler', 'network'):
diff --git a/tempest/api/network/admin/test_lbaas_agent_scheduler.py b/tempest/api/network/admin/test_lbaas_agent_scheduler.py
index a5ba90f..675c62d 100644
--- a/tempest/api/network/admin/test_lbaas_agent_scheduler.py
+++ b/tempest/api/network/admin/test_lbaas_agent_scheduler.py
@@ -35,6 +35,7 @@
"""
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(LBaaSAgentSchedulerTestJSON, cls).setUpClass()
if not test.is_extension_enabled('lbaas_agent_scheduler', 'network'):
diff --git a/tempest/api/network/admin/test_load_balancer_admin_actions.py b/tempest/api/network/admin/test_load_balancer_admin_actions.py
index 34a8e32..6bcc118 100644
--- a/tempest/api/network/admin/test_load_balancer_admin_actions.py
+++ b/tempest/api/network/admin/test_load_balancer_admin_actions.py
@@ -29,6 +29,7 @@
"""
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(LoadBalancerAdminTestJSON, cls).setUpClass()
if not test.is_extension_enabled('lbaas', 'network'):
@@ -89,6 +90,18 @@
show_health_monitor = body['health_monitor']
self.assertEqual(health_monitor['id'], show_health_monitor['id'])
+ @test.attr(type='smoke')
+ def test_create_pool_from_admin_user_other_tenant(self):
+ resp, body = self.admin_client.create_pool(
+ name=data_utils.rand_name('pool-'), lb_method="ROUND_ROBIN",
+ protocol="HTTP", subnet_id=self.subnet['id'],
+ tenant_id=self.tenant_id)
+ self.assertEqual('201', resp['status'])
+ pool = body['pool']
+ self.addCleanup(self.admin_client.delete_pool, pool['id'])
+ self.assertIsNotNone(pool['id'])
+ self.assertEqual(self.tenant_id, pool['tenant_id'])
+
class LoadBalancerAdminTestXML(LoadBalancerAdminTestJSON):
_interface = 'xml'
diff --git a/tempest/api/network/base.py b/tempest/api/network/base.py
index f92ad68..696a1c3 100644
--- a/tempest/api/network/base.py
+++ b/tempest/api/network/base.py
@@ -79,9 +79,17 @@
cls.floating_ips = []
cls.metering_labels = []
cls.metering_label_rules = []
+ cls.fw_rules = []
+ cls.fw_policies = []
@classmethod
def tearDownClass(cls):
+ # Clean up firewall policies
+ for fw_policy in cls.fw_policies:
+ cls.client.delete_firewall_policy(fw_policy['id'])
+ # Clean up firewall rules
+ for fw_rule in cls.fw_rules:
+ cls.client.delete_firewall_rule(fw_rule['id'])
# Clean up ike policies
for ikepolicy in cls.ikepolicies:
cls.client.delete_ikepolicy(ikepolicy['id'])
@@ -93,12 +101,8 @@
cls.client.delete_floatingip(floating_ip['id'])
# Clean up routers
for router in cls.routers:
- resp, body = cls.client.list_router_interfaces(router['id'])
- interfaces = body['ports']
- for i in interfaces:
- cls.client.remove_router_interface_with_subnet_id(
- router['id'], i['fixed_ips'][0]['subnet_id'])
- cls.client.delete_router(router['id'])
+ cls.delete_router(router)
+
# Clean up health monitors
for health_monitor in cls.health_monitors:
cls.client.delete_health_monitor(health_monitor['id'])
@@ -296,6 +300,35 @@
cls.ikepolicies.append(ikepolicy)
return ikepolicy
+ @classmethod
+ def create_firewall_rule(cls, action, protocol):
+ """Wrapper utility that returns a test firewall rule."""
+ resp, body = cls.client.create_firewall_rule(
+ name=data_utils.rand_name("fw-rule"),
+ action=action,
+ protocol=protocol)
+ fw_rule = body['firewall_rule']
+ cls.fw_rules.append(fw_rule)
+ return fw_rule
+
+ @classmethod
+ def create_firewall_policy(cls):
+ """Wrapper utility that returns a test firewall policy."""
+ resp, body = cls.client.create_firewall_policy(
+ name=data_utils.rand_name("fw-policy"))
+ fw_policy = body['firewall_policy']
+ cls.fw_policies.append(fw_policy)
+ return fw_policy
+
+ @classmethod
+ def delete_router(cls, router):
+ resp, body = cls.client.list_router_interfaces(router['id'])
+ interfaces = body['ports']
+ for i in interfaces:
+ cls.client.remove_router_interface_with_subnet_id(
+ router['id'], i['fixed_ips'][0]['subnet_id'])
+ cls.client.delete_router(router['id'])
+
class BaseAdminNetworkTest(BaseNetworkTest):
diff --git a/tempest/api/network/test_extra_dhcp_options.py b/tempest/api/network/test_extra_dhcp_options.py
index ed86d75..371c651 100644
--- a/tempest/api/network/test_extra_dhcp_options.py
+++ b/tempest/api/network/test_extra_dhcp_options.py
@@ -36,6 +36,7 @@
"""
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(ExtraDHCPOptionsTestJSON, cls).setUpClass()
if not test.is_extension_enabled('extra_dhcp_opt', 'network'):
diff --git a/tempest/api/network/test_floating_ips.py b/tempest/api/network/test_floating_ips.py
index 06871ad..7191940 100644
--- a/tempest/api/network/test_floating_ips.py
+++ b/tempest/api/network/test_floating_ips.py
@@ -44,6 +44,7 @@
"""
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(FloatingIPTestJSON, cls).setUpClass()
if not test.is_extension_enabled('router', 'network'):
diff --git a/tempest/api/network/test_fwaas_extensions.py b/tempest/api/network/test_fwaas_extensions.py
new file mode 100644
index 0000000..0647069
--- /dev/null
+++ b/tempest/api/network/test_fwaas_extensions.py
@@ -0,0 +1,207 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.network import base
+from tempest.common.utils import data_utils
+from tempest import exceptions
+from tempest import test
+
+
+class FWaaSExtensionTestJSON(base.BaseNetworkTest):
+ _interface = 'json'
+
+ """
+ Tests the following operations in the Neutron API using the REST client for
+ Neutron:
+
+ List firewall rules
+ Create firewall rule
+ Update firewall rule
+ Delete firewall rule
+ Show firewall rule
+ List firewall policies
+ Create firewall policy
+ Update firewall policy
+ Delete firewall policy
+ Show firewall policy
+ List firewall
+ Create firewall
+ Update firewall
+ Delete firewall
+ Show firewall
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ super(FWaaSExtensionTestJSON, cls).setUpClass()
+ if not test.is_extension_enabled('fwaas', 'network'):
+ msg = "FWaaS Extension not enabled."
+ raise cls.skipException(msg)
+ cls.fw_rule = cls.create_firewall_rule("allow", "tcp")
+ cls.fw_policy = cls.create_firewall_policy()
+
+ def _try_delete_policy(self, policy_id):
+ # delete policy, if it exists
+ try:
+ self.client.delete_firewall_policy(policy_id)
+ # if policy is not found, this means it was deleted in the test
+ except exceptions.NotFound:
+ pass
+
+ def _try_delete_firewall(self, fw_id):
+ # delete firewall, if it exists
+ try:
+ self.client.delete_firewall(fw_id)
+ # if firewall is not found, this means it was deleted in the test
+ except exceptions.NotFound:
+ pass
+
+ @test.attr(type='smoke')
+ def test_list_firewall_rules(self):
+ # List firewall rules
+ resp, fw_rules = self.client.list_firewall_rules()
+ self.assertEqual('200', resp['status'])
+ fw_rules = fw_rules['firewall_rules']
+ self.assertIn((self.fw_rule['id'],
+ self.fw_rule['name'],
+ self.fw_rule['action'],
+ self.fw_rule['protocol'],
+ self.fw_rule['ip_version'],
+ self.fw_rule['enabled']),
+ [(m['id'],
+ m['name'],
+ m['action'],
+ m['protocol'],
+ m['ip_version'],
+ m['enabled']) for m in fw_rules])
+
+ @test.attr(type='smoke')
+ def test_create_update_delete_firewall_rule(self):
+ # Create firewall rule
+ resp, body = self.client.create_firewall_rule(
+ name=data_utils.rand_name("fw-rule"),
+ action="allow",
+ protocol="tcp")
+ self.assertEqual('201', resp['status'])
+ fw_rule_id = body['firewall_rule']['id']
+
+ # Update firewall rule
+ resp, body = self.client.update_firewall_rule(fw_rule_id,
+ shared=True)
+ self.assertEqual('200', resp['status'])
+ self.assertTrue(body["firewall_rule"]['shared'])
+
+ # Delete firewall rule
+ resp, _ = self.client.delete_firewall_rule(fw_rule_id)
+ self.assertEqual('204', resp['status'])
+ # Confirm deletion
+ resp, fw_rules = self.client.list_firewall_rules()
+ self.assertNotIn(fw_rule_id,
+ [m['id'] for m in fw_rules['firewall_rules']])
+
+ @test.attr(type='smoke')
+ def test_show_firewall_rule(self):
+ # show a created firewall rule
+ resp, fw_rule = self.client.show_firewall_rule(self.fw_rule['id'])
+ self.assertEqual('200', resp['status'])
+ for key, value in fw_rule['firewall_rule'].iteritems():
+ self.assertEqual(self.fw_rule[key], value)
+
+ @test.attr(type='smoke')
+ def test_list_firewall_policies(self):
+ resp, fw_policies = self.client.list_firewall_policies()
+ self.assertEqual('200', resp['status'])
+ fw_policies = fw_policies['firewall_policies']
+ self.assertIn((self.fw_policy['id'],
+ self.fw_policy['name'],
+ self.fw_policy['firewall_rules']),
+ [(m['id'],
+ m['name'],
+ m['firewall_rules']) for m in fw_policies])
+
+ @test.attr(type='smoke')
+ def test_create_update_delete_firewall_policy(self):
+ # Create firewall policy
+ resp, body = self.client.create_firewall_policy(
+ name=data_utils.rand_name("fw-policy"))
+ self.assertEqual('201', resp['status'])
+ fw_policy_id = body['firewall_policy']['id']
+ self.addCleanup(self._try_delete_policy, fw_policy_id)
+
+ # Update firewall policy
+ resp, body = self.client.update_firewall_policy(fw_policy_id,
+ shared=True,
+ name="updated_policy")
+ self.assertEqual('200', resp['status'])
+ updated_fw_policy = body["firewall_policy"]
+ self.assertTrue(updated_fw_policy['shared'])
+ self.assertEqual("updated_policy", updated_fw_policy['name'])
+
+ # Delete firewall policy
+ resp, _ = self.client.delete_firewall_policy(fw_policy_id)
+ self.assertEqual('204', resp['status'])
+ # Confirm deletion
+ resp, fw_policies = self.client.list_firewall_policies()
+ fw_policies = fw_policies['firewall_policies']
+ self.assertNotIn(fw_policy_id, [m['id'] for m in fw_policies])
+
+ @test.attr(type='smoke')
+ def test_show_firewall_policy(self):
+ # show a created firewall policy
+ resp, fw_policy = self.client.show_firewall_policy(
+ self.fw_policy['id'])
+ self.assertEqual('200', resp['status'])
+ fw_policy = fw_policy['firewall_policy']
+ for key, value in fw_policy.iteritems():
+ self.assertEqual(self.fw_policy[key], value)
+
+ @test.attr(type='smoke')
+ def test_create_show_delete_firewall(self):
+ # Create firewall
+ resp, body = self.client.create_firewall(
+ name=data_utils.rand_name("firewall"),
+ firewall_policy_id=self.fw_policy['id'])
+ self.assertEqual('201', resp['status'])
+ created_firewall = body['firewall']
+ firewall_id = created_firewall['id']
+ self.addCleanup(self._try_delete_firewall, firewall_id)
+
+ # show a created firewall
+ resp, firewall = self.client.show_firewall(firewall_id)
+ self.assertEqual('200', resp['status'])
+ firewall = firewall['firewall']
+ for key, value in firewall.iteritems():
+ self.assertEqual(created_firewall[key], value)
+
+ # list firewall
+ resp, firewalls = self.client.list_firewalls()
+ self.assertEqual('200', resp['status'])
+ firewalls = firewalls['firewalls']
+ self.assertIn((created_firewall['id'],
+ created_firewall['name'],
+ created_firewall['firewall_policy_id']),
+ [(m['id'],
+ m['name'],
+ m['firewall_policy_id']) for m in firewalls])
+
+ # Delete firewall
+ resp, _ = self.client.delete_firewall(firewall_id)
+ self.assertEqual('204', resp['status'])
+ # Confirm deletion
+ # TODO(raies): Confirm deletion can be done only when the
+ # deleted firewall's status is not "PENDING_DELETE".
+
+
+class FWaaSExtensionTestXML(FWaaSExtensionTestJSON):
+ _interface = 'xml'
diff --git a/tempest/api/network/test_load_balancer.py b/tempest/api/network/test_load_balancer.py
index 792d61d..3ab015e 100644
--- a/tempest/api/network/test_load_balancer.py
+++ b/tempest/api/network/test_load_balancer.py
@@ -38,6 +38,7 @@
"""
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(LoadBalancerTestJSON, cls).setUpClass()
if not test.is_extension_enabled('lbaas', 'network'):
@@ -156,10 +157,14 @@
# Verification of pool update
new_name = "New_pool"
resp, body = self.client.update_pool(pool['id'],
- name=new_name)
+ name=new_name,
+ description="new_description",
+ lb_method='LEAST_CONNECTIONS')
self.assertEqual('200', resp['status'])
updated_pool = body['pool']
- self.assertEqual(updated_pool['name'], new_name)
+ self.assertEqual(new_name, updated_pool['name'])
+ self.assertEqual('new_description', updated_pool['description'])
+ self.assertEqual('LEAST_CONNECTIONS', updated_pool['lb_method'])
# Verification of pool delete
resp, body = self.client.delete_pool(pool['id'])
self.assertEqual('204', resp['status'])
@@ -377,6 +382,58 @@
self.assertIn("active_connections", stats)
self.assertIn("bytes_out", stats)
+ @test.attr(type='smoke')
+ def test_update_list_of_health_monitors_associated_with_pool(self):
+ resp, _ = (self.client.associate_health_monitor_with_pool
+ (self.health_monitor['id'], self.pool['id']))
+ self.assertEqual('201', resp['status'])
+ resp, _ = self.client.update_health_monitor(
+ self.health_monitor['id'], admin_state_up=False)
+ self.assertEqual('200', resp['status'])
+ resp, body = self.client.show_pool(self.pool['id'])
+ self.assertEqual('200', resp['status'])
+ health_monitors = body['pool']['health_monitors']
+ for health_monitor_id in health_monitors:
+ resp, body = self.client.show_health_monitor(health_monitor_id)
+ self.assertEqual('200', resp['status'])
+ self.assertFalse(body['health_monitor']['admin_state_up'])
+ resp, _ = (self.client.disassociate_health_monitor_with_pool
+ (self.health_monitor['id'], self.pool['id']))
+ self.assertEqual('204', resp['status'])
+
+ @test.attr(type='smoke')
+ def test_update_admin_state_up_of_pool(self):
+ resp, _ = self.client.update_pool(self.pool['id'],
+ admin_state_up=False)
+ self.assertEqual('200', resp['status'])
+ resp, body = self.client.show_pool(self.pool['id'])
+ self.assertEqual('200', resp['status'])
+ pool = body['pool']
+ self.assertFalse(pool['admin_state_up'])
+
+ @test.attr(type='smoke')
+ def test_show_vip_associated_with_pool(self):
+ resp, body = self.client.show_pool(self.pool['id'])
+ self.assertEqual('200', resp['status'])
+ pool = body['pool']
+ resp, body = self.client.show_vip(pool['vip_id'])
+ self.assertEqual('200', resp['status'])
+ vip = body['vip']
+ self.assertEqual(self.vip['name'], vip['name'])
+ self.assertEqual(self.vip['id'], vip['id'])
+
+ @test.attr(type='smoke')
+ def test_show_members_associated_with_pool(self):
+ resp, body = self.client.show_pool(self.pool['id'])
+ self.assertEqual('200', resp['status'])
+ members = body['pool']['members']
+ for member_id in members:
+ resp, body = self.client.show_member(member_id)
+ self.assertEqual('200', resp['status'])
+ self.assertIsNotNone(body['member']['status'])
+ self.assertEqual(member_id, body['member']['id'])
+ self.assertIsNotNone(body['member']['admin_state_up'])
+
class LoadBalancerTestXML(LoadBalancerTestJSON):
_interface = 'xml'
diff --git a/tempest/api/network/test_networks.py b/tempest/api/network/test_networks.py
index b9041ee..0175de7 100644
--- a/tempest/api/network/test_networks.py
+++ b/tempest/api/network/test_networks.py
@@ -19,7 +19,7 @@
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
-from tempest.test import attr
+from tempest import test
CONF = config.CONF
@@ -64,6 +64,7 @@
"""
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(NetworksTestJSON, cls).setUpClass()
cls.network = cls.create_network()
@@ -71,7 +72,7 @@
cls.subnet = cls.create_subnet(cls.network)
cls.cidr = cls.subnet['cidr']
- @attr(type='smoke')
+ @test.attr(type='smoke')
def test_create_update_delete_network_subnet(self):
# Create a network
name = data_utils.rand_name('network-')
@@ -102,7 +103,7 @@
resp, body = self.client.delete_network(net_id)
self.assertEqual('204', resp['status'])
- @attr(type='smoke')
+ @test.attr(type='smoke')
def test_show_network(self):
# Verify the details of a network
resp, body = self.client.show_network(self.network['id'])
@@ -111,7 +112,7 @@
for key in ['id', 'name']:
self.assertEqual(network[key], self.network[key])
- @attr(type='smoke')
+ @test.attr(type='smoke')
def test_show_network_fields(self):
# Verify specific fields of a network
field_list = [('fields', 'id'), ('fields', 'name'), ]
@@ -123,7 +124,7 @@
for label, field_name in field_list:
self.assertEqual(network[field_name], self.network[field_name])
- @attr(type='smoke')
+ @test.attr(type='smoke')
def test_list_networks(self):
# Verify the network exists in the list of all networks
resp, body = self.client.list_networks()
@@ -132,7 +133,7 @@
if network['id'] == self.network['id']]
self.assertNotEmpty(networks, "Created network not found in the list")
- @attr(type='smoke')
+ @test.attr(type='smoke')
def test_list_networks_fields(self):
# Verify specific fields of the networks
resp, body = self.client.list_networks(fields='id')
@@ -143,7 +144,7 @@
self.assertEqual(len(network), 1)
self.assertIn('id', network)
- @attr(type='smoke')
+ @test.attr(type='smoke')
def test_show_subnet(self):
# Verify the details of a subnet
resp, body = self.client.show_subnet(self.subnet['id'])
@@ -154,7 +155,7 @@
self.assertIn(key, subnet)
self.assertEqual(subnet[key], self.subnet[key])
- @attr(type='smoke')
+ @test.attr(type='smoke')
def test_show_subnet_fields(self):
# Verify specific fields of a subnet
field_list = [('fields', 'id'), ('fields', 'cidr'), ]
@@ -166,7 +167,7 @@
for label, field_name in field_list:
self.assertEqual(subnet[field_name], self.subnet[field_name])
- @attr(type='smoke')
+ @test.attr(type='smoke')
def test_list_subnets(self):
# Verify the subnet exists in the list of all subnets
resp, body = self.client.list_subnets()
@@ -175,7 +176,7 @@
if subnet['id'] == self.subnet['id']]
self.assertNotEmpty(subnets, "Created subnet not found in the list")
- @attr(type='smoke')
+ @test.attr(type='smoke')
def test_list_subnets_fields(self):
# Verify specific fields of subnets
resp, body = self.client.list_subnets(fields='id')
@@ -194,7 +195,7 @@
except exceptions.NotFound:
pass
- @attr(type='smoke')
+ @test.attr(type='smoke')
def test_delete_network_with_subnet(self):
# Creates a network
name = data_utils.rand_name('network-')
@@ -221,7 +222,7 @@
# it from the list.
self.subnets.pop()
- @attr(type='smoke')
+ @test.attr(type='smoke')
def test_create_port_with_no_ip(self):
# For this test create a new network - do not use any previously
# created networks.
@@ -275,6 +276,7 @@
"""
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(BulkNetworkOpsTestJSON, cls).setUpClass()
cls.network1 = cls.create_network()
@@ -310,7 +312,7 @@
for n in created_ports:
self.assertNotIn(n['id'], ports_list)
- @attr(type='smoke')
+ @test.attr(type='smoke')
def test_bulk_create_delete_network(self):
# Creates 2 networks in one request
network_names = [data_utils.rand_name('network-'),
@@ -326,7 +328,7 @@
self.assertIsNotNone(n['id'])
self.assertIn(n['id'], networks_list)
- @attr(type='smoke')
+ @test.attr(type='smoke')
def test_bulk_create_delete_subnet(self):
# Creates 2 subnets in one request
cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
@@ -358,7 +360,7 @@
self.assertIsNotNone(n['id'])
self.assertIn(n['id'], subnets_list)
- @attr(type='smoke')
+ @test.attr(type='smoke')
def test_bulk_create_delete_port(self):
# Creates 2 ports in one request
networks = [self.network1['id'], self.network2['id']]
diff --git a/tempest/api/network/test_ports.py b/tempest/api/network/test_ports.py
index fbb25a8..66dcaa5 100644
--- a/tempest/api/network/test_ports.py
+++ b/tempest/api/network/test_ports.py
@@ -27,6 +27,7 @@
_interface = 'json'
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(PortsTestJSON, cls).setUpClass()
cls.network = cls.create_network()
@@ -143,6 +144,7 @@
_interface = 'json'
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(PortsAdminExtendedAttrsTestJSON, cls).setUpClass()
cls.identity_client = cls._get_identity_admin_client()
diff --git a/tempest/api/network/test_routers.py b/tempest/api/network/test_routers.py
index 2657031..4cc0338 100644
--- a/tempest/api/network/test_routers.py
+++ b/tempest/api/network/test_routers.py
@@ -36,6 +36,18 @@
admin_manager = clients.AdminManager()
cls.identity_admin_client = admin_manager.identity_client
+ def _cleanup_router(self, router):
+ self.delete_router(router)
+ self.routers.remove(router)
+
+ def _create_router(self, name, admin_state_up=False,
+ external_network_id=None, enable_snat=None):
+ # associate a cleanup with created routers to avoid quota limits
+ router = self.create_router(name, admin_state_up,
+ external_network_id, enable_snat)
+ self.addCleanup(self._cleanup_router, router)
+ return router
+
@test.attr(type='smoke')
def test_create_show_list_update_delete_router(self):
# Create a router
@@ -102,7 +114,7 @@
def test_add_remove_router_interface_with_subnet_id(self):
network = self.create_network()
subnet = self.create_subnet(network)
- router = self.create_router(data_utils.rand_name('router-'))
+ router = self._create_router(data_utils.rand_name('router-'))
# Add router interface with subnet id
resp, interface = self.client.add_router_interface_with_subnet_id(
router['id'], subnet['id'])
@@ -121,7 +133,7 @@
def test_add_remove_router_interface_with_port_id(self):
network = self.create_network()
self.create_subnet(network)
- router = self.create_router(data_utils.rand_name('router-'))
+ router = self._create_router(data_utils.rand_name('router-'))
resp, port_body = self.client.create_port(
network_id=network['id'])
# add router interface to port created above
@@ -164,7 +176,7 @@
@test.attr(type='smoke')
def test_update_router_set_gateway(self):
- router = self.create_router(data_utils.rand_name('router-'))
+ router = self._create_router(data_utils.rand_name('router-'))
self.client.update_router(
router['id'],
external_gateway_info={
@@ -180,7 +192,7 @@
@test.requires_ext(extension='ext-gw-mode', service='network')
@test.attr(type='smoke')
def test_update_router_set_gateway_with_snat_explicit(self):
- router = self.create_router(data_utils.rand_name('router-'))
+ router = self._create_router(data_utils.rand_name('router-'))
self.admin_client.update_router_with_snat_gw_info(
router['id'],
external_gateway_info={
@@ -195,7 +207,7 @@
@test.requires_ext(extension='ext-gw-mode', service='network')
@test.attr(type='smoke')
def test_update_router_set_gateway_without_snat(self):
- router = self.create_router(data_utils.rand_name('router-'))
+ router = self._create_router(data_utils.rand_name('router-'))
self.admin_client.update_router_with_snat_gw_info(
router['id'],
external_gateway_info={
@@ -209,7 +221,7 @@
@test.attr(type='smoke')
def test_update_router_unset_gateway(self):
- router = self.create_router(
+ router = self._create_router(
data_utils.rand_name('router-'),
external_network_id=CONF.network.public_network_id)
self.client.update_router(router['id'], external_gateway_info={})
@@ -223,7 +235,7 @@
@test.requires_ext(extension='ext-gw-mode', service='network')
@test.attr(type='smoke')
def test_update_router_reset_gateway_without_snat(self):
- router = self.create_router(
+ router = self._create_router(
data_utils.rand_name('router-'),
external_network_id=CONF.network.public_network_id)
self.admin_client.update_router_with_snat_gw_info(
@@ -244,7 +256,8 @@
self.name = self.network['name']
self.subnet = self.create_subnet(self.network)
# Add router interface with subnet id
- self.router = self.create_router(data_utils.rand_name('router-'), True)
+ self.router = self._create_router(
+ data_utils.rand_name('router-'), True)
self.create_router_interface(self.router['id'], self.subnet['id'])
self.addCleanup(
self._delete_extra_routes,
@@ -259,7 +272,7 @@
@test.attr(type='smoke')
def test_update_router_admin_state(self):
- self.router = self.create_router(data_utils.rand_name('router-'))
+ self.router = self._create_router(data_utils.rand_name('router-'))
self.assertFalse(self.router['admin_state_up'])
# Update router admin state
resp, update_body = self.client.update_router(self.router['id'],
@@ -275,7 +288,7 @@
network = self.create_network()
subnet01 = self.create_subnet(network)
subnet02 = self.create_subnet(network)
- router = self.create_router(data_utils.rand_name('router-'))
+ router = self._create_router(data_utils.rand_name('router-'))
interface01 = self._add_router_interface_with_subnet_id(router['id'],
subnet01['id'])
self._verify_router_interface(router['id'], subnet01['id'],
diff --git a/tempest/api/network/test_routers_negative.py b/tempest/api/network/test_routers_negative.py
index e6ad4de..91ab9d6 100644
--- a/tempest/api/network/test_routers_negative.py
+++ b/tempest/api/network/test_routers_negative.py
@@ -23,6 +23,7 @@
_interface = 'json'
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(RoutersNegativeTest, cls).setUpClass()
if not test.is_extension_enabled('router', 'network'):
diff --git a/tempest/api/network/test_vpnaas_extensions.py b/tempest/api/network/test_vpnaas_extensions.py
index 78bc80a..f64bd33 100644
--- a/tempest/api/network/test_vpnaas_extensions.py
+++ b/tempest/api/network/test_vpnaas_extensions.py
@@ -37,6 +37,7 @@
"""
@classmethod
+ @test.safe_setup
def setUpClass(cls):
if not test.is_extension_enabled('vpnaas', 'network'):
msg = "vpnaas extension not enabled."
diff --git a/tempest/api/object_storage/test_account_quotas.py b/tempest/api/object_storage/test_account_quotas.py
index a3098a5..d919245 100644
--- a/tempest/api/object_storage/test_account_quotas.py
+++ b/tempest/api/object_storage/test_account_quotas.py
@@ -82,7 +82,8 @@
# Set a quota of 20 bytes on the user's account before each test
headers = {"X-Account-Meta-Quota-Bytes": "20"}
- self.os.custom_account_client.request("POST", "", headers, "")
+ self.os.custom_account_client.request("POST", url="", headers=headers,
+ body="")
def tearDown(self):
# Set the reselleradmin auth in headers for next custom_account_client
@@ -94,7 +95,8 @@
# remove the quota from the container
headers = {"X-Remove-Account-Meta-Quota-Bytes": "x"}
- self.os.custom_account_client.request("POST", "", headers, "")
+ self.os.custom_account_client.request("POST", url="", headers=headers,
+ body="")
super(AccountQuotasTest, self).tearDown()
@classmethod
@@ -135,8 +137,9 @@
)
headers = {"X-Account-Meta-Quota-Bytes": quota}
- resp, _ = self.os.custom_account_client.request("POST", "",
- headers, "")
+ resp, _ = self.os.custom_account_client.request("POST", url="",
+ headers=headers,
+ body="")
self.assertEqual(resp["status"], "204")
self.assertHeaders(resp, 'Account', 'POST')
diff --git a/tempest/api/object_storage/test_account_quotas_negative.py b/tempest/api/object_storage/test_account_quotas_negative.py
index 7648ea1..29afebc 100644
--- a/tempest/api/object_storage/test_account_quotas_negative.py
+++ b/tempest/api/object_storage/test_account_quotas_negative.py
@@ -81,7 +81,8 @@
# Set a quota of 20 bytes on the user's account before each test
headers = {"X-Account-Meta-Quota-Bytes": "20"}
- self.os.custom_account_client.request("POST", "", headers, "")
+ self.os.custom_account_client.request("POST", url="", headers=headers,
+ body="")
def tearDown(self):
# Set the reselleradmin auth in headers for next custom_account_client
@@ -93,7 +94,8 @@
# remove the quota from the container
headers = {"X-Remove-Account-Meta-Quota-Bytes": "x"}
- self.os.custom_account_client.request("POST", "", headers, "")
+ self.os.custom_account_client.request("POST", url="", headers=headers,
+ body="")
super(AccountQuotasNegativeTest, self).tearDown()
@classmethod
diff --git a/tempest/api/object_storage/test_object_services.py b/tempest/api/object_storage/test_object_services.py
index 91df292..b057698 100644
--- a/tempest/api/object_storage/test_object_services.py
+++ b/tempest/api/object_storage/test_object_services.py
@@ -14,6 +14,7 @@
# under the License.
import hashlib
+import re
from six import moves
from tempest.api.object_storage import base
@@ -35,6 +36,29 @@
cls.delete_containers(cls.containers)
super(ObjectTest, cls).tearDownClass()
+ def _create_object(self, metadata=None):
+ # setup object
+ object_name = data_utils.rand_name(name='TestObject')
+ data = data_utils.arbitrary_string()
+ self.object_client.create_object(self.container_name,
+ object_name, data, metadata=metadata)
+
+ return object_name, data
+
+ def _upload_segments(self):
+ # create object
+ object_name = data_utils.rand_name(name='LObject')
+ data = data_utils.arbitrary_string()
+ segments = 10
+ data_segments = [data + str(i) for i in moves.xrange(segments)]
+ # uploading segments
+ for i in moves.xrange(segments):
+ resp, _ = self.object_client.create_object_segments(
+ self.container_name, object_name, i, data_segments[i])
+ self.assertEqual(resp['status'], '201')
+
+ return object_name, data_segments
+
@test.attr(type='smoke')
def test_create_object(self):
# create object
@@ -64,32 +88,220 @@
self.assertHeaders(resp, 'Object', 'DELETE')
@test.attr(type='smoke')
- def test_object_metadata(self):
- # add metadata to storage object, test if metadata is retrievable
+ def test_update_object_metadata(self):
+ # update object metadata
+ object_name, data = self._create_object()
- # create Object
- object_name = data_utils.rand_name(name='TestObject')
- data = data_utils.arbitrary_string()
- resp, _ = self.object_client.create_object(self.container_name,
- object_name, data)
- # set object metadata
- meta_key = data_utils.rand_name(name='test-')
- meta_value = data_utils.rand_name(name='MetaValue-')
- orig_metadata = {meta_key: meta_value}
+ metadata = {'X-Object-Meta-test-meta': 'Meta'}
resp, _ = self.object_client.update_object_metadata(
- self.container_name, object_name, orig_metadata)
+ self.container_name,
+ object_name,
+ metadata,
+ metadata_prefix='')
self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
self.assertHeaders(resp, 'Object', 'POST')
+ resp, _ = self.object_client.list_object_metadata(
+ self.container_name,
+ object_name)
+ self.assertIn('x-object-meta-test-meta', resp)
+ self.assertEqual(resp['x-object-meta-test-meta'], 'Meta')
+
+ def test_update_object_metadata_with_remove_metadata(self):
+ # update object metadata with remove metadata
+ object_name = data_utils.rand_name(name='TestObject')
+ data = data_utils.arbitrary_string()
+ create_metadata = {'X-Object-Meta-test-meta1': 'Meta1'}
+ self.object_client.create_object(self.container_name,
+ object_name,
+ data,
+ metadata=create_metadata)
+
+ update_metadata = {'X-Remove-Object-Meta-test-meta1': 'Meta1'}
+ resp, _ = self.object_client.update_object_metadata(
+ self.container_name,
+ object_name,
+ update_metadata,
+ metadata_prefix='')
+ self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+ self.assertHeaders(resp, 'Object', 'POST')
+
+ resp, _ = self.object_client.list_object_metadata(
+ self.container_name,
+ object_name)
+ self.assertNotIn('x-object-meta-test-meta1', resp)
+
+ @test.attr(type='smoke')
+ def test_update_object_metadata_with_create_and_remove_metadata(self):
+ # creation and deletion of metadata with one request
+ object_name = data_utils.rand_name(name='TestObject')
+ data = data_utils.arbitrary_string()
+ create_metadata = {'X-Object-Meta-test-meta1': 'Meta1'}
+ self.object_client.create_object(self.container_name,
+ object_name,
+ data,
+ metadata=create_metadata)
+
+ update_metadata = {'X-Object-Meta-test-meta2': 'Meta2',
+ 'X-Remove-Object-Meta-test-meta1': 'Meta1'}
+ resp, _ = self.object_client.update_object_metadata(
+ self.container_name,
+ object_name,
+ update_metadata,
+ metadata_prefix='')
+ self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+ self.assertHeaders(resp, 'Object', 'POST')
+
+ resp, _ = self.object_client.list_object_metadata(
+ self.container_name,
+ object_name)
+ self.assertNotIn('x-object-meta-test-meta1', resp)
+ self.assertIn('x-object-meta-test-meta2', resp)
+ self.assertEqual(resp['x-object-meta-test-meta2'], 'Meta2')
+
+ @test.attr(type='smoke')
+ def test_update_object_metadata_with_x_object_manifest(self):
+ # update object metadata with x_object_manifest
+
+ # uploading segments
+ object_name, data_segments = self._upload_segments()
+ # creating a manifest file
+ data_empty = ''
+ self.object_client.create_object(self.container_name,
+ object_name,
+ data_empty,
+ metadata=None)
+ object_prefix = '%s/%s' % (self.container_name, object_name)
+ update_metadata = {'X-Object-Manifest': object_prefix}
+ resp, _ = self.object_client.update_object_metadata(
+ self.container_name,
+ object_name,
+ update_metadata,
+ metadata_prefix='')
+ self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+ self.assertHeaders(resp, 'Object', 'POST')
+
+ resp, _ = self.object_client.list_object_metadata(
+ self.container_name,
+ object_name)
+ self.assertIn('x-object-manifest', resp)
+ self.assertNotEqual(len(resp['x-object-manifest']), 0)
+
+ def test_update_object_metadata_with_x_object_metakey(self):
+ # update object metadata with a blank metadata value
+ object_name, data = self._create_object()
+
+ update_metadata = {'X-Object-Meta-test-meta': ''}
+ resp, _ = self.object_client.update_object_metadata(
+ self.container_name,
+ object_name,
+ update_metadata,
+ metadata_prefix='')
+ self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+ self.assertHeaders(resp, 'Object', 'POST')
+
+ resp, _ = self.object_client.list_object_metadata(
+ self.container_name,
+ object_name)
+ self.assertIn('x-object-meta-test-meta', resp)
+ self.assertEqual(resp['x-object-meta-test-meta'], '')
+
+ @test.attr(type='smoke')
+ def test_update_object_metadata_with_x_remove_object_metakey(self):
+ # update object metadata with a blank value of remove metadata
+ object_name = data_utils.rand_name(name='TestObject')
+ data = data_utils.arbitrary_string()
+ create_metadata = {'X-Object-Meta-test-meta': 'Meta'}
+ self.object_client.create_object(self.container_name,
+ object_name,
+ data,
+ metadata=create_metadata)
+
+ update_metadata = {'X-Remove-Object-Meta-test-meta': ''}
+ resp, _ = self.object_client.update_object_metadata(
+ self.container_name,
+ object_name,
+ update_metadata,
+ metadata_prefix='')
+ self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+ self.assertHeaders(resp, 'Object', 'POST')
+
+ resp, _ = self.object_client.list_object_metadata(
+ self.container_name,
+ object_name)
+ self.assertNotIn('x-object-meta-test-meta', resp)
+
+ @test.attr(type='smoke')
+ def test_list_object_metadata(self):
# get object metadata
- resp, resp_metadata = self.object_client.list_object_metadata(
- self.container_name, object_name)
+ object_name = data_utils.rand_name(name='TestObject')
+ data = data_utils.arbitrary_string()
+ metadata = {'X-Object-Meta-test-meta': 'Meta'}
+ self.object_client.create_object(self.container_name,
+ object_name,
+ data,
+ metadata=metadata)
+
+ resp, _ = self.object_client.list_object_metadata(
+ self.container_name,
+ object_name)
self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
self.assertHeaders(resp, 'Object', 'HEAD')
+ self.assertIn('x-object-meta-test-meta', resp)
+ self.assertEqual(resp['x-object-meta-test-meta'], 'Meta')
- actual_meta_key = 'x-object-meta-' + meta_key
- self.assertIn(actual_meta_key, resp)
- self.assertEqual(resp[actual_meta_key], meta_value)
+ @test.attr(type='smoke')
+ def test_list_no_object_metadata(self):
+ # get empty list of object metadata
+ object_name, data = self._create_object()
+
+ resp, _ = self.object_client.list_object_metadata(
+ self.container_name,
+ object_name)
+ self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+ self.assertHeaders(resp, 'Object', 'HEAD')
+ self.assertNotIn('x-object-meta-', str(resp))
+
+ @test.attr(type='smoke')
+ def test_list_object_metadata_with_x_object_manifest(self):
+ # get object metadata with x_object_manifest
+
+ # uploading segments
+ object_name, data_segments = self._upload_segments()
+ # creating a manifest file
+ object_prefix = '%s/%s' % (self.container_name, object_name)
+ metadata = {'X-Object-Manifest': object_prefix}
+ data_empty = ''
+ resp, _ = self.object_client.create_object(
+ self.container_name,
+ object_name,
+ data_empty,
+ metadata=metadata)
+
+ resp, _ = self.object_client.list_object_metadata(
+ self.container_name,
+ object_name)
+ self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+
+ # Check only the existence of common headers with custom matcher
+ self.assertThat(resp, custom_matchers.ExistsAllResponseHeaders(
+ 'Object', 'HEAD'))
+ self.assertIn('x-object-manifest', resp)
+
+ # The Etag value of a large object is enclosed in double quotes.
+ # This is a special case, therefore the formats of response headers
+ # are checked without a custom matcher.
+ self.assertTrue(resp['etag'].startswith('\"'))
+ self.assertTrue(resp['etag'].endswith('\"'))
+ self.assertTrue(resp['etag'].strip('\"').isalnum())
+ self.assertTrue(re.match("^\d+\.?\d*\Z", resp['x-timestamp']))
+ self.assertNotEqual(len(resp['content-type']), 0)
+ self.assertTrue(re.match("^tx[0-9a-f]*-[0-9a-f]*$",
+ resp['x-trans-id']))
+ self.assertNotEqual(len(resp['date']), 0)
+ self.assertEqual(resp['accept-ranges'], 'bytes')
+ self.assertEqual(resp['x-object-manifest'],
+ '%s/%s' % (self.container_name, object_name))
@test.attr(type='smoke')
def test_get_object(self):
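For reference, a minimal sketch (values are illustrative, not taken from the patch) of the Swift metadata header conventions these object tests exercise: 'X-Object-Meta-<key>' sets a metadata entry, 'X-Remove-Object-Meta-<key>' removes it regardless of the value sent, and 'X-Object-Manifest: <container>/<prefix>' marks the object as a dynamic large object manifest.

    # headers a client would send for each case (illustrative values only)
    set_meta = {'X-Object-Meta-test-meta': 'Meta'}
    remove_meta = {'X-Remove-Object-Meta-test-meta': ''}        # value may be blank
    manifest = {'X-Object-Manifest': 'TestContainer/LObject'}   # container/prefix
    # Swift reports metadata back lower-cased, which is why the assertions
    # above check for 'x-object-meta-test-meta' in the HEAD response.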
diff --git a/tempest/api/orchestration/base.py b/tempest/api/orchestration/base.py
index 1832259..7656ff3 100644
--- a/tempest/api/orchestration/base.py
+++ b/tempest/api/orchestration/base.py
@@ -120,3 +120,20 @@
"""Return a stack output value for a given key."""
return next((o['output_value'] for o in stack['outputs']
if o['output_key'] == output_key), None)
+
+ def assert_fields_in_dict(self, obj, *fields):
+ for field in fields:
+ self.assertIn(field, obj)
+
+ def list_resources(self, stack_identifier):
+ """Get a dict mapping of resource names to types."""
+ resp, resources = self.client.list_resources(stack_identifier)
+ self.assertEqual('200', resp['status'])
+ self.assertIsInstance(resources, list)
+ for res in resources:
+ self.assert_fields_in_dict(res, 'logical_resource_id',
+ 'resource_type', 'resource_status',
+ 'updated_time')
+
+ return dict((r['resource_name'], r['resource_type'])
+ for r in resources)
diff --git a/tempest/api/orchestration/stacks/test_non_empty_stack.py b/tempest/api/orchestration/stacks/test_non_empty_stack.py
index 4b1b5ef..9ef95a1 100644
--- a/tempest/api/orchestration/stacks/test_non_empty_stack.py
+++ b/tempest/api/orchestration/stacks/test_non_empty_stack.py
@@ -40,16 +40,18 @@
cls.resource_type = 'AWS::AutoScaling::LaunchConfiguration'
cls.client.wait_for_stack_status(cls.stack_id, 'CREATE_COMPLETE')
- def assert_fields_in_dict(self, obj, *fields):
- for field in fields:
- self.assertIn(field, obj)
+ def _list_stacks(self, expected_num=None, **filter_kwargs):
+ resp, stacks = self.client.list_stacks(params=filter_kwargs)
+ self.assertEqual('200', resp['status'])
+ self.assertIsInstance(stacks, list)
+ if expected_num is not None:
+ self.assertEqual(expected_num, len(stacks))
+ return stacks
@attr(type='gate')
def test_stack_list(self):
"""Created stack should be in the list of existing stacks."""
- resp, stacks = self.client.list_stacks()
- self.assertEqual('200', resp['status'])
- self.assertIsInstance(stacks, list)
+ stacks = self._list_stacks()
stacks_names = map(lambda stack: stack['stack_name'], stacks)
self.assertIn(self.stack_name, stacks_names)
@@ -89,20 +91,8 @@
def test_list_resources(self):
"""Getting list of created resources for the stack should be possible.
"""
- resp, resources = self.client.list_resources(self.stack_identifier)
- self.assertEqual('200', resp['status'])
- self.assertIsInstance(resources, list)
- for res in resources:
- self.assert_fields_in_dict(res, 'logical_resource_id',
- 'resource_type', 'resource_status',
- 'updated_time')
-
- resources_names = map(lambda resource: resource['logical_resource_id'],
- resources)
- self.assertIn(self.resource_name, resources_names)
- resources_types = map(lambda resource: resource['resource_type'],
- resources)
- self.assertIn(self.resource_type, resources_types)
+ resources = self.list_resources(self.stack_identifier)
+ self.assertEqual({self.resource_name: self.resource_type}, resources)
@attr(type='gate')
def test_show_resource(self):
diff --git a/tempest/api/orchestration/stacks/test_server_cfn_init.py b/tempest/api/orchestration/stacks/test_server_cfn_init.py
index 5f65193..3f29269 100644
--- a/tempest/api/orchestration/stacks/test_server_cfn_init.py
+++ b/tempest/api/orchestration/stacks/test_server_cfn_init.py
@@ -93,7 +93,8 @@
try:
self.client.wait_for_resource_status(
sid, 'WaitCondition', 'CREATE_COMPLETE')
- except exceptions.TimeoutException as e:
+ except (exceptions.StackResourceBuildErrorException,
+ exceptions.TimeoutException) as e:
# attempt to log the server console to help with debugging
# the cause of the server not signalling the waitcondition
# to heat.
diff --git a/tempest/api/volume/test_volumes_get.py b/tempest/api/volume/test_volumes_get.py
index be5d76b..58da440 100644
--- a/tempest/api/volume/test_volumes_get.py
+++ b/tempest/api/volume/test_volumes_get.py
@@ -51,8 +51,7 @@
v_name = data_utils.rand_name('Volume')
metadata = {'Type': 'Test'}
# Create a volume
- resp, volume = self.client.create_volume(size=1,
- display_name=v_name,
+ resp, volume = self.client.create_volume(display_name=v_name,
metadata=metadata,
**kwargs)
self.assertEqual(200, resp.status)
diff --git a/tempest/api_schema/compute/aggregates.py b/tempest/api_schema/compute/aggregates.py
index 49793fe..a70b356 100644
--- a/tempest/api_schema/compute/aggregates.py
+++ b/tempest/api_schema/compute/aggregates.py
@@ -12,6 +12,24 @@
# License for the specific language governing permissions and limitations
# under the License.
+aggregate = {
+ 'type': 'object',
+ 'properties': {
+ 'availability_zone': {'type': ['string', 'null']},
+ 'created_at': {'type': 'string'},
+ 'deleted': {'type': 'boolean'},
+ 'deleted_at': {'type': ['string', 'null']},
+ 'hosts': {'type': 'array'},
+ 'id': {'type': 'integer'},
+ 'metadata': {'type': 'object'},
+ 'name': {'type': 'string'},
+ 'updated_at': {'type': ['string', 'null']}
+ },
+ 'required': ['availability_zone', 'created_at', 'deleted',
+ 'deleted_at', 'hosts', 'id', 'metadata',
+ 'name', 'updated_at']
+}
+
list_aggregates = {
'status_code': [200],
'response_body': {
@@ -19,25 +37,22 @@
'properties': {
'aggregates': {
'type': 'array',
- 'items': {
- 'type': 'object',
- 'properties': {
- 'availability_zone': {'type': ['string', 'null']},
- 'created_at': {'type': 'string'},
- 'deleted': {'type': 'boolean'},
- 'deleted_at': {'type': ['string', 'null']},
- 'hosts': {'type': 'array'},
- 'id': {'type': 'integer'},
- 'metadata': {'type': 'object'},
- 'name': {'type': 'string'},
- 'updated_at': {'type': ['string', 'null']}
- },
- 'required': ['availability_zone', 'created_at', 'deleted',
- 'deleted_at', 'hosts', 'id', 'metadata',
- 'name', 'updated_at']
- }
+ 'items': aggregate
}
},
'required': ['aggregates']
}
}
+
+get_aggregate = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'aggregate': aggregate
+ },
+ 'required': ['aggregate']
+ }
+}
+
+aggregate_set_metadata = get_aggregate
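For reference, a minimal sketch of how schemas in this style are typically consumed: the 'status_code' list is checked against the HTTP status and the body is validated with jsonschema. The sample data below is illustrative, not taken from the patch.

    import jsonschema

    sample_schema = {
        'status_code': [200],
        'response_body': {
            'type': 'object',
            'properties': {'aggregate': {'type': 'object'}},
            'required': ['aggregate'],
        },
    }
    status = 200
    body = {'aggregate': {'id': 1, 'name': 'test-aggregate', 'hosts': []}}

    assert status in sample_schema['status_code']
    jsonschema.validate(body, sample_schema['response_body'])  # raises on mismatch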
diff --git a/tempest/api_schema/compute/flavors.py b/tempest/api_schema/compute/flavors.py
index a6367d4..fd02780 100644
--- a/tempest/api_schema/compute/flavors.py
+++ b/tempest/api_schema/compute/flavors.py
@@ -35,3 +35,30 @@
'required': ['flavors']
}
}
+
+common_flavor_list_details = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'flavors': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'name': {'type': 'string'},
+ 'links': parameter_types.links,
+ 'ram': {'type': 'integer'},
+ 'vcpus': {'type': 'integer'},
+ 'swap': {'type': 'integer'},
+ 'disk': {'type': 'integer'},
+ 'id': {'type': 'string'}
+ },
+ 'required': ['name', 'links', 'ram', 'vcpus',
+ 'swap', 'disk', 'id']
+ }
+ }
+ },
+ 'required': ['flavors']
+ }
+}
diff --git a/tempest/api_schema/compute/flavors_access.py b/tempest/api_schema/compute/flavors_access.py
index 152e24c..cd31b0a 100644
--- a/tempest/api_schema/compute/flavors_access.py
+++ b/tempest/api_schema/compute/flavors_access.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-list_flavor_access = {
+add_remove_list_flavor_access = {
'status_code': [200],
'response_body': {
'type': 'object',
diff --git a/tempest/api_schema/compute/servers.py b/tempest/api_schema/compute/servers.py
new file mode 100644
index 0000000..0071845
--- /dev/null
+++ b/tempest/api_schema/compute/servers.py
@@ -0,0 +1,24 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+get_password = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'password': {'type': 'string'}
+ },
+ 'required': ['password']
+ }
+}
diff --git a/tempest/api_schema/compute/services.py b/tempest/api_schema/compute/services.py
index 4793f5a..4c58013 100644
--- a/tempest/api_schema/compute/services.py
+++ b/tempest/api_schema/compute/services.py
@@ -42,3 +42,22 @@
'required': ['services']
}
}
+
+enable_service = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'service': {
+ 'type': 'object',
+ 'properties': {
+ 'status': {'type': 'string'},
+ 'binary': {'type': 'string'},
+ 'host': {'type': 'string'}
+ },
+ 'required': ['status', 'binary', 'host']
+ }
+ },
+ 'required': ['service']
+ }
+}
diff --git a/tempest/api_schema/compute/v2/fixed_ips.py b/tempest/api_schema/compute/v2/fixed_ips.py
index a6add04..446633f 100644
--- a/tempest/api_schema/compute/v2/fixed_ips.py
+++ b/tempest/api_schema/compute/v2/fixed_ips.py
@@ -34,3 +34,8 @@
'required': ['fixed_ip']
}
}
+
+fixed_ip_action = {
+ 'status_code': [202],
+ 'response_body': {'type': 'string'}
+}
diff --git a/tempest/api_schema/compute/v2/flavors.py b/tempest/api_schema/compute/v2/flavors.py
new file mode 100644
index 0000000..999ca19
--- /dev/null
+++ b/tempest/api_schema/compute/v2/flavors.py
@@ -0,0 +1,33 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from tempest.api_schema.compute import flavors
+
+list_flavors_details = copy.deepcopy(flavors.common_flavor_list_details)
+
+# The 'swap' attribute comes back as an integer value, but when it is empty
+# it comes back as "". So its type is defined as both string and integer.
+list_flavors_details['response_body']['properties']['flavors']['items'][
+ 'properties']['swap'] = {'type': ['string', 'integer']}
+
+# Defining extra attributes for V2 flavor schema
+list_flavors_details['response_body']['properties']['flavors']['items'][
+ 'properties'].update({'OS-FLV-DISABLED:disabled': {'type': 'boolean'},
+ 'os-flavor-access:is_public': {'type': 'boolean'},
+ 'rxtx_factor': {'type': 'number'},
+ 'OS-FLV-EXT-DATA:ephemeral': {'type': 'integer'}})
+# 'OS-FLV-DISABLED', 'os-flavor-access', 'rxtx_factor' and 'OS-FLV-EXT-DATA'
+# are API extensions. So they are not 'required'.
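A quick standalone check (illustrative, not part of the patch) that the union type used for 'swap' accepts both the V2 empty-string quirk and a normal integer:

    import jsonschema

    swap_schema = {'type': ['string', 'integer']}
    for value in ('', 512):   # empty string (V2 quirk) and a real swap size
        jsonschema.validate(value, swap_schema)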
diff --git a/tempest/api_schema/compute/v2/hosts.py b/tempest/api_schema/compute/v2/hosts.py
new file mode 100644
index 0000000..cd6bd7b
--- /dev/null
+++ b/tempest/api_schema/compute/v2/hosts.py
@@ -0,0 +1,43 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+body = {
+ 'type': 'object',
+ 'properties': {
+ 'host': {'type': 'string'},
+ 'power_action': {'enum': ['startup']}
+ },
+ 'required': ['host', 'power_action']
+}
+
+startup_host = {
+ 'status_code': [200],
+ 'response_body': body
+}
+
+# The 'power_action' attribute of the 'shutdown_host' API is 'shutdown'
+shutdown_host = copy.deepcopy(startup_host)
+
+shutdown_host['response_body']['properties']['power_action'] = {
+ 'enum': ['shutdown']
+}
+
+# The 'power_action' attribute of the 'reboot_host' API is 'reboot'
+reboot_host = copy.deepcopy(startup_host)
+
+reboot_host['response_body']['properties']['power_action'] = {
+ 'enum': ['reboot']
+}
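The copy.deepcopy calls above matter because the copies share no nested dicts with the original, so editing the 'power_action' enum of one variant cannot leak into the others. A minimal illustration with assumed names, not taken from the patch:

    import copy

    base = {'properties': {'power_action': {'enum': ['startup']}}}
    variant = copy.deepcopy(base)
    variant['properties']['power_action'] = {'enum': ['shutdown']}
    # the original schema is untouched by the edit made to the copy
    assert base['properties']['power_action'] == {'enum': ['startup']}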
diff --git a/tempest/api_schema/compute/v2/quotas.py b/tempest/api_schema/compute/v2/quotas.py
index d69cbd7..17dc4dd 100644
--- a/tempest/api_schema/compute/v2/quotas.py
+++ b/tempest/api_schema/compute/v2/quotas.py
@@ -45,3 +45,7 @@
'required': ['quota_set']
}
}
+
+delete_quota = {
+ 'status_code': [202]
+}
diff --git a/tempest/api_schema/compute/v2/security_groups.py b/tempest/api_schema/compute/v2/security_groups.py
index 68b65b4..6dd44cd 100644
--- a/tempest/api_schema/compute/v2/security_groups.py
+++ b/tempest/api_schema/compute/v2/security_groups.py
@@ -36,3 +36,41 @@
'required': ['security_groups']
}
}
+
+create_security_group_rule = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'security_group_rule': {
+ 'type': 'object',
+ 'properties': {
+ 'from_port': {'type': 'integer'},
+ 'to_port': {'type': 'integer'},
+ 'group': {'type': 'object'},
+ 'ip_protocol': {'type': 'string'},
+ # 'parent_group_id' can be a UUID, so it is defined
+ # as 'string' as well.
+ 'parent_group_id': {'type': ['integer', 'string']},
+ 'id': {'type': ['integer', 'string']},
+ 'ip_range': {
+ 'type': 'object',
+ 'properties': {
+ 'cidr': {'type': 'string'}
+ }
+ # When an optional argument such as 'group_id' is provided
+ # in the request body, the 'cidr' attribute does not come
+ # back in the response body, so it is not 'required'.
+ }
+ },
+ 'required': ['from_port', 'to_port', 'group', 'ip_protocol',
+ 'parent_group_id', 'id', 'ip_range']
+ }
+ },
+ 'required': ['security_group_rule']
+ }
+}
+
+delete_security_group_rule = {
+ 'status_code': [202]
+}
diff --git a/tempest/api_schema/compute/v2/volumes.py b/tempest/api_schema/compute/v2/volumes.py
index 9cfd7e3..84a659c 100644
--- a/tempest/api_schema/compute/v2/volumes.py
+++ b/tempest/api_schema/compute/v2/volumes.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-get_volume = {
+create_get_volume = {
'status_code': [200],
'response_body': {
'type': 'object',
@@ -108,3 +108,7 @@
'required': ['volumes']
}
}
+
+delete_volume = {
+ 'status_code': [202]
+}
diff --git a/tempest/api_schema/compute/v3/flavors.py b/tempest/api_schema/compute/v3/flavors.py
new file mode 100644
index 0000000..542d2b1
--- /dev/null
+++ b/tempest/api_schema/compute/v3/flavors.py
@@ -0,0 +1,33 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from tempest.api_schema.compute import flavors
+
+list_flavors_details = copy.deepcopy(flavors.common_flavor_list_details)
+
+# NOTE: In the v3 API, 'swap' comes back as '0', not as the empty string '""'
+# (which the v2 API returns), so 'swap' is left as integer type only.
+
+# Defining extra attributes for V3 flavor schema
+list_flavors_details['response_body']['properties']['flavors']['items'][
+ 'properties'].update({'disabled': {'type': 'boolean'},
+ 'ephemeral': {'type': 'integer'},
+ 'flavor-access:is_public': {'type': 'boolean'},
+ 'os-flavor-rxtx:rxtx_factor': {'type': 'number'}})
+# 'flavor-access' and 'os-flavor-rxtx' are API extensions.
+# So they are not 'required'.
+list_flavors_details['response_body']['properties']['flavors']['items'][
+ 'required'].extend(['disabled', 'ephemeral'])
diff --git a/tempest/api_schema/compute/v3/hosts.py b/tempest/api_schema/compute/v3/hosts.py
new file mode 100644
index 0000000..2cf8f9b
--- /dev/null
+++ b/tempest/api_schema/compute/v3/hosts.py
@@ -0,0 +1,41 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+from tempest.api_schema.compute.v2 import hosts
+
+startup_host = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'host': hosts.body
+ },
+ 'required': ['host']
+ }
+}
+
+# The 'power_action' attribute of the 'shutdown_host' API is 'shutdown'
+shutdown_host = copy.deepcopy(startup_host)
+
+# the v3 response nests the body under 'host'
+shutdown_host['response_body']['properties']['host']['properties'][
+    'power_action'] = {'enum': ['shutdown']}
+
+# The 'power_action' attribute of the 'reboot_host' API is 'reboot'
+reboot_host = copy.deepcopy(startup_host)
+
+reboot_host['response_body']['properties']['host']['properties'][
+    'power_action'] = {'enum': ['reboot']}
diff --git a/tempest/api_schema/compute/v3/quotas.py b/tempest/api_schema/compute/v3/quotas.py
index 1b9989d..aec1e80 100644
--- a/tempest/api_schema/compute/v3/quotas.py
+++ b/tempest/api_schema/compute/v3/quotas.py
@@ -40,3 +40,46 @@
'required': ['quota_set']
}
}
+
+quota_common_info = {
+ 'type': 'object',
+ 'properties': {
+ 'reserved': {'type': 'integer'},
+ 'limit': {'type': 'integer'},
+ 'in_use': {'type': 'integer'}
+ },
+ 'required': ['reserved', 'limit', 'in_use']
+}
+
+quota_set_detail = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'quota_set': {
+ 'type': 'object',
+ 'properties': {
+ 'id': {'type': 'string'},
+ 'instances': quota_common_info,
+ 'cores': quota_common_info,
+ 'ram': quota_common_info,
+ 'floating_ips': quota_common_info,
+ 'fixed_ips': quota_common_info,
+ 'metadata_items': quota_common_info,
+ 'key_pairs': quota_common_info,
+ 'security_groups': quota_common_info,
+ 'security_group_rules': quota_common_info
+ },
+ 'required': ['id', 'instances', 'cores', 'ram',
+ 'floating_ips', 'fixed_ips',
+ 'metadata_items', 'key_pairs',
+ 'security_groups', 'security_group_rules']
+ }
+ },
+ 'required': ['quota_set']
+ }
+}
+
+delete_quota = {
+ 'status_code': [204]
+}
diff --git a/tempest/cli/simple_read_only/test_sahara.py b/tempest/cli/simple_read_only/test_sahara.py
index cd819a4..36cc324 100644
--- a/tempest/cli/simple_read_only/test_sahara.py
+++ b/tempest/cli/simple_read_only/test_sahara.py
@@ -48,23 +48,93 @@
def test_sahara_plugins_list(self):
plugins = self.parser.listing(self.sahara('plugin-list'))
- self.assertTableStruct(plugins, ['name', 'versions', 'title'])
+ self.assertTableStruct(plugins, [
+ 'name',
+ 'versions',
+ 'title'
+ ])
def test_sahara_plugins_show(self):
- plugin = self.parser.listing(self.sahara('plugin-show',
- params='--name vanilla'))
- self.assertTableStruct(plugin, ['Property', 'Value'])
+ result = self.sahara('plugin-show', params='--name vanilla')
+ plugin = self.parser.listing(result)
+ self.assertTableStruct(plugin, [
+ 'Property',
+ 'Value'
+ ])
def test_sahara_node_group_template_list(self):
- plugins = self.parser.listing(self.sahara('node-group-template-list'))
- self.assertTableStruct(plugins, ['name', 'id', 'plugin_name',
- 'node_processes', 'description'])
+ result = self.sahara('node-group-template-list')
+ node_group_templates = self.parser.listing(result)
+ self.assertTableStruct(node_group_templates, [
+ 'name',
+ 'id',
+ 'plugin_name',
+ 'node_processes',
+ 'description'
+ ])
def test_sahara_cluster_template_list(self):
- plugins = self.parser.listing(self.sahara('cluster-template-list'))
- self.assertTableStruct(plugins, ['name', 'id', 'plugin_name',
- 'node_groups', 'description'])
+ result = self.sahara('cluster-template-list')
+ cluster_templates = self.parser.listing(result)
+ self.assertTableStruct(cluster_templates, [
+ 'name',
+ 'id',
+ 'plugin_name',
+ 'node_groups',
+ 'description'
+ ])
def test_sahara_cluster_list(self):
- plugins = self.parser.listing(self.sahara('cluster-list'))
- self.assertTableStruct(plugins, ['name', 'id', 'status', 'node_count'])
+ result = self.sahara('cluster-list')
+ clusters = self.parser.listing(result)
+ self.assertTableStruct(clusters, [
+ 'name',
+ 'id',
+ 'status',
+ 'node_count'
+ ])
+
+ def test_sahara_data_source_list(self):
+ result = self.sahara('data-source-list')
+ data_sources = self.parser.listing(result)
+ self.assertTableStruct(data_sources, [
+ 'name',
+ 'id',
+ 'type',
+ 'description'
+ ])
+
+ def test_sahara_job_binary_data_list(self):
+ result = self.sahara('job-binary-data-list')
+ job_binary_data_list = self.parser.listing(result)
+ self.assertTableStruct(job_binary_data_list, [
+ 'id',
+ 'name'
+ ])
+
+ def test_sahara_job_binary_list(self):
+ result = self.sahara('job-binary-list')
+ job_binaries = self.parser.listing(result)
+ self.assertTableStruct(job_binaries, [
+ 'id',
+ 'name',
+ 'description'
+ ])
+
+ def test_sahara_job_template_list(self):
+ result = self.sahara('job-template-list')
+ job_templates = self.parser.listing(result)
+ self.assertTableStruct(job_templates, [
+ 'id',
+ 'name',
+ 'description'
+ ])
+
+ def test_sahara_job_list(self):
+ result = self.sahara('job-list')
+ jobs = self.parser.listing(result)
+ self.assertTableStruct(jobs, [
+ 'id',
+ 'cluster_id',
+ 'status'
+ ])
diff --git a/tempest/clients.py b/tempest/clients.py
index 7ebd983..10c0014 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -13,15 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Default client libs
-import cinderclient.client
-import glanceclient
-import heatclient.client
import keystoneclient.exceptions
import keystoneclient.v2_0.client
-import neutronclient.v2_0.client
-import novaclient.client
-import swiftclient
from tempest.common.rest_client import NegativeRestClient
from tempest import config
@@ -52,6 +45,8 @@
InterfacesClientJSON
from tempest.services.compute.json.keypairs_client import KeyPairsClientJSON
from tempest.services.compute.json.limits_client import LimitsClientJSON
+from tempest.services.compute.json.migrations_client import \
+ MigrationsClientJSON
from tempest.services.compute.json.quotas_client import QuotasClientJSON
from tempest.services.compute.json.security_groups_client import \
SecurityGroupsClientJSON
@@ -78,6 +73,8 @@
InterfacesV3ClientJSON
from tempest.services.compute.v3.json.keypairs_client import \
KeyPairsV3ClientJSON
+from tempest.services.compute.v3.json.migration_client import \
+ MigrationsV3ClientJSON
from tempest.services.compute.v3.json.quotas_client import \
QuotasV3ClientJSON
from tempest.services.compute.v3.json.servers_client import \
@@ -325,6 +322,8 @@
self.tenant_usages_client = TenantUsagesClientJSON(
self.auth_provider)
self.version_v3_client = VersionV3ClientJSON(self.auth_provider)
+ self.migrations_v3_client = MigrationsV3ClientJSON(
+ self.auth_provider)
self.policy_client = PolicyClientJSON(self.auth_provider)
self.hosts_client = HostsClientJSON(self.auth_provider)
self.hypervisor_v3_client = HypervisorV3ClientJSON(
@@ -381,6 +380,7 @@
AccountClientCustomizedHeader(self.auth_provider)
self.data_processing_client = DataProcessingClient(
self.auth_provider)
+ self.migrations_client = MigrationsClientJSON(self.auth_provider)
class AltManager(Manager):
@@ -456,6 +456,8 @@
NOVACLIENT_VERSION = '2'
CINDERCLIENT_VERSION = '1'
HEATCLIENT_VERSION = '1'
+ IRONICCLIENT_VERSION = '1'
+ SAHARACLIENT_VERSION = '1.1'
def __init__(self, username, password, tenant_name):
# FIXME(andreaf) Auth provider for client_type 'official' is
@@ -465,6 +467,7 @@
# super cares for credentials validation
super(OfficialClientManager, self).__init__(
username=username, password=password, tenant_name=tenant_name)
+ self.baremetal_client = self._get_baremetal_client()
self.compute_client = self._get_compute_client(username,
password,
tenant_name)
@@ -484,11 +487,34 @@
username,
password,
tenant_name)
+ self.data_processing_client = self._get_data_processing_client(
+ username,
+ password,
+ tenant_name)
+
+ def _get_roles(self):
+ keystone_admin = self._get_identity_client(
+ CONF.identity.admin_username,
+ CONF.identity.admin_password,
+ CONF.identity.admin_tenant_name)
+
+ username = self.credentials['username']
+ tenant_name = self.credentials['tenant_name']
+ user_id = keystone_admin.users.find(name=username).id
+ tenant_id = keystone_admin.tenants.find(name=tenant_name).id
+
+ roles = keystone_admin.roles.roles_for_user(
+ user=user_id, tenant=tenant_id)
+
+ return [r.name for r in roles]
def _get_compute_client(self, username, password, tenant_name):
# Novaclient will not execute operations for anyone but the
# identified user, so a new client needs to be created for
# each user that operations need to be performed for.
+ if not CONF.service_available.nova:
+ return None
+ import novaclient.client
self._validate_credentials(username, password, tenant_name)
auth_url = CONF.identity.uri
@@ -510,6 +536,9 @@
http_log_debug=True)
def _get_image_client(self):
+ if not CONF.service_available.glance:
+ return None
+ import glanceclient
token = self.identity_client.auth_token
region = CONF.identity.region
endpoint_type = CONF.image.endpoint_type
@@ -521,9 +550,13 @@
insecure=dscv)
def _get_volume_client(self, username, password, tenant_name):
+ if not CONF.service_available.cinder:
+ return None
+ import cinderclient.client
auth_url = CONF.identity.uri
region = CONF.identity.region
endpoint_type = CONF.volume.endpoint_type
+ dscv = CONF.identity.disable_ssl_certificate_validation
return cinderclient.client.Client(self.CINDERCLIENT_VERSION,
username,
password,
@@ -531,9 +564,13 @@
auth_url,
region_name=region,
endpoint_type=endpoint_type,
+ insecure=dscv,
http_log_debug=True)
def _get_object_storage_client(self, username, password, tenant_name):
+ if not CONF.service_available.swift:
+ return None
+ import swiftclient
auth_url = CONF.identity.uri
# add current tenant to swift operator role group.
keystone_admin = self._get_identity_client(
@@ -563,6 +600,9 @@
def _get_orchestration_client(self, username=None, password=None,
tenant_name=None):
+ if not CONF.service_available.heat:
+ return None
+ import heatclient.client
if not username:
username = CONF.identity.admin_username
if not password:
@@ -606,6 +646,37 @@
auth_url=auth_url,
insecure=dscv)
+ def _get_baremetal_client(self):
+ # the ironic client is currently intended to be used by admin users
+ if not CONF.service_available.ironic:
+ return None
+ import ironicclient.client
+ roles = self._get_roles()
+ if CONF.identity.admin_role not in roles:
+ return None
+
+ auth_url = CONF.identity.uri
+ api_version = self.IRONICCLIENT_VERSION
+ insecure = CONF.identity.disable_ssl_certificate_validation
+ service_type = CONF.baremetal.catalog_type
+ endpoint_type = CONF.baremetal.endpoint_type
+ creds = {
+ 'os_username': self.credentials['username'],
+ 'os_password': self.credentials['password'],
+ 'os_tenant_name': self.credentials['tenant_name']
+ }
+
+ try:
+ return ironicclient.client.get_client(
+ api_version=api_version,
+ os_auth_url=auth_url,
+ insecure=insecure,
+ os_service_type=service_type,
+ os_endpoint_type=endpoint_type,
+ **creds)
+ except keystoneclient.exceptions.EndpointNotFound:
+ return None
+
def _get_network_client(self):
# The intended configuration is for the network client to have
# admin privileges and indicate for whom resources are being
@@ -613,6 +684,9 @@
# preferable to authenticating as a specific user because
# working with certain resources (public routers and networks)
# often requires admin privileges anyway.
+ if not CONF.service_available.neutron:
+ return None
+ import neutronclient.v2_0.client
username = CONF.identity.admin_username
password = CONF.identity.admin_password
tenant_name = CONF.identity.admin_tenant_name
@@ -629,3 +703,25 @@
endpoint_type=endpoint_type,
auth_url=auth_url,
insecure=dscv)
+
+ def _get_data_processing_client(self, username, password, tenant_name):
+ if not CONF.service_available.sahara:
+ # Sahara isn't available
+ return None
+
+ import saharaclient.client
+
+ self._validate_credentials(username, password, tenant_name)
+
+ endpoint_type = CONF.data_processing.endpoint_type
+ catalog_type = CONF.data_processing.catalog_type
+ auth_url = CONF.identity.uri
+
+ client = saharaclient.client.Client(self.SAHARACLIENT_VERSION,
+ username, password,
+ project_name=tenant_name,
+ endpoint_type=endpoint_type,
+ service_type=catalog_type,
+ auth_url=auth_url)
+
+ return client
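The official-client getters above now follow a lazy-import pattern: return None when the service is not configured, and only then import the client package, so the corresponding python-*client library does not need to be installed for unrelated runs. A minimal standalone sketch of that pattern (function and argument names are illustrative):

    import importlib

    def get_optional_client(module_name, service_enabled):
        # Mirror the getters above: skip the import entirely when the
        # service is not configured, otherwise import on demand.
        if not service_enabled:
            return None
        return importlib.import_module(module_name)

    # a disabled service short-circuits before any import is attempted
    assert get_optional_client('ironicclient.client', service_enabled=False) is None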
diff --git a/tempest/common/debug.py b/tempest/common/debug.py
index 6a496c2..228be7a 100644
--- a/tempest/common/debug.py
+++ b/tempest/common/debug.py
@@ -20,7 +20,7 @@
CONF = config.CONF
LOG = logging.getLogger(__name__)
-tables = ['filter', 'nat', 'mangle']
+TABLES = ['filter', 'nat', 'mangle']
def log_ip_ns():
@@ -28,14 +28,14 @@
return
LOG.info("Host Addr:\n" + commands.ip_addr_raw())
LOG.info("Host Route:\n" + commands.ip_route_raw())
- for table in ['filter', 'nat', 'mangle']:
+ for table in TABLES:
LOG.info('Host %s table:\n%s', table, commands.iptables_raw(table))
ns_list = commands.ip_ns_list()
LOG.info("Host ns list" + str(ns_list))
for ns in ns_list:
LOG.info("ns(%s) Addr:\n%s", ns, commands.ip_ns_addr(ns))
LOG.info("ns(%s) Route:\n%s", ns, commands.ip_ns_route(ns))
- for table in ['filter', 'nat', 'mangle']:
+ for table in TABLES:
LOG.info('ns(%s) table(%s):\n%s', ns, table,
commands.iptables_ns(ns, table))
diff --git a/tempest/common/generator/base_generator.py b/tempest/common/generator/base_generator.py
index 95d50e2..87d65d0 100644
--- a/tempest/common/generator/base_generator.py
+++ b/tempest/common/generator/base_generator.py
@@ -87,12 +87,6 @@
"additionalProperties": False,
}
- def __new__(cls, *args, **kwargs):
- if not cls._instance:
- cls._instance = super(BasicGeneratorSet, cls).__new__(cls, *args,
- **kwargs)
- return cls._instance
-
def __init__(self):
self.types_dict = {}
for m in dir(self):
diff --git a/tempest/common/glance_http.py b/tempest/common/glance_http.py
index b4ba933..9358851 100644
--- a/tempest/common/glance_http.py
+++ b/tempest/common/glance_http.py
@@ -19,6 +19,7 @@
import hashlib
import httplib
import json
+import OpenSSL
import posixpath
import re
from six import moves
@@ -27,14 +28,6 @@
import struct
import urlparse
-
-# Python 2.5 compat fix
-if not hasattr(urlparse, 'parse_qsl'):
- import cgi
- urlparse.parse_qsl = cgi.parse_qsl
-
-import OpenSSL
-
from tempest import exceptions as exc
from tempest.openstack.common import log as logging
@@ -50,7 +43,7 @@
self.auth_provider = auth_provider
self.filters = filters
self.endpoint = auth_provider.base_url(filters)
- endpoint_parts = self.parse_endpoint(self.endpoint)
+ endpoint_parts = urlparse.urlparse(self.endpoint)
self.endpoint_scheme = endpoint_parts.scheme
self.endpoint_hostname = endpoint_parts.hostname
self.endpoint_port = endpoint_parts.port
@@ -61,10 +54,6 @@
self.endpoint_scheme, **kwargs)
@staticmethod
- def parse_endpoint(endpoint):
- return urlparse.urlparse(endpoint)
-
- @staticmethod
def get_connection_class(scheme):
if scheme == 'https':
return VerifiedHTTPSConnection
@@ -107,7 +96,7 @@
conn = self.get_connection()
try:
- url_parts = self.parse_endpoint(url)
+ url_parts = urlparse.urlparse(url)
conn_url = posixpath.normpath(url_parts.path)
LOG.debug('Actual Path: {path}'.format(path=conn_url))
if kwargs['headers'].get('Transfer-Encoding') == 'chunked':
@@ -134,7 +123,6 @@
raise exc.TimeoutException(message)
body_iter = ResponseBodyIterator(resp)
-
# Read body into string if it isn't obviously image data
if resp.getheader('content-type', None) != 'application/octet-stream':
body_str = ''.join([body_chunk for body_chunk in body_iter])
@@ -178,7 +166,7 @@
resp, body_iter = self._http_request(url, method, **kwargs)
- if 'application/json' in resp.getheader('content-type', None):
+ if 'application/json' in resp.getheader('content-type', ''):
body = ''.join([chunk for chunk in body_iter])
try:
body = json.loads(body)
diff --git a/tempest/common/rest_client.py b/tempest/common/rest_client.py
index 830968e..0d32f41 100644
--- a/tempest/common/rest_client.py
+++ b/tempest/common/rest_client.py
@@ -197,26 +197,26 @@
details = pattern.format(read_code, expected_code)
raise exceptions.InvalidHttpSuccessCode(details)
- def post(self, url, body, headers=None):
- return self.request('POST', url, headers, body)
+ def post(self, url, body, headers=None, extra_headers=False):
+ return self.request('POST', url, extra_headers, headers, body)
- def get(self, url, headers=None):
- return self.request('GET', url, headers)
+ def get(self, url, headers=None, extra_headers=False):
+ return self.request('GET', url, extra_headers, headers)
- def delete(self, url, headers=None, body=None):
- return self.request('DELETE', url, headers, body)
+ def delete(self, url, headers=None, body=None, extra_headers=False):
+ return self.request('DELETE', url, extra_headers, headers, body)
- def patch(self, url, body, headers=None):
- return self.request('PATCH', url, headers, body)
+ def patch(self, url, body, headers=None, extra_headers=False):
+ return self.request('PATCH', url, extra_headers, headers, body)
- def put(self, url, body, headers=None):
- return self.request('PUT', url, headers, body)
+ def put(self, url, body, headers=None, extra_headers=False):
+ return self.request('PUT', url, extra_headers, headers, body)
- def head(self, url, headers=None):
- return self.request('HEAD', url, headers)
+ def head(self, url, headers=None, extra_headers=False):
+ return self.request('HEAD', url, extra_headers, headers)
- def copy(self, url, headers=None):
- return self.request('COPY', url, headers)
+ def copy(self, url, headers=None, extra_headers=False):
+ return self.request('COPY', url, extra_headers, headers)
def get_versions(self):
resp, body = self.get('')
@@ -418,13 +418,22 @@
return resp, resp_body
- def request(self, method, url, headers=None, body=None):
+ def request(self, method, url, extra_headers=False, headers=None,
+ body=None):
+ # if extra_headers is True, the default headers are merged
+ # into the caller-supplied headers
retry = 0
if headers is None:
# NOTE(vponomaryov): if some client do not need headers,
# it should explicitly pass empty dict
headers = self.get_headers()
+ elif extra_headers:
+ try:
+ headers = headers.copy()
+ headers.update(self.get_headers())
+ except (ValueError, TypeError):
+ headers = self.get_headers()
resp, resp_body = self._request(method, url,
headers=headers, body=body)
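A minimal standalone sketch (assumed names, not part of the patch) of the new extra_headers behaviour: when the caller supplies headers and sets extra_headers=True, the client's default headers are merged on top of the caller-supplied ones.

    def merge_headers(user_headers, default_headers, extra_headers=False):
        # mirrors the branch added to RestClient.request() above
        if user_headers is None:
            return dict(default_headers)
        if extra_headers:
            merged = user_headers.copy()
            merged.update(default_headers)   # defaults win on key conflicts
            return merged
        return user_headers

    defaults = {'Content-Type': 'application/json'}
    print(merge_headers({'X-Custom': '1'}, defaults, extra_headers=True))
    # {'X-Custom': '1', 'Content-Type': 'application/json'}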
diff --git a/tempest/common/utils/linux/remote_client.py b/tempest/common/utils/linux/remote_client.py
index 00e5e0d..95b6833 100644
--- a/tempest/common/utils/linux/remote_client.py
+++ b/tempest/common/utils/linux/remote_client.py
@@ -112,3 +112,8 @@
def turn_nic_on(self, nic):
cmd = "sudo /bin/ip link set {nic} up".format(nic=nic)
return self.exec_command(cmd)
+
+ def get_pids(self, pr_name):
+ # Get pid(s) of a process/program
+ cmd = "ps -ef | grep %s | grep -v 'grep' | awk {'print $1'}" % pr_name
+ return self.exec_command(cmd).split('\n')
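A local illustration (not part of the patch) of the pipeline get_pids() runs on the guest. Which column awk prints as the "pid" depends on the guest's ps output format: busybox ps on a CirrOS image prints the PID first, while GNU 'ps -ef' prints the UID first.

    import subprocess

    cmd = "ps -ef | grep python | grep -v 'grep' | awk {'print $1'}"
    out = subprocess.check_output(cmd, shell=True).decode()
    print([line for line in out.split('\n') if line])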
diff --git a/tempest/config.py b/tempest/config.py
index 0212d8a..b9fe572 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -126,7 +126,7 @@
"OpenStack Identity API admin credentials are known."),
cfg.StrOpt('image_ref',
default="{$IMAGE_ID}",
- help="Valid secondary image reference to be used in tests."),
+ help="Valid primary image reference to be used in tests."),
cfg.StrOpt('image_ref_alt',
default="{$IMAGE_ID_ALT}",
help="Valid secondary image reference to be used in tests."),
@@ -441,6 +441,9 @@
cfg.StrOpt('disk_format',
default='raw',
help='Disk format to use when copying a volume to image'),
+ cfg.IntOpt('volume_size',
+ default=1,
+ help='Default size in GB for volumes created by volumes tests'),
]
volume_feature_group = cfg.OptGroup(name='volume-feature-enabled',
@@ -556,7 +559,7 @@
default=1,
help="Time in seconds between build status checks."),
cfg.IntOpt('build_timeout',
- default=600,
+ default=1200,
help="Timeout in seconds to wait for a stack to build."),
cfg.StrOpt('instance_type',
default='m1.micro',
@@ -844,13 +847,29 @@
BaremetalGroup = [
cfg.StrOpt('catalog_type',
default='baremetal',
- help="Catalog type of the baremetal provisioning service."),
+ help="Catalog type of the baremetal provisioning service"),
+ cfg.BoolOpt('driver_enabled',
+ default=False,
+ help="Whether the Ironic nova-compute driver is enabled"),
cfg.StrOpt('endpoint_type',
default='publicURL',
choices=['public', 'admin', 'internal',
'publicURL', 'adminURL', 'internalURL'],
help="The endpoint type to use for the baremetal provisioning "
- "service."),
+ "service"),
+ cfg.IntOpt('active_timeout',
+ default=300,
+ help="Timeout for Ironic node to completely provision"),
+ cfg.IntOpt('association_timeout',
+ default=10,
+ help="Timeout for association of Nova instance and Ironic "
+ "node"),
+ cfg.IntOpt('power_timeout',
+ default=20,
+ help="Timeout for Ironic power transitions."),
+ cfg.IntOpt('unprovision_timeout',
+ default=20,
+ help="Timeout for unprovisioning an Ironic node.")
]
cli_group = cfg.OptGroup(name='cli', title="cli Configuration Options")
diff --git a/tempest/exceptions/__init__.py b/tempest/exceptions/__init__.py
index d313def..9eb9c1e 100644
--- a/tempest/exceptions/__init__.py
+++ b/tempest/exceptions/__init__.py
@@ -78,6 +78,12 @@
"due to '%(stack_status_reason)s'")
+class StackResourceBuildErrorException(base.TempestException):
+ message = ("Resource %(resource_name) in stack %(stack_identifier)s is "
+ "in %(resource_status)s status due to "
+ "'%(resource_status_reason)s'")
+
+
class BadRequest(base.RestClientException):
message = "Bad request"
diff --git a/tempest/hacking/checks.py b/tempest/hacking/checks.py
index 234faad..270851d 100644
--- a/tempest/hacking/checks.py
+++ b/tempest/hacking/checks.py
@@ -47,7 +47,7 @@
T104: Scenario tests require a services decorator
"""
- if 'tempest/scenario' in filename:
+ if 'tempest/scenario/test_' in filename:
if TEST_DEFINITION.match(physical_line):
if not SCENARIO_DECORATOR.match(previous_logical):
return (physical_line.find('def'),
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index f06a850..5895c37 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -19,6 +19,7 @@
import six
import subprocess
+from ironicclient import exc as ironic_exceptions
import netaddr
from neutronclient.common import exceptions as exc
from novaclient import exceptions as nova_exceptions
@@ -71,11 +72,13 @@
username, password, tenant_name)
cls.compute_client = cls.manager.compute_client
cls.image_client = cls.manager.image_client
+ cls.baremetal_client = cls.manager.baremetal_client
cls.identity_client = cls.manager.identity_client
cls.network_client = cls.manager.network_client
cls.volume_client = cls.manager.volume_client
cls.object_storage_client = cls.manager.object_storage_client
cls.orchestration_client = cls.manager.orchestration_client
+ cls.data_processing_client = cls.manager.data_processing_client
cls.resource_keys = {}
cls.os_resources = []
@@ -283,7 +286,7 @@
return rules
def create_server(self, client=None, name=None, image=None, flavor=None,
- create_kwargs={}):
+ wait=True, create_kwargs={}):
if client is None:
client = self.compute_client
if name is None:
@@ -318,7 +321,8 @@
server = client.servers.create(name, image, flavor, **create_kwargs)
self.assertEqual(server.name, name)
self.set_resource(name, server)
- self.status_timeout(client.servers, server.id, 'ACTIVE')
+ if wait:
+ self.status_timeout(client.servers, server.id, 'ACTIVE')
# The instance retrieved on creation is missing network
# details, necessitating retrieval after it becomes active to
# ensure correct details.
@@ -439,6 +443,80 @@
LOG.debug("image:%s" % self.image)
+class BaremetalScenarioTest(OfficialClientTest):
+ @classmethod
+ def setUpClass(cls):
+ super(BaremetalScenarioTest, cls).setUpClass()
+
+ if (not CONF.service_available.ironic or
+ not CONF.baremetal.driver_enabled):
+ msg = 'Ironic not available or Ironic compute driver not enabled'
+ raise cls.skipException(msg)
+
+ # use an admin client manager for baremetal client
+ username, password, tenant = cls.admin_credentials()
+ manager = clients.OfficialClientManager(username, password, tenant)
+ cls.baremetal_client = manager.baremetal_client
+
+ # allow any issues obtaining the node list to raise early
+ cls.baremetal_client.node.list()
+
+ def _node_state_timeout(self, node_id, state_attr,
+ target_states, timeout=10, interval=1):
+ if not isinstance(target_states, list):
+ target_states = [target_states]
+
+ def check_state():
+ node = self.get_node(node_id=node_id)
+ if getattr(node, state_attr) in target_states:
+ return True
+ return False
+
+ if not tempest.test.call_until_true(
+ check_state, timeout, interval):
+ msg = ("Timed out waiting for node %s to reach %s state(s) %s" %
+ (node_id, state_attr, target_states))
+ raise exceptions.TimeoutException(msg)
+
+ def wait_provisioning_state(self, node_id, state, timeout):
+ self._node_state_timeout(
+ node_id=node_id, state_attr='provision_state',
+ target_states=state, timeout=timeout)
+
+ def wait_power_state(self, node_id, state):
+ self._node_state_timeout(
+ node_id=node_id, state_attr='power_state',
+ target_states=state, timeout=CONF.baremetal.power_timeout)
+
+ def wait_node(self, instance_id):
+ """Waits for a node to be associated with instance_id."""
+ def _get_node():
+ node = None
+ try:
+ node = self.get_node(instance_id=instance_id)
+ except ironic_exceptions.HTTPNotFound:
+ pass
+ return node is not None
+
+ if not tempest.test.call_until_true(
+ _get_node, CONF.baremetal.association_timeout, 1):
+ msg = ('Timed out waiting to get Ironic node by instance id %s'
+ % instance_id)
+ raise exceptions.TimeoutException(msg)
+
+ def get_node(self, node_id=None, instance_id=None):
+ if node_id:
+ return self.baremetal_client.node.get(node_id)
+ elif instance_id:
+ return self.baremetal_client.node.get_by_instance_uuid(instance_id)
+
+ def get_ports(self, node_id):
+ ports = []
+ for port in self.baremetal_client.node.list_ports(node_id):
+ ports.append(self.baremetal_client.port.get(port.uuid))
+ return ports
+
+
class NetworkScenarioTest(OfficialClientTest):
"""
Base class for network scenario tests
diff --git a/tempest/scenario/test_baremetal_basic_ops.py b/tempest/scenario/test_baremetal_basic_ops.py
new file mode 100644
index 0000000..c53aa83
--- /dev/null
+++ b/tempest/scenario/test_baremetal_basic_ops.py
@@ -0,0 +1,147 @@
+#
+# Copyright 2014 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest import config
+from tempest.openstack.common import log as logging
+from tempest.scenario import manager
+from tempest import test
+
+CONF = config.CONF
+
+LOG = logging.getLogger(__name__)
+
+
+# power/provision states as of icehouse
+class PowerStates(object):
+ """Possible power states of an Ironic node."""
+ POWER_ON = 'power on'
+ POWER_OFF = 'power off'
+ REBOOT = 'rebooting'
+ SUSPEND = 'suspended'
+
+
+class ProvisionStates(object):
+ """Possible provision states of an Ironic node."""
+ NOSTATE = None
+ INIT = 'initializing'
+ ACTIVE = 'active'
+ BUILDING = 'building'
+ DEPLOYWAIT = 'wait call-back'
+ DEPLOYING = 'deploying'
+ DEPLOYFAIL = 'deploy failed'
+ DEPLOYDONE = 'deploy complete'
+ DELETING = 'deleting'
+ DELETED = 'deleted'
+ ERROR = 'error'
+
+
+class BaremetalBasicOptsPXESSH(manager.BaremetalScenarioTest):
+ """
+ This smoke test exercises the pxe_ssh Ironic driver. It follows this basic
+ set of operations:
+ * Creates a keypair
+ * Boots an instance using the keypair
+ * Monitors the associated Ironic node for power and
+ expected state transitions
+ * Validates Ironic node's driver_info has been properly
+ updated
+ * Validates Ironic node's port data has been properly updated
+ * Verifies SSH connectivity using created keypair via fixed IP
+ * Associates a floating ip
+ * Verifies SSH connectivity using created keypair via floating IP
+ * Deletes instance
+ * Monitors the associated Ironic node for power and
+ expected state transitions
+ """
+ def add_keypair(self):
+ self.keypair = self.create_keypair()
+
+ def add_floating_ip(self):
+ floating_ip = self.compute_client.floating_ips.create()
+ self.instance.add_floating_ip(floating_ip)
+ return floating_ip.ip
+
+ def verify_connectivity(self, ip=None):
+ if ip:
+ dest = self.get_remote_client(ip)
+ else:
+ dest = self.get_remote_client(self.instance)
+ dest.validate_authentication()
+
+ def validate_driver_info(self):
+ f_id = self.instance.flavor['id']
+ flavor_extra = self.compute_client.flavors.get(f_id).get_keys()
+ driver_info = self.node.driver_info
+ self.assertEqual(driver_info['pxe_deploy_kernel'],
+ flavor_extra['baremetal:deploy_kernel_id'])
+ self.assertEqual(driver_info['pxe_deploy_ramdisk'],
+ flavor_extra['baremetal:deploy_ramdisk_id'])
+ self.assertEqual(driver_info['pxe_image_source'],
+ self.instance.image['id'])
+
+ def validate_ports(self):
+ for port in self.get_ports(self.node.uuid):
+ n_port_id = port.extra['vif_port_id']
+ n_port = self.network_client.show_port(n_port_id)['port']
+ self.assertEqual(n_port['device_id'], self.instance.id)
+ self.assertEqual(n_port['mac_address'], port.address)
+
+ def boot_instance(self):
+ create_kwargs = {
+ 'key_name': self.keypair.id
+ }
+ self.instance = self.create_server(
+ wait=False, create_kwargs=create_kwargs)
+
+ self.set_resource('instance', self.instance)
+
+ self.wait_node(self.instance.id)
+ self.node = self.get_node(instance_id=self.instance.id)
+
+ self.wait_power_state(self.node.uuid, PowerStates.POWER_ON)
+
+ self.wait_provisioning_state(
+ self.node.uuid,
+ [ProvisionStates.DEPLOYWAIT, ProvisionStates.ACTIVE],
+ timeout=15)
+
+ self.wait_provisioning_state(self.node.uuid, ProvisionStates.ACTIVE,
+ timeout=CONF.baremetal.active_timeout)
+
+ self.status_timeout(
+ self.compute_client.servers, self.instance.id, 'ACTIVE')
+
+ self.node = self.get_node(instance_id=self.instance.id)
+ self.instance = self.compute_client.servers.get(self.instance.id)
+
+ def terminate_instance(self):
+ self.instance.delete()
+ self.remove_resource('instance')
+ self.wait_power_state(self.node.uuid, PowerStates.POWER_OFF)
+ self.wait_provisioning_state(
+ self.node.uuid,
+ ProvisionStates.NOSTATE,
+ timeout=CONF.baremetal.unprovision_timeout)
+
+ @test.services('baremetal', 'compute', 'image', 'network')
+ def test_baremetal_server_ops(self):
+ self.add_keypair()
+ self.boot_instance()
+ self.validate_driver_info()
+ self.validate_ports()
+ self.verify_connectivity()
+ floating_ip = self.add_floating_ip()
+ self.verify_connectivity(ip=floating_ip)
+ self.terminate_instance()
diff --git a/tempest/scenario/test_load_balancer_basic.py b/tempest/scenario/test_load_balancer_basic.py
index f7a3d6f..5f71461 100644
--- a/tempest/scenario/test_load_balancer_basic.py
+++ b/tempest/scenario/test_load_balancer_basic.py
@@ -17,7 +17,6 @@
import urllib
from tempest.api.network import common as net_common
-from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
from tempest.scenario import manager
@@ -59,24 +58,45 @@
def setUpClass(cls):
super(TestLoadBalancerBasic, cls).setUpClass()
cls.check_preconditions()
- cls.security_groups = {}
cls.servers_keypairs = {}
cls.members = []
cls.floating_ips = {}
- cls.server_ip = None
- cls.vip_ip = None
+ cls.server_ips = {}
cls.port1 = 80
cls.port2 = 88
- def _create_security_groups(self):
- self.security_groups[self.tenant_id] =\
- self._create_security_group_neutron(tenant_id=self.tenant_id)
+ def setUp(self):
+ super(TestLoadBalancerBasic, self).setUp()
+ self.server_ips = {}
+ self._create_security_group()
- def _create_server(self):
- tenant_id = self.tenant_id
- name = data_utils.rand_name("smoke_server-")
+ def cleanup_wrapper(self, resource):
+ self.cleanup_resource(resource, self.__class__.__name__)
+
+ def _create_security_group(self):
+ self.security_group = self._create_security_group_neutron(
+ tenant_id=self.tenant_id)
+ self._create_security_group_rules_for_port(self.port1)
+ self._create_security_group_rules_for_port(self.port2)
+ self.addCleanup(self.cleanup_wrapper, self.security_group)
+
+ def _create_security_group_rules_for_port(self, port):
+ rule = {
+ 'direction': 'ingress',
+ 'protocol': 'tcp',
+ 'port_range_min': port,
+ 'port_range_max': port,
+ }
+ self._create_security_group_rule(
+ client=self.network_client,
+ secgroup=self.security_group,
+ tenant_id=self.tenant_id,
+ **rule)
+
+ def _create_server(self, name):
keypair = self.create_keypair(name='keypair-%s' % name)
- security_groups = [self.security_groups[tenant_id].name]
+ self.addCleanup(self.cleanup_wrapper, keypair)
+ security_groups = [self.security_group.name]
net = self._list_networks(tenant_id=self.tenant_id)[0]
create_kwargs = {
'nics': [
@@ -87,51 +107,106 @@
}
server = self.create_server(name=name,
create_kwargs=create_kwargs)
- self.servers_keypairs[server] = keypair
+ self.addCleanup(self.cleanup_wrapper, server)
+ self.servers_keypairs[server.id] = keypair
if (config.network.public_network_id and not
config.network.tenant_networks_reachable):
public_network_id = config.network.public_network_id
floating_ip = self._create_floating_ip(
server, public_network_id)
+ self.addCleanup(self.cleanup_wrapper, floating_ip)
self.floating_ips[floating_ip] = server
- self.server_ip = floating_ip.floating_ip_address
+ self.server_ips[server.id] = floating_ip.floating_ip_address
else:
- self.server_ip = server.networks[net['name']][0]
+ self.server_ips[server.id] = server.networks[net.name][0]
self.assertTrue(self.servers_keypairs)
return server
- def _start_servers(self, server):
+ def _create_servers(self):
+ for count in range(2):
+ self._create_server(name=("server%s" % (count + 1)))
+ self.assertEqual(len(self.servers_keypairs), 2)
+
+ def _start_servers(self):
"""
+ Start two backends
+
1. SSH to the instance
2. Start two http backends listening on ports 80 and 88 respectively
+ In case there are two instances, each backend is created on a separate
+ instance.
+
+ The backends are inetd services. To start them, we add a line of the
+ following form to /etc/inetd.conf:
+ www stream tcp nowait root /bin/sh sh /home/cirros/script_name
+
+ where /home/cirros/script_name is the path to a script which
+ echoes the response:
+ echo -e 'HTTP/1.0 200 OK\r\n\r\nserver_name'
+
+ If we want the server to listen on port 88, then we use
+ "kerberos" instead of "www".
"""
- private_key = self.servers_keypairs[server].private_key
- ssh_client = self.get_remote_client(
- server_or_ip=self.server_ip,
- private_key=private_key).ssh_client
- start_server = "while true; do echo -e 'HTTP/1.0 200 OK\r\n\r\n" \
- "%(server)s' | sudo nc -l -p %(port)s ; done &"
- cmd = start_server % {'server': 'server1',
- 'port': self.port1}
- ssh_client.exec_command(cmd)
- cmd = start_server % {'server': 'server2',
- 'port': self.port2}
- ssh_client.exec_command(cmd)
+ for server_id, ip in self.server_ips.iteritems():
+ private_key = self.servers_keypairs[server_id].private_key
+ server_name = self.compute_client.servers.get(server_id).name
+ ssh_client = self.get_remote_client(
+ server_or_ip=ip,
+ private_key=private_key)
+ ssh_client.validate_authentication()
+ # Create service for inetd
+ create_script = """sudo sh -c "echo -e \\"echo -e 'HTTP/1.0 """ \
+ """200 OK\\\\\\r\\\\\\n\\\\\\r\\\\\\n""" \
+ """%(server)s'\\" >>/home/cirros/%(script)s\""""
- def _check_connection(self, check_ip):
- def try_connect(ip):
+ cmd = create_script % {
+ 'server': server_name,
+ 'script': 'script1'}
+ ssh_client.exec_command(cmd)
+ # Configure inetd
+ configure_inetd = """sudo sh -c "echo -e \\"%(service)s """ \
+ """stream tcp nowait root /bin/sh sh """ \
+ """/home/cirros/%(script)s\\" >> """ \
+ """/etc/inetd.conf\""""
+ # "www" stands for port 80
+ cmd = configure_inetd % {'service': 'www',
+ 'script': 'script1'}
+ ssh_client.exec_command(cmd)
+
+ if len(self.server_ips) == 1:
+ cmd = create_script % {'server': 'server2',
+ 'script': 'script2'}
+ ssh_client.exec_command(cmd)
+ # "kerberos" stands for port 88
+ cmd = configure_inetd % {'service': 'kerberos',
+ 'script': 'script2'}
+ ssh_client.exec_command(cmd)
+
+ # Get PIDs of inetd
+ pids = ssh_client.get_pids('inetd')
+ if pids != ['']:
+ # If there are any inetd processes, reload them
+ kill_cmd = "sudo kill -HUP %s" % ' '.join(pids)
+ ssh_client.exec_command(kill_cmd)
+ else:
+ # In other case start inetd
+ start_inetd = "sudo /usr/sbin/inetd /etc/inetd.conf"
+ ssh_client.exec_command(start_inetd)
+
+ def _check_connection(self, check_ip, port=80):
+ def try_connect(ip, port):
try:
- urllib.urlopen("http://{0}/".format(ip))
- return True
+ resp = urllib.urlopen("http://{0}:{1}/".format(ip, port))
+ if resp.getcode() == 200:
+ return True
+ return False
except IOError:
return False
timeout = config.compute.ping_timeout
- timer = 0
- while not try_connect(check_ip):
- time.sleep(1)
- timer += 1
- if timer >= timeout:
+ start = time.time()
+ while not try_connect(check_ip, port):
+ if (time.time() - start) > timeout:
message = "Timed out trying to connect to %s" % check_ip
raise exceptions.TimeoutException(message)
@@ -142,30 +217,37 @@
self.subnet = net_common.DeletableSubnet(client=self.network_client,
**subnet)
self.pool = super(TestLoadBalancerBasic, self)._create_pool(
- 'ROUND_ROBIN',
- 'HTTP',
- self.subnet.id)
+ lb_method='ROUND_ROBIN',
+ protocol='HTTP',
+ subnet_id=self.subnet.id)
+ self.addCleanup(self.cleanup_wrapper, self.pool)
self.assertTrue(self.pool)
- def _create_members(self, server_ids):
+ def _create_members(self):
"""
Create two members.
In case there is only one server, create both members with the same ip
but with different ports to listen on.
"""
- servers = self.compute_client.servers.list()
- for server in servers:
- if server.id in server_ids:
- ip = self.server_ip
- pool_id = self.pool.id
- if len(set(server_ids)) == 1 or len(servers) == 1:
- member1 = self._create_member(ip, self.port1, pool_id)
- member2 = self._create_member(ip, self.port2, pool_id)
- self.members.extend([member1, member2])
- else:
- member = self._create_member(ip, self.port1, pool_id)
- self.members.append(member)
+
+ for server_id, ip in self.server_ips.iteritems():
+ if len(self.server_ips) == 1:
+ member1 = self._create_member(address=ip,
+ protocol_port=self.port1,
+ pool_id=self.pool.id)
+ self.addCleanup(self.cleanup_wrapper, member1)
+ member2 = self._create_member(address=ip,
+ protocol_port=self.port2,
+ pool_id=self.pool.id)
+ self.addCleanup(self.cleanup_wrapper, member2)
+ self.members.extend([member1, member2])
+ else:
+ member = self._create_member(address=ip,
+ protocol_port=self.port1,
+ pool_id=self.pool.id)
+ self.addCleanup(self.cleanup_wrapper, member)
+ self.members.append(member)
self.assertTrue(self.members)
def _assign_floating_ip_to_vip(self, vip):
@@ -173,22 +255,23 @@
port_id = vip.port_id
floating_ip = self._create_floating_ip(vip, public_network_id,
port_id=port_id)
+ self.addCleanup(self.cleanup_wrapper, floating_ip)
self.floating_ips.setdefault(vip.id, [])
self.floating_ips[vip.id].append(floating_ip)
def _create_load_balancer(self):
self._create_pool()
- self._create_members([self.servers_keypairs.keys()[0].id])
- subnet_id = self.subnet.id
- pool_id = self.pool.id
- self.vip = super(TestLoadBalancerBasic, self)._create_vip('HTTP', 80,
- subnet_id,
- pool_id)
- self._status_timeout(NeutronRetriever(self.network_client,
- self.network_client.vip_path,
- net_common.DeletableVip),
- self.vip.id,
- expected_status='ACTIVE')
+ self._create_members()
+ self.vip = self._create_vip(protocol='HTTP',
+ protocol_port=80,
+ subnet_id=self.subnet.id,
+ pool_id=self.pool.id)
+ self.addCleanup(self.cleanup_wrapper, self.vip)
+ self.status_timeout(NeutronRetriever(self.network_client,
+ self.network_client.vip_path,
+ net_common.DeletableVip),
+ self.vip.id,
+ expected_status='ACTIVE')
if (config.network.public_network_id and not
config.network.tenant_networks_reachable):
self._assign_floating_ip_to_vip(self.vip)
@@ -199,34 +282,49 @@
def _check_load_balancing(self):
"""
- 1. Send 10 requests on the floating ip associated with the VIP
+ 1. Send 100 requests on the floating ip associated with the VIP
2. Check that the requests are shared between
the two servers and that both of them get equal portions
of the requests
"""
self._check_connection(self.vip_ip)
+ resp = self._send_requests(self.vip_ip)
+ self.assertEqual(set(["server1\n", "server2\n"]), set(resp))
+ self.assertEqual(50, resp.count("server1\n"))
+ self.assertEqual(50, resp.count("server2\n"))
+
+ def _send_requests(self, vip_ip):
resp = []
- for count in range(10):
+ for count in range(100):
resp.append(
urllib.urlopen(
- "http://{0}/".format(self.vip_ip)).read())
- self.assertEqual(set(["server1\n", "server2\n"]), set(resp))
- self.assertEqual(5, resp.count("server1\n"))
- self.assertEqual(5, resp.count("server2\n"))
+ "http://{0}/".format(vip_ip)).read())
+ return resp
- @test.skip_because(bug='1295165')
@test.attr(type='smoke')
@test.services('compute', 'network')
def test_load_balancer_basic(self):
- self._create_security_groups()
- server = self._create_server()
- self._start_servers(server)
+ self._create_server('server1')
+ self._start_servers()
self._create_load_balancer()
self._check_load_balancing()
class NeutronRetriever(object):
+ """
+ Helper class that allows neutron objects returned by GET requests to be
+ handled as attribute dicts.
+
+ When the get() method is called, the returned dictionary is wrapped in
+ the corresponding DeletableResource class, which provides attribute
+ access to the dictionary values.
+
+ Usage:
+ This retriever allows status_timeout from tempest.manager to be used
+ with Neutron objects.
+ """
+
def __init__(self, network_client, path, resource):
self.network_client = network_client
self.path = path
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index e7e97b5..0ba65cf 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -13,6 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import testtools
+
from tempest.common import debug
from tempest.common.utils import data_utils
from tempest import config
@@ -158,6 +160,8 @@
self.server.rebuild(image_ref_alt)
self._wait_server_status_and_check_network_connectivity()
+ @testtools.skipUnless(CONF.compute_feature_enabled.pause,
+ 'Pause is not available.')
@services('compute', 'network')
def test_server_connectivity_pause_unpause(self):
self.server.pause()
@@ -167,6 +171,8 @@
self.server.unpause()
self._wait_server_status_and_check_network_connectivity()
+ @testtools.skipUnless(CONF.compute_feature_enabled.suspend,
+ 'Suspend is not available.')
@services('compute', 'network')
def test_server_connectivity_suspend_resume(self):
self.server.suspend()
@@ -176,11 +182,10 @@
self.server.resume()
self._wait_server_status_and_check_network_connectivity()
+ @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+ 'Resize is not available.')
@services('compute', 'network')
def test_server_connectivity_resize(self):
- if not CONF.compute_feature_enabled.resize:
- msg = "Skipping test - resize not available on this host"
- raise self.skipException(msg)
resize_flavor = CONF.compute.flavor_ref_alt
if resize_flavor == CONF.compute.flavor_ref:
msg = "Skipping test - flavor_ref and flavor_ref_alt are identical"
diff --git a/tempest/scenario/test_server_advanced_ops.py b/tempest/scenario/test_server_advanced_ops.py
index c0eb6e7..5a1dc04 100644
--- a/tempest/scenario/test_server_advanced_ops.py
+++ b/tempest/scenario/test_server_advanced_ops.py
@@ -13,6 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import testtools
+
from tempest import config
from tempest.openstack.common import log as logging
from tempest.scenario import manager
@@ -37,16 +39,12 @@
cls.set_network_resources()
super(TestServerAdvancedOps, cls).setUpClass()
- if not CONF.compute_feature_enabled.resize:
- msg = "Skipping test - resize not available on this host"
- raise cls.skipException(msg)
-
- resize_flavor = CONF.compute.flavor_ref_alt
-
- if resize_flavor == CONF.compute.flavor_ref:
+ if CONF.compute.flavor_ref_alt == CONF.compute.flavor_ref:
msg = "Skipping test - flavor_ref and flavor_ref_alt are identical"
raise cls.skipException(msg)
+ @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+ 'Resize is not available.')
@test.services('compute')
def test_resize_server_confirm(self):
# We create an instance for use in this test
@@ -65,6 +63,8 @@
self.status_timeout(
self.compute_client.servers, instance_id, 'ACTIVE')
+ @testtools.skipUnless(CONF.compute_feature_enabled.suspend,
+ 'Suspend is not available.')
@test.services('compute')
def test_server_sequence_suspend_resume(self):
# We create an instance for use in this test
diff --git a/tempest/scenario/test_server_basic_ops.py b/tempest/scenario/test_server_basic_ops.py
index d369f12..13e00a5 100644
--- a/tempest/scenario/test_server_basic_ops.py
+++ b/tempest/scenario/test_server_basic_ops.py
@@ -14,19 +14,17 @@
# under the License.
from tempest.common.utils import data_utils
-from tempest.common.utils import test_utils
from tempest import config
from tempest.openstack.common import log as logging
from tempest.scenario import manager
+from tempest.scenario import utils as test_utils
from tempest import test
-import testscenarios
-
CONF = config.CONF
LOG = logging.getLogger(__name__)
-load_tests = testscenarios.load_tests_apply_scenarios
+load_tests = test_utils.load_tests_input_scenario_utils
class TestServerBasicOps(manager.OfficialClientTest):
@@ -43,13 +41,6 @@
* Terminate the instance
"""
- scenario_utils = test_utils.InputScenarioUtils()
- scenario_flavor = scenario_utils.scenario_flavors
- scenario_image = scenario_utils.scenario_images
-
- scenarios = testscenarios.multiply_scenarios(scenario_image,
- scenario_flavor)
-
def setUp(self):
super(TestServerBasicOps, self).setUp()
# Setup image and flavor the test instance
@@ -99,42 +90,6 @@
create_kwargs=create_kwargs)
self.set_resource('instance', instance)
- def pause_server(self):
- instance = self.get_resource('instance')
- instance_id = instance.id
- LOG.debug("Pausing instance %s. Current status: %s",
- instance_id, instance.status)
- instance.pause()
- self.status_timeout(
- self.compute_client.servers, instance_id, 'PAUSED')
-
- def unpause_server(self):
- instance = self.get_resource('instance')
- instance_id = instance.id
- LOG.debug("Unpausing instance %s. Current status: %s",
- instance_id, instance.status)
- instance.unpause()
- self.status_timeout(
- self.compute_client.servers, instance_id, 'ACTIVE')
-
- def suspend_server(self):
- instance = self.get_resource('instance')
- instance_id = instance.id
- LOG.debug("Suspending instance %s. Current status: %s",
- instance_id, instance.status)
- instance.suspend()
- self.status_timeout(self.compute_client.servers,
- instance_id, 'SUSPENDED')
-
- def resume_server(self):
- instance = self.get_resource('instance')
- instance_id = instance.id
- LOG.debug("Resuming instance %s. Current status: %s",
- instance_id, instance.status)
- instance.resume()
- self.status_timeout(
- self.compute_client.servers, instance_id, 'ACTIVE')
-
def terminate_instance(self):
instance = self.get_resource('instance')
instance.delete()
@@ -149,10 +104,11 @@
instance.add_floating_ip(floating_ip)
# Check ssh
try:
- self.get_remote_client(
+ linux_client = self.get_remote_client(
server_or_ip=floating_ip.ip,
username=self.image_utils.ssh_user(self.image_ref),
- private_key=self.keypair.private)
+ private_key=self.keypair.private_key)
+ linux_client.validate_authentication()
except Exception:
LOG.exception('ssh to server failed')
self._log_console_output()
@@ -163,9 +119,5 @@
self.add_keypair()
self.create_security_group()
self.boot_instance()
- self.pause_server()
- self.unpause_server()
- self.suspend_server()
- self.resume_server()
self.verify_ssh()
self.terminate_instance()
diff --git a/tempest/common/utils/test_utils.py b/tempest/scenario/utils.py
similarity index 85%
rename from tempest/common/utils/test_utils.py
rename to tempest/scenario/utils.py
index cc0d831..4c7b6d7 100644
--- a/tempest/common/utils/test_utils.py
+++ b/tempest/scenario/utils.py
@@ -12,15 +12,19 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest import clients
-from tempest.common.utils import misc
-from tempest import config
import json
import re
import string
import unicodedata
+import testscenarios
+import testtools
+
+from tempest import clients
+from tempest.common.utils import misc
+from tempest import config
+
CONF = config.CONF
@@ -79,7 +83,7 @@
class TestInputScenario(manager.OfficialClientTest):
- scenario_utils = test_utils.InputScenarioUtils()
+ scenario_utils = utils.InputScenarioUtils()
scenario_flavor = scenario_utils.scenario_flavors
scenario_image = scenario_utils.scenario_images
scenarios = testscenarios.multiply_scenarios(scenario_image,
@@ -134,3 +138,22 @@
for f in flavors if re.search(self.flavor_pattern, str(f.name))
]
return self._scenario_flavors
+
+
+def load_tests_input_scenario_utils(*args):
+ """
+ Wrapper for testscenarios that sets the scenarios at load time, which
+ avoids running a getattr on the CONF object at import.
+ """
+ if getattr(args[0], 'suiteClass', None) is not None:
+ loader, standard_tests, pattern = args
+ else:
+ standard_tests, module, loader = args
+ scenario_utils = InputScenarioUtils()
+ scenario_flavor = scenario_utils.scenario_flavors
+ scenario_image = scenario_utils.scenario_images
+ for test in testtools.iterate_tests(standard_tests):
+ setattr(test, 'scenarios', testscenarios.multiply_scenarios(
+ scenario_image,
+ scenario_flavor))
+ return testscenarios.load_tests_apply_scenarios(*args)
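For reference, a minimal sketch of how a scenario test module opts in to this wrapper (it mirrors the test_server_basic_ops change above):

    # Sketch: module-level hook picked up by the unittest load_tests protocol.
    from tempest.scenario import utils as test_utils

    load_tests = test_utils.load_tests_input_scenario_utils
    # The test runner calls load_tests(...); the wrapper attaches the image
    # and flavor scenarios to each test and then delegates to
    # testscenarios.load_tests_apply_scenarios.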
diff --git a/tempest/services/baremetal/base.py b/tempest/services/baremetal/base.py
index 5f6b513..2af287f 100644
--- a/tempest/services/baremetal/base.py
+++ b/tempest/services/baremetal/base.py
@@ -12,6 +12,7 @@
import functools
import json
+import urllib
import six
@@ -103,16 +104,19 @@
return patch
- def _list_request(self, resource, permanent=False):
+ def _list_request(self, resource, permanent=False, **kwargs):
"""
Get the list of objects of the specified type.
:param resource: The name of the REST resource, e.g., 'nodes'.
+ "param **kw: Parameters for the request.
:return: A tuple with the server response and deserialized JSON list
of objects
"""
uri = self._get_uri(resource, permanent=permanent)
+ if kwargs:
+ uri += "?%s" % urllib.urlencode(kwargs)
resp, body = self.get(uri)
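A rough illustration of the new filtering support; the MAC value is hypothetical:

    # Sketch: keyword arguments are urlencoded into the query string.
    resp, body = client.list_ports(address='52:54:00:cf:2d:31')
    # _list_request builds something like
    #   .../ports?address=52%3A54%3A00%3Acf%3A2d%3A31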
diff --git a/tempest/services/baremetal/v1/base_v1.py b/tempest/services/baremetal/v1/base_v1.py
index 3f4c509..2145c88 100644
--- a/tempest/services/baremetal/v1/base_v1.py
+++ b/tempest/services/baremetal/v1/base_v1.py
@@ -37,9 +37,19 @@
return self._list_request('chassis')
@base.handle_errors
- def list_ports(self):
+ def list_ports(self, **kwargs):
"""List all existing ports."""
- return self._list_request('ports')
+ return self._list_request('ports', **kwargs)
+
+ @base.handle_errors
+ def list_ports_detail(self):
+ """Details list all existing ports."""
+ return self._list_request('/ports/detail')
+
+ @base.handle_errors
+ def list_drivers(self):
+ """List all existing drivers."""
+ return self._list_request('drivers')
@base.handle_errors
def show_node(self, uuid):
@@ -116,12 +126,20 @@
Create a port with the specified parameters.
:param node_id: The ID of the node which owns the port.
- :param address: MAC address of the port. Default: 01:23:45:67:89:0A.
+ :param address: MAC address of the port.
+ :param extra: Meta data of the port. Default: {'foo': 'bar'}.
+ :param uuid: UUID of the port.
:return: A tuple with the server response and the created port.
"""
- port = {'address': kwargs.get('address', '01:23:45:67:89:0A'),
- 'node_uuid': node_id}
+ port = {'extra': kwargs.get('extra', {'foo': 'bar'}),
+ 'uuid': kwargs['uuid']}
+
+ if node_id is not None:
+ port['node_uuid'] = node_id
+
+ if kwargs['address'] is not None:
+ port['address'] = kwargs['address']
return self._create_request('ports', 'port', port)
@@ -192,15 +210,14 @@
return self._patch_request('chassis', uuid, patch)
@base.handle_errors
- def update_port(self, uuid, **kwargs):
+ def update_port(self, uuid, patch):
"""
Update the specified port.
:param uuid: The unique identifier of the port.
+ :param patch: List of dicts representing json patches.
:return: A tuple with the server response and the updated port.
"""
- port_attributes = ('address',)
- patch = self._make_patch(port_attributes, **kwargs)
return self._patch_request('ports', uuid, patch)
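A minimal sketch of the new patch parameter, a list of json-patch dicts; the address value is hypothetical:

    # Sketch: callers now build the json-patch list themselves.
    patch = [{'path': '/address',
              'op': 'replace',
              'value': '11:22:33:44:55:66'}]   # assumed new MAC
    resp, body = client.update_port(port_uuid, patch)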
diff --git a/tempest/services/compute/json/aggregates_client.py b/tempest/services/compute/json/aggregates_client.py
index ccb85c4..fe67102 100644
--- a/tempest/services/compute/json/aggregates_client.py
+++ b/tempest/services/compute/json/aggregates_client.py
@@ -40,6 +40,7 @@
"""Get details of the given aggregate."""
resp, body = self.get("os-aggregates/%s" % str(aggregate_id))
body = json.loads(body)
+ self.validate_response(schema.get_aggregate, resp, body)
return resp, body['aggregate']
def create_aggregate(self, **kwargs):
@@ -104,4 +105,5 @@
resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
post_body)
body = json.loads(body)
+ self.validate_response(schema.aggregate_set_metadata, resp, body)
return resp, body['aggregate']
diff --git a/tempest/services/compute/json/fixed_ips_client.py b/tempest/services/compute/json/fixed_ips_client.py
index 5fdd564..f2d5cbe 100644
--- a/tempest/services/compute/json/fixed_ips_client.py
+++ b/tempest/services/compute/json/fixed_ips_client.py
@@ -39,4 +39,5 @@
"""This reserves and unreserves fixed ips."""
url = "os-fixed-ips/%s/action" % (ip)
resp, body = self.post(url, json.dumps(body))
+ self.validate_response(schema.fixed_ip_action, resp, body)
return resp, body
diff --git a/tempest/services/compute/json/flavors_client.py b/tempest/services/compute/json/flavors_client.py
index bc4a64f..0206b82 100644
--- a/tempest/services/compute/json/flavors_client.py
+++ b/tempest/services/compute/json/flavors_client.py
@@ -18,6 +18,7 @@
from tempest.api_schema.compute import flavors as common_schema
from tempest.api_schema.compute import flavors_access as schema_access
+from tempest.api_schema.compute.v2 import flavors as v2schema
from tempest.common import rest_client
from tempest import config
@@ -47,6 +48,7 @@
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(v2schema.list_flavors_details, resp, body)
return resp, body['flavors']
def get_flavor_details(self, flavor_id):
@@ -128,7 +130,8 @@
"""Gets flavor access information given the flavor id."""
resp, body = self.get('flavors/%s/os-flavor-access' % flavor_id)
body = json.loads(body)
- self.validate_response(schema_access.list_flavor_access, resp, body)
+ self.validate_response(schema_access.add_remove_list_flavor_access,
+ resp, body)
return resp, body['flavor_access']
def add_flavor_access(self, flavor_id, tenant_id):
@@ -141,6 +144,8 @@
post_body = json.dumps(post_body)
resp, body = self.post('flavors/%s/action' % flavor_id, post_body)
body = json.loads(body)
+ self.validate_response(schema_access.add_remove_list_flavor_access,
+ resp, body)
return resp, body['flavor_access']
def remove_flavor_access(self, flavor_id, tenant_id):
@@ -153,4 +158,6 @@
post_body = json.dumps(post_body)
resp, body = self.post('flavors/%s/action' % flavor_id, post_body)
body = json.loads(body)
+ self.validate_response(schema_access.add_remove_list_flavor_access,
+ resp, body)
return resp, body['flavor_access']
diff --git a/tempest/services/compute/json/hosts_client.py b/tempest/services/compute/json/hosts_client.py
index eeb417a..e148572 100644
--- a/tempest/services/compute/json/hosts_client.py
+++ b/tempest/services/compute/json/hosts_client.py
@@ -16,6 +16,7 @@
import urllib
from tempest.api_schema.compute import hosts as schema
+from tempest.api_schema.compute.v2 import hosts as v2_schema
from tempest.common import rest_client
from tempest import config
@@ -67,6 +68,7 @@
resp, body = self.get("os-hosts/%s/startup" % str(hostname))
body = json.loads(body)
+ self.validate_response(v2_schema.startup_host, resp, body)
return resp, body['host']
def shutdown_host(self, hostname):
@@ -74,6 +76,7 @@
resp, body = self.get("os-hosts/%s/shutdown" % str(hostname))
body = json.loads(body)
+ self.validate_response(v2_schema.shutdown_host, resp, body)
return resp, body['host']
def reboot_host(self, hostname):
@@ -81,4 +84,5 @@
resp, body = self.get("os-hosts/%s/reboot" % str(hostname))
body = json.loads(body)
+ self.validate_response(v2_schema.reboot_host, resp, body)
return resp, body['host']
diff --git a/tempest/services/compute/json/migrations_client.py b/tempest/services/compute/json/migrations_client.py
new file mode 100644
index 0000000..a13349e
--- /dev/null
+++ b/tempest/services/compute/json/migrations_client.py
@@ -0,0 +1,39 @@
+# Copyright 2014 NEC Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import urllib
+
+from tempest.common import rest_client
+from tempest import config
+
+CONF = config.CONF
+
+
+class MigrationsClientJSON(rest_client.RestClient):
+
+ def __init__(self, auth_provider):
+ super(MigrationsClientJSON, self).__init__(auth_provider)
+ self.service = CONF.compute.catalog_type
+
+ def list_migrations(self, params=None):
+ """Lists all migrations."""
+
+ url = 'os-migrations'
+ if params:
+ url += '?%s' % urllib.urlencode(params)
+
+ resp, body = self.get(url)
+ body = json.loads(body)
+ return resp, body['migrations']
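A short usage sketch of the new client; the filter key is an assumption, not something the patch defines:

    # Sketch: optional params are urlencoded onto the URL.
    migrations_client = MigrationsClientJSON(auth_provider)
    resp, migrations = migrations_client.list_migrations(
        params={'status': 'finished'})   # assumed filter key
    # i.e. GET os-migrations?status=finished, returning body['migrations']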
diff --git a/tempest/services/compute/json/quotas_client.py b/tempest/services/compute/json/quotas_client.py
index 9346183..ee2c43f 100644
--- a/tempest/services/compute/json/quotas_client.py
+++ b/tempest/services/compute/json/quotas_client.py
@@ -108,4 +108,6 @@
def delete_quota_set(self, tenant_id):
"""Delete the tenant's quota set."""
- return self.delete('os-quota-sets/%s' % str(tenant_id))
+ resp, body = self.delete('os-quota-sets/%s' % str(tenant_id))
+ self.validate_response(schema.delete_quota, resp, body)
+ return resp, body
diff --git a/tempest/services/compute/json/security_groups_client.py b/tempest/services/compute/json/security_groups_client.py
index 9267be7..7411fb7 100644
--- a/tempest/services/compute/json/security_groups_client.py
+++ b/tempest/services/compute/json/security_groups_client.py
@@ -111,11 +111,15 @@
url = 'os-security-group-rules'
resp, body = self.post(url, post_body)
body = json.loads(body)
+ self.validate_response(schema.create_security_group_rule, resp, body)
return resp, body['security_group_rule']
def delete_security_group_rule(self, group_rule_id):
"""Deletes the provided Security Group rule."""
- return self.delete('os-security-group-rules/%s' % str(group_rule_id))
+ resp, body = self.delete('os-security-group-rules/%s' %
+ str(group_rule_id))
+ self.validate_response(schema.delete_security_group_rule, resp, body)
+ return resp, body
def list_security_group_rules(self, security_group_id):
"""List all rules for a security group."""
diff --git a/tempest/services/compute/json/servers_client.py b/tempest/services/compute/json/servers_client.py
index 438fce8..90b0d73 100644
--- a/tempest/services/compute/json/servers_client.py
+++ b/tempest/services/compute/json/servers_client.py
@@ -18,6 +18,7 @@
import time
import urllib
+from tempest.api_schema.compute import servers as common_schema
from tempest.api_schema.compute.v2 import servers as schema
from tempest.common import rest_client
from tempest.common import waiters
@@ -221,6 +222,7 @@
resp, body = self.get("servers/%s/os-server-password" %
str(server_id))
body = json.loads(body)
+ self.validate_response(common_schema.get_password, resp, body)
return resp, body
def delete_password(self, server_id):
@@ -260,10 +262,6 @@
"""Reverts a server back to its original flavor."""
return self.action(server_id, 'revertResize', None, **kwargs)
- def create_image(self, server_id, name):
- """Creates an image of the given server."""
- return self.action(server_id, 'createImage', None, name=name)
-
def list_server_metadata(self, server_id):
resp, body = self.get("servers/%s/metadata" % str(server_id))
body = json.loads(body)
diff --git a/tempest/services/compute/json/services_client.py b/tempest/services/compute/json/services_client.py
index 0f7d4cb..d58ca6f 100644
--- a/tempest/services/compute/json/services_client.py
+++ b/tempest/services/compute/json/services_client.py
@@ -49,6 +49,7 @@
post_body = json.dumps({'binary': binary, 'host': host_name})
resp, body = self.put('os-services/enable', post_body)
body = json.loads(body)
+ self.validate_response(schema.enable_service, resp, body)
return resp, body['service']
def disable_service(self, host_name, binary):
diff --git a/tempest/services/compute/json/volumes_extensions_client.py b/tempest/services/compute/json/volumes_extensions_client.py
index 17468eb..d1014af 100644
--- a/tempest/services/compute/json/volumes_extensions_client.py
+++ b/tempest/services/compute/json/volumes_extensions_client.py
@@ -61,7 +61,7 @@
url = "os-volumes/%s" % str(volume_id)
resp, body = self.get(url)
body = json.loads(body)
- self.validate_response(schema.get_volume, resp, body)
+ self.validate_response(schema.create_get_volume, resp, body)
return resp, body['volume']
def create_volume(self, size, **kwargs):
@@ -81,11 +81,14 @@
post_body = json.dumps({'volume': post_body})
resp, body = self.post('os-volumes', post_body)
body = json.loads(body)
+ self.validate_response(schema.create_get_volume, resp, body)
return resp, body['volume']
def delete_volume(self, volume_id):
"""Deletes the Specified Volume."""
- return self.delete("os-volumes/%s" % str(volume_id))
+ resp, body = self.delete("os-volumes/%s" % str(volume_id))
+ self.validate_response(schema.delete_volume, resp, body)
+ return resp, body
def wait_for_volume_status(self, volume_id, status):
"""Waits for a Volume to reach a given status."""
diff --git a/tempest/services/compute/v3/json/aggregates_client.py b/tempest/services/compute/v3/json/aggregates_client.py
index 7f73622..8d7440e 100644
--- a/tempest/services/compute/v3/json/aggregates_client.py
+++ b/tempest/services/compute/v3/json/aggregates_client.py
@@ -40,6 +40,7 @@
"""Get details of the given aggregate."""
resp, body = self.get("os-aggregates/%s" % str(aggregate_id))
body = json.loads(body)
+ self.validate_response(schema.get_aggregate, resp, body)
return resp, body['aggregate']
def create_aggregate(self, **kwargs):
@@ -104,4 +105,5 @@
resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
post_body)
body = json.loads(body)
+ self.validate_response(schema.aggregate_set_metadata, resp, body)
return resp, body['aggregate']
diff --git a/tempest/services/compute/v3/json/flavors_client.py b/tempest/services/compute/v3/json/flavors_client.py
index 3fdb3ca..189fe3f 100644
--- a/tempest/services/compute/v3/json/flavors_client.py
+++ b/tempest/services/compute/v3/json/flavors_client.py
@@ -18,6 +18,7 @@
from tempest.api_schema.compute import flavors as common_schema
from tempest.api_schema.compute import flavors_access as schema_access
+from tempest.api_schema.compute.v3 import flavors as v3schema
from tempest.common import rest_client
from tempest import config
@@ -47,6 +48,7 @@
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(v3schema.list_flavors_details, resp, body)
return resp, body['flavors']
def get_flavor_details(self, flavor_id):
@@ -128,7 +130,8 @@
"""Gets flavor access information given the flavor id."""
resp, body = self.get('flavors/%s/flavor-access' % flavor_id)
body = json.loads(body)
- self.validate_response(schema_access.list_flavor_access, resp, body)
+ self.validate_response(schema_access.add_remove_list_flavor_access,
+ resp, body)
return resp, body['flavor_access']
def add_flavor_access(self, flavor_id, tenant_id):
@@ -141,6 +144,8 @@
post_body = json.dumps(post_body)
resp, body = self.post('flavors/%s/action' % flavor_id, post_body)
body = json.loads(body)
+ self.validate_response(schema_access.add_remove_list_flavor_access,
+ resp, body)
return resp, body['flavor_access']
def remove_flavor_access(self, flavor_id, tenant_id):
@@ -153,4 +158,6 @@
post_body = json.dumps(post_body)
resp, body = self.post('flavors/%s/action' % flavor_id, post_body)
body = json.loads(body)
+ self.validate_response(schema_access.add_remove_list_flavor_access,
+ resp, body)
return resp, body['flavor_access']
diff --git a/tempest/services/compute/v3/json/hosts_client.py b/tempest/services/compute/v3/json/hosts_client.py
index db7134c..24d43d0 100644
--- a/tempest/services/compute/v3/json/hosts_client.py
+++ b/tempest/services/compute/v3/json/hosts_client.py
@@ -16,6 +16,7 @@
import urllib
from tempest.api_schema.compute import hosts as schema
+from tempest.api_schema.compute.v3 import hosts as v3_schema
from tempest.common import rest_client
from tempest import config
@@ -67,6 +68,7 @@
resp, body = self.get("os-hosts/%s/startup" % str(hostname))
body = json.loads(body)
+ self.validate_response(v3_schema.startup_host, resp, body)
return resp, body['host']
def shutdown_host(self, hostname):
@@ -74,6 +76,7 @@
resp, body = self.get("os-hosts/%s/shutdown" % str(hostname))
body = json.loads(body)
+ self.validate_response(v3_schema.shutdown_host, resp, body)
return resp, body['host']
def reboot_host(self, hostname):
@@ -81,4 +84,5 @@
resp, body = self.get("os-hosts/%s/reboot" % str(hostname))
body = json.loads(body)
+ self.validate_response(v3_schema.reboot_host, resp, body)
return resp, body['host']
diff --git a/tempest/services/compute/v3/json/migration_client.py b/tempest/services/compute/v3/json/migration_client.py
new file mode 100644
index 0000000..efd39b7
--- /dev/null
+++ b/tempest/services/compute/v3/json/migration_client.py
@@ -0,0 +1,37 @@
+# Copyright 2014 NEC Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import urllib
+
+from tempest.common import rest_client
+from tempest import config
+
+CONF = config.CONF
+
+
+class MigrationsV3ClientJSON(rest_client.RestClient):
+
+ def __init__(self, auth_provider):
+ super(MigrationsV3ClientJSON, self).__init__(auth_provider)
+ self.service = CONF.compute.catalog_v3_type
+
+ def list_migrations(self, params=None):
+ """Lists all migrations."""
+
+ url = 'os-migrations'
+ if params:
+ url += '?%s' % urllib.urlencode(params)
+
+ resp, body = self.get(url)
+ return resp, self._parse_resp(body)
diff --git a/tempest/services/compute/v3/json/quotas_client.py b/tempest/services/compute/v3/json/quotas_client.py
index a8507c4..783e3a7 100644
--- a/tempest/services/compute/v3/json/quotas_client.py
+++ b/tempest/services/compute/v3/json/quotas_client.py
@@ -45,6 +45,7 @@
url = 'os-quota-sets/%s/detail' % str(tenant_id)
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.quota_set_detail, resp, body)
return resp, body['quota_set']
def get_default_quota_set(self, tenant_id):
@@ -105,4 +106,6 @@
def delete_quota_set(self, tenant_id):
"""Delete the tenant's quota set."""
- return self.delete('os-quota-sets/%s' % str(tenant_id))
+ resp, body = self.delete('os-quota-sets/%s' % str(tenant_id))
+ self.validate_response(schema.delete_quota, resp, body)
+ return resp, body
diff --git a/tempest/services/compute/v3/json/servers_client.py b/tempest/services/compute/v3/json/servers_client.py
index 6f492d0..6e30bca 100644
--- a/tempest/services/compute/v3/json/servers_client.py
+++ b/tempest/services/compute/v3/json/servers_client.py
@@ -19,6 +19,7 @@
import time
import urllib
+from tempest.api_schema.compute import servers as common_schema
from tempest.api_schema.compute.v3 import servers as schema
from tempest.common import rest_client
from tempest.common import waiters
@@ -221,6 +222,7 @@
resp, body = self.get("servers/%s/os-server-password" %
str(server_id))
body = json.loads(body)
+ self.validate_response(common_schema.get_password, resp, body)
return resp, body
def delete_password(self, server_id):
@@ -408,19 +410,19 @@
str(server_id))
return resp, json.loads(body)
- def list_instance_actions(self, server_id):
+ def list_server_actions(self, server_id):
"""List the provided server action."""
- resp, body = self.get("servers/%s/os-instance-actions" %
+ resp, body = self.get("servers/%s/os-server-actions" %
str(server_id))
body = json.loads(body)
- return resp, body['instance_actions']
+ return resp, body['server_actions']
- def get_instance_action(self, server_id, request_id):
+ def get_server_action(self, server_id, request_id):
"""Returns the action details of the provided server."""
- resp, body = self.get("servers/%s/os-instance-actions/%s" %
+ resp, body = self.get("servers/%s/os-server-actions/%s" %
(str(server_id), str(request_id)))
body = json.loads(body)
- return resp, body['instance_action']
+ return resp, body['server_action']
def force_delete_server(self, server_id, **kwargs):
"""Force delete a server."""
diff --git a/tempest/services/compute/v3/json/services_client.py b/tempest/services/compute/v3/json/services_client.py
index 88c4d16..96ff580 100644
--- a/tempest/services/compute/v3/json/services_client.py
+++ b/tempest/services/compute/v3/json/services_client.py
@@ -54,6 +54,7 @@
})
resp, body = self.put('os-services/enable', post_body)
body = json.loads(body)
+ self.validate_response(schema.enable_service, resp, body)
return resp, body['service']
def disable_service(self, host_name, binary):
diff --git a/tempest/services/compute/xml/servers_client.py b/tempest/services/compute/xml/servers_client.py
index 37de147..c1105f9 100644
--- a/tempest/services/compute/xml/servers_client.py
+++ b/tempest/services/compute/xml/servers_client.py
@@ -119,6 +119,10 @@
'/compute/ext/extended_status/api/v1.1}vm_state')
task_state = ('{http://docs.openstack.org'
'/compute/ext/extended_status/api/v1.1}task_state')
+ if 'tenantId' in json:
+ json['tenant_id'] = json.pop('tenantId')
+ if 'userId' in json:
+ json['user_id'] = json.pop('userId')
if diskConfig in json:
json['OS-DCF:diskConfig'] = json.pop(diskConfig)
if terminated_at in json:
@@ -242,13 +246,19 @@
array.append(xml_utils.xml_to_json(child))
return array
+ def _parse_server_array(self, node):
+ array = []
+ for child in node.getchildren():
+ array.append(self._parse_server(child))
+ return array
+
def list_servers(self, params=None):
url = 'servers'
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
- servers = self._parse_array(etree.fromstring(body))
+ servers = self._parse_server_array(etree.fromstring(body))
return resp, {"servers": servers}
def list_servers_with_detail(self, params=None):
@@ -257,7 +267,7 @@
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
- servers = self._parse_array(etree.fromstring(body))
+ servers = self._parse_server_array(etree.fromstring(body))
return resp, {"servers": servers}
def update_server(self, server_id, name=None, meta=None, accessIPv4=None,
diff --git a/tempest/services/identity/json/identity_client.py b/tempest/services/identity/json/identity_client.py
index c95faaa..55239f7 100644
--- a/tempest/services/identity/json/identity_client.py
+++ b/tempest/services/identity/json/identity_client.py
@@ -219,7 +219,7 @@
def list_services(self):
"""List Service - Returns Services."""
- resp, body = self.get('/OS-KSADM/services/')
+ resp, body = self.get('/OS-KSADM/services')
return resp, self._parse_resp(body)
def delete_service(self, service_id):
@@ -275,13 +275,20 @@
return resp, body['access']
- def request(self, method, url, headers=None, body=None):
+ def request(self, method, url, extra_headers=False, headers=None,
+ body=None):
"""A simple HTTP request interface."""
if headers is None:
# Always accept 'json', for TokenClientXML too.
# Because XML response is not easily
# converted to the corresponding JSON one
headers = self.get_headers(accept_type="json")
+ elif extra_headers:
+ try:
+ headers.update(self.get_headers(accept_type="json"))
+ except (ValueError, TypeError):
+ headers = self.get_headers(accept_type="json")
+
resp, resp_body = self.http_obj.request(url, method,
headers=headers, body=body)
self._log_request(method, url, resp)
diff --git a/tempest/services/identity/v3/json/identity_client.py b/tempest/services/identity/v3/json/identity_client.py
index 4b530f1..6829333 100644
--- a/tempest/services/identity/v3/json/identity_client.py
+++ b/tempest/services/identity/v3/json/identity_client.py
@@ -163,6 +163,12 @@
body = json.loads(body)
return resp, body['role']
+ def list_roles(self):
+ """Get the list of Roles."""
+ resp, body = self.get("roles")
+ body = json.loads(body)
+ return resp, body['roles']
+
def update_role(self, name, role_id):
"""Create a Role."""
post_body = {
@@ -515,13 +521,20 @@
resp, body = self.post(self.auth_url, body=body)
return resp, body
- def request(self, method, url, headers=None, body=None):
+ def request(self, method, url, extra_headers=False, headers=None,
+ body=None):
"""A simple HTTP request interface."""
if headers is None:
# Always accept 'json', for xml token client too.
# Because XML response is not easily
# converted to the corresponding JSON one
headers = self.get_headers(accept_type="json")
+ elif extra_headers:
+ try:
+ headers.update(self.get_headers(accept_type="json"))
+ except (ValueError, TypeError):
+ headers = self.get_headers(accept_type="json")
+
resp, resp_body = self.http_obj.request(url, method,
headers=headers, body=body)
self._log_request(method, url, resp)
diff --git a/tempest/services/identity/v3/xml/endpoints_client.py b/tempest/services/identity/v3/xml/endpoints_client.py
index 93dc3dc..6490e34 100644
--- a/tempest/services/identity/v3/xml/endpoints_client.py
+++ b/tempest/services/identity/v3/xml/endpoints_client.py
@@ -46,12 +46,19 @@
json = common.xml_to_json(body)
return json
- def request(self, method, url, headers=None, body=None, wait=None):
+ def request(self, method, url, extra_headers=False, headers=None,
+ body=None, wait=None):
"""Overriding the existing HTTP request in super class RestClient."""
+ if extra_headers:
+ try:
+ headers.update(self.get_headers())
+ except (ValueError, TypeError):
+ headers = self.get_headers()
dscv = CONF.identity.disable_ssl_certificate_validation
self.http_obj = http.ClosingHttp(
disable_ssl_certificate_validation=dscv)
return super(EndPointClientXML, self).request(method, url,
+ extra_headers,
headers=headers,
body=body)
diff --git a/tempest/services/identity/v3/xml/identity_client.py b/tempest/services/identity/v3/xml/identity_client.py
index c49f361..35295d7 100644
--- a/tempest/services/identity/v3/xml/identity_client.py
+++ b/tempest/services/identity/v3/xml/identity_client.py
@@ -217,6 +217,12 @@
body = self._parse_body(etree.fromstring(body))
return resp, body
+ def list_roles(self):
+ """Get the list of Roles."""
+ resp, body = self.get("roles")
+ body = self._parse_roles(etree.fromstring(body))
+ return resp, body
+
def update_role(self, name, role_id):
"""Updates a Role."""
post_body = common.Element("role",
@@ -516,13 +522,19 @@
resp, body = self.post(self.auth_url, body=str(common.Document(auth)))
return resp, body
- def request(self, method, url, headers=None, body=None):
+ def request(self, method, url, extra_headers=False, headers=None,
+ body=None):
"""A simple HTTP request interface."""
if headers is None:
# Always accept 'json', for xml token client too.
# Because XML response is not easily
# converted to the corresponding JSON one
headers = self.get_headers(accept_type="json")
+ elif extra_headers:
+ try:
+ headers.update(self.get_headers(accept_type="json"))
+ except (ValueError, TypeError):
+ headers = self.get_headers(accept_type="json")
resp, resp_body = self.http_obj.request(url, method,
headers=headers, body=body)
self._log_request(method, url, resp)
diff --git a/tempest/services/identity/v3/xml/policy_client.py b/tempest/services/identity/v3/xml/policy_client.py
index e903089..73d831b 100644
--- a/tempest/services/identity/v3/xml/policy_client.py
+++ b/tempest/services/identity/v3/xml/policy_client.py
@@ -46,12 +46,19 @@
json = common.xml_to_json(body)
return json
- def request(self, method, url, headers=None, body=None, wait=None):
+ def request(self, method, url, extra_headers=False, headers=None,
+ body=None, wait=None):
"""Overriding the existing HTTP request in super class RestClient."""
+ if extra_headers:
+ try:
+ headers.update(self.get_headers())
+ except (ValueError, TypeError):
+ headers = self.get_headers()
dscv = CONF.identity.disable_ssl_certificate_validation
self.http_obj = http.ClosingHttp(
disable_ssl_certificate_validation=dscv)
return super(PolicyClientXML, self).request(method, url,
+ extra_headers,
headers=headers,
body=body)
diff --git a/tempest/services/network/network_client_base.py b/tempest/services/network/network_client_base.py
index e21abe1..34c61b0 100644
--- a/tempest/services/network/network_client_base.py
+++ b/tempest/services/network/network_client_base.py
@@ -31,12 +31,15 @@
'vpnservices': 'vpn',
'ikepolicies': 'vpn',
'metering_labels': 'metering',
- 'metering_label_rules': 'metering'
+ 'metering_label_rules': 'metering',
+ 'firewall_rules': 'fw',
+ 'firewall_policies': 'fw',
+ 'firewalls': 'fw'
}
# The following list represents resource names that do not require
# changing underscore to a hyphen
-hyphen_exceptions = ["health_monitors"]
+hyphen_exceptions = ["health_monitors", "firewall_rules", "firewall_policies"]
# map from resource name to a plural name
# needed only for those which can't be constructed as name + 's'
@@ -44,7 +47,8 @@
'security_groups': 'security_groups',
'security_group_rules': 'security_group_rules',
'ikepolicy': 'ikepolicies',
- 'quotas': 'quotas'
+ 'quotas': 'quotas',
+ 'firewall_policy': 'firewall_policies'
}
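Note: a sketch of how these lookup tables are presumably consumed when building request URIs; pluralize and get_uri are illustrative stand-ins, assuming underscores become hyphens unless the name is listed in hyphen_exceptions and that service-scoped resources are prefixed with their entry from service_resource_prefix_map:

    # Illustrative only; trimmed copies of the maps extended above.
    resource_plural_map = {'firewall_policy': 'firewall_policies'}
    hyphen_exceptions = ["health_monitors", "firewall_rules",
                         "firewall_policies"]
    service_resource_prefix_map = {'firewall_rules': 'fw',
                                   'firewall_policies': 'fw',
                                   'firewalls': 'fw'}

    def pluralize(resource_name):
        # Fall back to appending 's' when no irregular plural is mapped.
        return resource_plural_map.get(resource_name, resource_name + 's')

    def get_uri(plural_name):
        prefix = service_resource_prefix_map.get(plural_name)
        if plural_name not in hyphen_exceptions:
            plural_name = plural_name.replace("_", "-")
        return "%s/%s" % (prefix, plural_name) if prefix else plural_name

    # pluralize('firewall_policy')  -> 'firewall_policies'
    # get_uri('firewall_policies')  -> 'fw/firewall_policies'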
diff --git a/tempest/services/network/xml/network_client.py b/tempest/services/network/xml/network_client.py
index 0945b09..a9d4880 100644
--- a/tempest/services/network/xml/network_client.py
+++ b/tempest/services/network/xml/network_client.py
@@ -24,7 +24,7 @@
# list of plurals used for xml serialization
PLURALS = ['dns_nameservers', 'host_routes', 'allocation_pools',
'fixed_ips', 'extensions', 'extra_dhcp_opts', 'pools',
- 'health_monitors', 'vips']
+ 'health_monitors', 'vips', 'members']
def get_rest_client(self, auth_provider):
rc = rest_client.RestClient(auth_provider)
diff --git a/tempest/services/object_storage/account_client.py b/tempest/services/object_storage/account_client.py
index 6e7910e..a0506f2 100644
--- a/tempest/services/object_storage/account_client.py
+++ b/tempest/services/object_storage/account_client.py
@@ -162,11 +162,17 @@
self.service = CONF.object_storage.catalog_type
self.format = 'json'
- def request(self, method, url, headers=None, body=None):
+ def request(self, method, url, extra_headers=False, headers=None,
+ body=None):
"""A simple HTTP request interface."""
self.http_obj = http.ClosingHttp()
if headers is None:
headers = {}
+ elif extra_headers:
+ try:
+ headers.update(self.get_headers())
+ except (ValueError, TypeError):
+ headers = {}
# Authorize the request
req_url, req_headers, req_body = self.auth_provider.auth_request(
diff --git a/tempest/services/object_storage/object_client.py b/tempest/services/object_storage/object_client.py
index 49f7f49..53a3325 100644
--- a/tempest/services/object_storage/object_client.py
+++ b/tempest/services/object_storage/object_client.py
@@ -29,12 +29,16 @@
self.service = CONF.object_storage.catalog_type
- def create_object(self, container, object_name, data, params=None):
+ def create_object(self, container, object_name, data,
+ params=None, metadata=None):
"""Create storage object."""
headers = self.get_headers()
if not data:
headers['content-length'] = '0'
+ if metadata:
+ for key in metadata:
+ headers[str(key)] = metadata[key]
url = "%s/%s" % (str(container), str(object_name))
if params:
url += '?%s' % urllib.urlencode(params)
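Note: with the new metadata parameter, per-object headers can be passed straight to create_object; a hedged usage sketch (object_client, the container, and the key name are illustrative, and keys are copied into the request headers verbatim, so any required X-Object-Meta- prefix is the caller's responsibility):

    # Illustrative usage only.
    metadata = {'X-Object-Meta-color': 'blue'}   # becomes a request header as-is
    resp, body = object_client.create_object('my-container', 'my-object',
                                              data='some payload',
                                              metadata=metadata)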
@@ -146,13 +150,19 @@
self.service = CONF.object_storage.catalog_type
self.format = 'json'
- def request(self, method, url, headers=None, body=None):
+ def request(self, method, url, extra_headers=False, headers=None,
+ body=None):
"""A simple HTTP request interface."""
dscv = CONF.identity.disable_ssl_certificate_validation
self.http_obj = http.ClosingHttp(
disable_ssl_certificate_validation=dscv)
if headers is None:
headers = {}
+ elif extra_headers:
+ try:
+ headers.update(self.get_headers())
+ except (ValueError, TypeError):
+ headers = {}
# Authorize the request
req_url, req_headers, req_body = self.auth_provider.auth_request(
diff --git a/tempest/services/orchestration/json/orchestration_client.py b/tempest/services/orchestration/json/orchestration_client.py
index 113003c..2311bdd 100644
--- a/tempest/services/orchestration/json/orchestration_client.py
+++ b/tempest/services/orchestration/json/orchestration_client.py
@@ -154,7 +154,8 @@
if resource_status == status:
return
if fail_regexp.search(resource_status):
- raise exceptions.StackBuildErrorException(
+ raise exceptions.StackResourceBuildErrorException(
+ resource_name=resource_name,
stack_identifier=stack_identifier,
resource_status=resource_status,
resource_status_reason=body['resource_status_reason'])
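Note: the switch to StackResourceBuildErrorException implies a matching definition that formats resource_name; a sketch of the assumed shape of that class in tempest/exceptions.py (exact message wording is illustrative):

    # Assumed shape; TempestException is tempest's existing base exception.
    class StackResourceBuildErrorException(TempestException):
        message = ("Resource %(resource_name)s in stack %(stack_identifier)s "
                   "is in %(resource_status)s status due to "
                   "'%(resource_status_reason)s'")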
diff --git a/tempest/services/volume/json/volumes_client.py b/tempest/services/volume/json/volumes_client.py
index e4d2e8d..b55a037 100644
--- a/tempest/services/volume/json/volumes_client.py
+++ b/tempest/services/volume/json/volumes_client.py
@@ -67,10 +67,10 @@
body = json.loads(body)
return resp, body['volume']
- def create_volume(self, size, **kwargs):
+ def create_volume(self, size=None, **kwargs):
"""
Creates a new Volume.
- size(Required): Size of volume in GB.
+ size: Size of volume in GB.
Following optional keyword arguments are accepted:
display_name: Optional Volume Name.
metadata: A dictionary of values to be used as metadata.
@@ -78,6 +78,10 @@
snapshot_id: When specified the volume is created from this snapshot
imageRef: When specified the volume is created from this image
"""
+ # for bug #1293885:
+ # If no size is specified, read the volume size from CONF
+ if size is None:
+ size = CONF.volume.volume_size
post_body = {'size': size}
post_body.update(kwargs)
post_body = json.dumps({'volume': post_body})
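Note: the new default ties create_volume() to the [volume] volume_size option added to tempest.conf.sample; a minimal usage sketch of how the size now resolves:

    # CONF.volume.volume_size defaults to 1 (GB) per the sample config.
    resp, volume = volumes_client.create_volume()         # uses CONF.volume.volume_size
    resp, volume = volumes_client.create_volume(size=10)  # explicit size still wins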
diff --git a/tempest/services/volume/v2/json/volumes_client.py b/tempest/services/volume/v2/json/volumes_client.py
index 5bfa75f..df20a2a 100644
--- a/tempest/services/volume/v2/json/volumes_client.py
+++ b/tempest/services/volume/v2/json/volumes_client.py
@@ -68,10 +68,10 @@
body = json.loads(body)
return resp, body['volume']
- def create_volume(self, size, **kwargs):
+ def create_volume(self, size=None, **kwargs):
"""
Creates a new Volume.
- size(Required): Size of volume in GB.
+ size: Size of volume in GB.
Following optional keyword arguments are accepted:
name: Optional Volume Name.
metadata: A dictionary of values to be used as metadata.
@@ -79,6 +79,10 @@
snapshot_id: When specified the volume is created from this snapshot
imageRef: When specified the volume is created from this image
"""
+ # for bug #1293885:
+ # If no size is specified, read the volume size from CONF
+ if size is None:
+ size = CONF.volume.volume_size
post_body = {'size': size}
post_body.update(kwargs)
post_body = json.dumps({'volume': post_body})
diff --git a/tempest/services/volume/v2/xml/volumes_client.py b/tempest/services/volume/v2/xml/volumes_client.py
index e735a65..1fdaf19 100644
--- a/tempest/services/volume/v2/xml/volumes_client.py
+++ b/tempest/services/volume/v2/xml/volumes_client.py
@@ -117,10 +117,10 @@
body = self._check_if_bootable(body)
return resp, body
- def create_volume(self, size, **kwargs):
+ def create_volume(self, size=None, **kwargs):
"""Creates a new Volume.
- :param size: Size of volume in GB. (Required)
+ :param size: Size of volume in GB.
:param name: Optional Volume Name.
:param metadata: An optional dictionary of values for metadata.
:param volume_type: Optional Name of volume_type for the volume
@@ -129,6 +129,10 @@
:param imageRef: When specified the volume is created from this
image
"""
+ # for bug #1293885:
+ # If no size is specified, read the volume size from CONF
+ if size is None:
+ size = CONF.volume.volume_size
# NOTE(afazekas): it should use a volume namespace
volume = common.Element("volume", xmlns=common.XMLNS_11, size=size)
diff --git a/tempest/services/volume/xml/volumes_client.py b/tempest/services/volume/xml/volumes_client.py
index 6866dad..65bc321 100644
--- a/tempest/services/volume/xml/volumes_client.py
+++ b/tempest/services/volume/xml/volumes_client.py
@@ -118,10 +118,10 @@
body = self._check_if_bootable(body)
return resp, body
- def create_volume(self, size, **kwargs):
+ def create_volume(self, size=None, **kwargs):
"""Creates a new Volume.
- :param size: Size of volume in GB. (Required)
+ :param size: Size of volume in GB.
:param display_name: Optional Volume Name.
:param metadata: An optional dictionary of values for metadata.
:param volume_type: Optional Name of volume_type for the volume
@@ -130,6 +130,10 @@
:param imageRef: When specified the volume is created from this
image
"""
+ # for bug #1293885:
+ # If no size is specified, read the volume size from CONF
+ if size is None:
+ size = CONF.volume.volume_size
# NOTE(afazekas): it should use a volume namespace
volume = common.Element("volume", xmlns=common.XMLNS_11, size=size)
diff --git a/tempest/test.py b/tempest/test.py
index e4019f9..8df405c 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -94,6 +94,7 @@
service_list = {
'compute': CONF.service_available.nova,
'image': CONF.service_available.glance,
+ 'baremetal': CONF.service_available.ironic,
'volume': CONF.service_available.cinder,
'orchestration': CONF.service_available.heat,
# NOTE(mtreinish) nova-network will provide networking functionality
diff --git a/tempest/tests/common/test_debug.py b/tempest/tests/common/test_debug.py
index cd9936c..8a880f2 100644
--- a/tempest/tests/common/test_debug.py
+++ b/tempest/tests/common/test_debug.py
@@ -53,15 +53,14 @@
self.useFixture(mockpatch.PatchObject(test.CONF.debug,
'enable', True))
- tables = ['filter', 'nat', 'mangle']
self.ip_ns_list_mock.return_value = [1, 2]
debug.log_ip_ns()
self.ip_addr_raw_mock.assert_called_with()
self.assertTrue(self.log_mock.info.called)
self.ip_route_raw_mock.assert_called_with()
- self.assertEqual(len(tables), self.iptables_raw_mock.call_count)
- for table in tables:
+ self.assertEqual(len(debug.TABLES), self.iptables_raw_mock.call_count)
+ for table in debug.TABLES:
self.assertIn(mock.call(table),
self.iptables_raw_mock.call_args_list)
@@ -76,10 +75,11 @@
self.assertIn(mock.call(ns),
self.ip_ns_route_mock.call_args_list)
- self.assertEqual(len(tables) * len(self.ip_ns_list_mock.return_value),
+ self.assertEqual(len(debug.TABLES) *
+ len(self.ip_ns_list_mock.return_value),
self.iptables_ns_mock.call_count)
for ns in self.ip_ns_list_mock.return_value:
- for table in tables:
+ for table in debug.TABLES:
self.assertIn(mock.call(ns, table),
self.iptables_ns_mock.call_args_list)
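Note: the test now reads the table list from the module under test instead of a local copy; judging from the removed literal, debug.TABLES is presumably defined as below (sketch, not copied from tempest/common/debug.py):

    # tempest/common/debug.py (assumed)
    TABLES = ['filter', 'nat', 'mangle']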
diff --git a/tempest/tests/test_commands.py b/tempest/tests/test_commands.py
new file mode 100644
index 0000000..bdb9269
--- /dev/null
+++ b/tempest/tests/test_commands.py
@@ -0,0 +1,87 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+import subprocess
+
+from tempest.common import commands
+from tempest.tests import base
+
+
+class TestCommands(base.TestCase):
+
+ def setUp(self):
+ super(TestCommands, self).setUp()
+ self.subprocess_args = {'stdout': subprocess.PIPE,
+ 'stderr': subprocess.STDOUT}
+
+ @mock.patch('subprocess.Popen')
+ def test_ip_addr_raw(self, mock):
+ expected = ['/usr/bin/sudo', '-n', 'ip', 'a']
+ commands.ip_addr_raw()
+ mock.assert_called_once_with(expected, **self.subprocess_args)
+
+ @mock.patch('subprocess.Popen')
+ def test_ip_route_raw(self, mock):
+ expected = ['/usr/bin/sudo', '-n', 'ip', 'r']
+ commands.ip_route_raw()
+ mock.assert_called_once_with(expected, **self.subprocess_args)
+
+ @mock.patch('subprocess.Popen')
+ def test_ip_ns_raw(self, mock):
+ expected = ['/usr/bin/sudo', '-n', 'ip', 'netns', 'list']
+ commands.ip_ns_raw()
+ mock.assert_called_once_with(expected, **self.subprocess_args)
+
+ @mock.patch('subprocess.Popen')
+ def test_iptables_raw(self, mock):
+ table = 'filter'
+ expected = ['/usr/bin/sudo', '-n', 'iptables', '-v', '-S', '-t',
+ '%s' % table]
+ commands.iptables_raw(table)
+ mock.assert_called_once_with(expected, **self.subprocess_args)
+
+ @mock.patch('subprocess.Popen')
+ def test_ip_ns_list(self, mock):
+ expected = ['/usr/bin/sudo', '-n', 'ip', 'netns', 'list']
+ commands.ip_ns_list()
+ mock.assert_called_once_with(expected, **self.subprocess_args)
+
+ @mock.patch('subprocess.Popen')
+ def test_ip_ns_addr(self, mock):
+ ns_list = commands.ip_ns_list()
+ for ns in ns_list:
+ expected = ['/usr/bin/sudo', '-n', 'ip', 'netns', 'exec', ns,
+ 'ip', 'a']
+ commands.ip_ns_addr(ns)
+ mock.assert_called_once_with(expected, **self.subprocess_args)
+
+ @mock.patch('subprocess.Popen')
+ def test_ip_ns_route(self, mock):
+ ns_list = commands.ip_ns_list()
+ for ns in ns_list:
+ expected = ['/usr/bin/sudo', '-n', 'ip', 'netns', 'exec', ns,
+ 'ip', 'r']
+ commands.ip_ns_route(ns)
+ mock.assert_called_once_with(expected, **self.subprocess_args)
+
+ @mock.patch('subprocess.Popen')
+ def test_iptables_ns(self, mock):
+ table = 'filter'
+ ns_list = commands.ip_ns_list()
+ for ns in ns_list:
+ expected = ['/usr/bin/sudo', '-n', 'ip', 'netns', 'exec', ns,
+ 'iptables', '-v', '-S', '-t', table]
+ commands.iptables_ns(ns, table)
+ mock.assert_called_once_with(expected, **self.subprocess_args)
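Note: these tests assert on the exact argv handed to subprocess.Popen; a hedged sketch of the tempest.common.commands helpers they exercise, with bodies reconstructed from the expected command lines rather than copied from the module:

    import shlex
    import subprocess

    def sudo_cmd_call(cmd):
        # Run a command under 'sudo -n' and return its combined output.
        args = shlex.split(cmd)
        subprocess_args = {'stdout': subprocess.PIPE,
                           'stderr': subprocess.STDOUT}
        proc = subprocess.Popen(['/usr/bin/sudo', '-n'] + args,
                                **subprocess_args)
        return proc.communicate()[0]

    def ip_addr_raw():
        return sudo_cmd_call("ip a")

    def iptables_raw(table):
        return sudo_cmd_call("iptables -v -S -t %s" % table)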
diff --git a/tempest/tests/test_decorators.py b/tempest/tests/test_decorators.py
index ebf0ca0..804204a 100644
--- a/tempest/tests/test_decorators.py
+++ b/tempest/tests/test_decorators.py
@@ -13,6 +13,7 @@
# under the License.
+import mock
import testtools
from oslo.config import cfg
@@ -232,3 +233,19 @@
self._test_requires_ext_helper,
extension='enabled_ext',
service='bad_service')
+
+
+class TestSimpleNegativeDecorator(BaseDecoratorsTest):
+ @test.SimpleNegativeAutoTest
+ class FakeNegativeJSONTest(test.NegativeAutoTest):
+ _schema_file = 'fake/schemas/file.json'
+
+ def test_testfunc_exist(self):
+ self.assertIn("test_fake_negative", dir(self.FakeNegativeJSONTest))
+
+ @mock.patch('tempest.test.NegativeAutoTest.execute')
+ def test_testfunc_calls_execute(self, mock):
+ obj = self.FakeNegativeJSONTest("test_fake_negative")
+ self.assertIn("test_fake_negative", dir(obj))
+ obj.test_fake_negative()
+ mock.assert_called_once_with(self.FakeNegativeJSONTest._schema_file)
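Note: the new tests expect the decorator to synthesize a test_fake_negative method that calls execute() with the class's schema file; a hedged sketch of what such a class decorator could look like (the name mangling shown is illustrative, not the verified tempest.test implementation):

    import re

    def SimpleNegativeAutoTest(klass):
        """Attach a generated negative test method to the decorated class."""
        def generic_test(self):
            self.execute(self._schema_file)

        # FakeNegativeJSONTest -> test_fake_negative (illustrative mangling)
        name = klass.__name__.replace('JSON', '').replace('Test', '')
        snake = re.sub('(?<!^)(?=[A-Z])', '_', name).lower()
        setattr(klass, 'test_%s' % snake, generic_test)
        return klass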
diff --git a/tempest/tests/test_rest_client.py b/tempest/tests/test_rest_client.py
index cfbb37d..64ad3bc 100644
--- a/tempest/tests/test_rest_client.py
+++ b/tempest/tests/test_rest_client.py
@@ -139,6 +139,102 @@
self._verify_headers(resp)
+class TestRestClientUpdateHeaders(BaseRestClientTestClass):
+ def setUp(self):
+ self.fake_http = fake_http.fake_httplib2()
+ super(TestRestClientUpdateHeaders, self).setUp()
+ self.useFixture(mockpatch.PatchObject(self.rest_client,
+ '_error_checker'))
+ self.headers = {'X-Configuration-Session': 'session_id'}
+
+ def test_post_update_headers(self):
+ __, return_dict = self.rest_client.post(self.url, {},
+ extra_headers=True,
+ headers=self.headers)
+
+ self.assertDictContainsSubset(
+ {'X-Configuration-Session': 'session_id',
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/json'},
+ return_dict['headers']
+ )
+
+ def test_get_update_headers(self):
+ __, return_dict = self.rest_client.get(self.url,
+ extra_headers=True,
+ headers=self.headers)
+
+ self.assertDictContainsSubset(
+ {'X-Configuration-Session': 'session_id',
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/json'},
+ return_dict['headers']
+ )
+
+ def test_delete_update_headers(self):
+ __, return_dict = self.rest_client.delete(self.url,
+ extra_headers=True,
+ headers=self.headers)
+
+ self.assertDictContainsSubset(
+ {'X-Configuration-Session': 'session_id',
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/json'},
+ return_dict['headers']
+ )
+
+ def test_patch_update_headers(self):
+ __, return_dict = self.rest_client.patch(self.url, {},
+ extra_headers=True,
+ headers=self.headers)
+
+ self.assertDictContainsSubset(
+ {'X-Configuration-Session': 'session_id',
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/json'},
+ return_dict['headers']
+ )
+
+ def test_put_update_headers(self):
+ __, return_dict = self.rest_client.put(self.url, {},
+ extra_headers=True,
+ headers=self.headers)
+
+ self.assertDictContainsSubset(
+ {'X-Configuration-Session': 'session_id',
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/json'},
+ return_dict['headers']
+ )
+
+ def test_head_update_headers(self):
+ self.useFixture(mockpatch.PatchObject(self.rest_client,
+ 'response_checker'))
+
+ __, return_dict = self.rest_client.head(self.url,
+ extra_headers=True,
+ headers=self.headers)
+
+ self.assertDictContainsSubset(
+ {'X-Configuration-Session': 'session_id',
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/json'},
+ return_dict['headers']
+ )
+
+ def test_copy_update_headers(self):
+ __, return_dict = self.rest_client.copy(self.url,
+ extra_headers=True,
+ headers=self.headers)
+
+ self.assertDictContainsSubset(
+ {'X-Configuration-Session': 'session_id',
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/json'},
+ return_dict['headers']
+ )
+
+
class TestRestClientHeadersXML(TestRestClientHeadersJSON):
TYPE = "xml"
diff --git a/tools/check_logs.py b/tools/check_logs.py
index b5b1780..bc4eaca 100755
--- a/tools/check_logs.py
+++ b/tools/check_logs.py
@@ -46,7 +46,6 @@
'n-api',
'n-cpu',
'n-net',
- 'n-sch',
'q-agt',
'q-dhcp',
'q-lbaas',
diff --git a/tools/verify_tempest_config.py b/tools/verify_tempest_config.py
index aa92c0b..30785c4 100755
--- a/tools/verify_tempest_config.py
+++ b/tools/verify_tempest_config.py
@@ -16,6 +16,7 @@
import json
import sys
+import urlparse
import httplib2
@@ -39,19 +40,37 @@
not CONF.image_feature_enabled.api_v2))
-def verify_nova_api_versions(os):
- # Check nova api versions - only get base URL without PATH
- os.servers_client.skip_path = True
- # The nova base endpoint url includes the version but to get the versions
- # list the unversioned endpoint is needed
- v2_endpoint = os.servers_client.base_url
- v2_endpoint_parts = v2_endpoint.split('/')
- endpoint = v2_endpoint_parts[0] + '//' + v2_endpoint_parts[2]
+def _get_api_versions(os, service):
+ client_dict = {
+ 'nova': os.servers_client,
+ 'keystone': os.identity_client,
+ }
+ client_dict[service].skip_path()
+ endpoint_parts = urlparse.urlparse(client_dict[service].base_url)
+ endpoint = endpoint_parts.scheme + '://' + endpoint_parts.netloc
__, body = RAW_HTTP.request(endpoint, 'GET')
+ client_dict[service].reset_path()
body = json.loads(body)
- # Restore full base_url
- os.servers_client.skip_path = False
- versions = map(lambda x: x['id'], body['versions'])
+ if service == 'keystone':
+ versions = map(lambda x: x['id'], body['versions']['values'])
+ else:
+ versions = map(lambda x: x['id'], body['versions'])
+ return versions
+
+
+def verify_keystone_api_versions(os):
+ # Check keystone api versions
+ versions = _get_api_versions(os, 'keystone')
+ if CONF.identity_feature_enabled.api_v2 != ('v2.0' in versions):
+ print('Config option identity api_v2 should be changed to %s' % (
+ not CONF.identity_feature_enabled.api_v2))
+ if CONF.identity_feature_enabled.api_v3 != ('v3.0' in versions):
+ print('Config option identity api_v3 should be changed to %s' % (
+ not CONF.identity_feature_enabled.api_v3))
+
+
+def verify_nova_api_versions(os):
+ versions = _get_api_versions(os, 'nova')
if CONF.compute_feature_enabled.api_v3 != ('v3.0' in versions):
print('Config option compute api_v3 should be changed to: %s' % (
not CONF.compute_feature_enabled.api_v3))
@@ -197,6 +216,7 @@
elif service not in services:
continue
results = verify_extensions(os, service, results)
+ verify_keystone_api_versions(os)
verify_glance_api_versions(os)
verify_nova_api_versions(os)
display_results(results)
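Note: a schematic of the endpoint handling now shared by the nova and keystone checks, with illustrative values; the two comments show the response shapes the code distinguishes when extracting version ids:

    import urlparse

    base_url = "http://keystone.example.com:5000/v2.0"     # illustrative
    parts = urlparse.urlparse(base_url)
    unversioned = parts.scheme + '://' + parts.netloc       # http://keystone.example.com:5000

    # keystone wraps the list:  {"versions": {"values": [{"id": "v2.0"}, ...]}}
    # nova returns it directly: {"versions": [{"id": "v2.0"}, ...]}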
diff --git a/tox.ini b/tox.ini
index 4a625f8..5e8d283 100644
--- a/tox.ini
+++ b/tox.ini
@@ -54,7 +54,7 @@
setenv = OS_TEST_TIMEOUT=1200
# The regex below is used to select heat api/scenario tests tagged as slow.
commands =
- bash tools/pretty_tox_serial.sh '(?=.*\[.*\bslow\b.*\])(^tempest\.(api|scenario)\.orchestration) {posargs}'
+ bash tools/pretty_tox.sh '(?=.*\[.*\bslow\b.*\])(^tempest\.(api|scenario)\.orchestration) {posargs}'
[testenv:large-ops]
sitepackages = True