Merge "Cleanup for local variable in test_attach_volume"
diff --git a/README.rst b/README.rst
index 9daf873..4393ae9 100644
--- a/README.rst
+++ b/README.rst
@@ -7,7 +7,7 @@
deployment.
Design Principles
-----------
+-----------------
Tempest Design Principles that we strive to live by.
- Tempest should be able to run against any OpenStack cloud, be it a
@@ -127,6 +127,6 @@
of tempest when running with Python 2.6. Additionally, to enable testr to work
with tempest using python 2.6 the discover module from the unittest-ext
project has to be patched to switch the unittest.TestSuite to use
-unittest2.TestSuite instead. See::
+unittest2.TestSuite instead. See:
https://code.google.com/p/unittest-ext/issues/detail?id=79
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 1c32b9c..c45273e 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -33,14 +33,6 @@
field_guide/thirdparty
field_guide/unit_tests
-------------------
-API and test cases
-------------------
-.. toctree::
- :maxdepth: 1
-
- api/modules
-
==================
Indices and tables
==================
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index a744339..9f2f924 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -711,6 +711,10 @@
# (integer value)
#build_interval=1
+# List of dns servers which should be used for subnet creation
+# (list value)
+#dns_servers=8.8.8.8,8.8.4.4
+
[network-feature-enabled]
diff --git a/tempest/api/data_processing/test_cluster_templates.py b/tempest/api/data_processing/test_cluster_templates.py
new file mode 100644
index 0000000..c08d6ba
--- /dev/null
+++ b/tempest/api/data_processing/test_cluster_templates.py
@@ -0,0 +1,146 @@
+# Copyright (c) 2014 Mirantis Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.data_processing import base as dp_base
+from tempest.common.utils import data_utils
+from tempest import test
+
+
+class ClusterTemplateTest(dp_base.BaseDataProcessingTest):
+ """Link to the API documentation is http://docs.openstack.org/developer/
+ sahara/restapi/rest_api_v1.0.html#cluster-templates
+ """
+ @classmethod
+ def setUpClass(cls):
+ super(ClusterTemplateTest, cls).setUpClass()
+ # create node group template
+ node_group_template = {
+ 'name': data_utils.rand_name('sahara-ng-template'),
+ 'description': 'Test node group template',
+ 'plugin_name': 'vanilla',
+ 'hadoop_version': '1.2.1',
+ 'node_processes': ['datanode'],
+ 'flavor_id': cls.flavor_ref,
+ 'node_configs': {
+ 'HDFS': {
+ 'Data Node Heap Size': 1024
+ }
+ }
+ }
+ resp_body = cls.create_node_group_template(**node_group_template)[1]
+
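+ # build a cluster template whose worker node group references
+ # the node group template created above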
+ cls.full_cluster_template = {
+ 'description': 'Test cluster template',
+ 'plugin_name': 'vanilla',
+ 'hadoop_version': '1.2.1',
+ 'cluster_configs': {
+ 'HDFS': {
+ 'dfs.replication': 2
+ },
+ 'MapReduce': {
+ 'mapred.map.tasks.speculative.execution': False,
+ 'mapred.child.java.opts': '-Xmx500m'
+ },
+ 'general': {
+ 'Enable Swift': False
+ }
+ },
+ 'node_groups': [
+ {
+ 'name': 'master-node',
+ 'flavor_id': cls.flavor_ref,
+ 'node_processes': ['namenode'],
+ 'count': 1
+ },
+ {
+ 'name': 'worker-node',
+ 'node_group_template_id': resp_body['id'],
+ 'count': 3
+ }
+ ]
+ }
+ # create cls.cluster_template variable to use for comparison to cluster
+ # template response body. The 'node_groups' field in the response body
+ # has some extra info that the POST body does not have. The 'node_groups'
+ # field in the response body is something like this
+ #
+ # 'node_groups': [
+ # {
+ # 'count': 3,
+ # 'name': 'worker-node',
+ # 'volume_mount_prefix': '/volumes/disk',
+ # 'created_at': '2014-05-21 14:31:37',
+ # 'updated_at': None,
+ # 'floating_ip_pool': None,
+ # ...
+ # },
+ # ...
+ # ]
+ cls.cluster_template = cls.full_cluster_template.copy()
+ del cls.cluster_template['node_groups']
+
+ def _create_cluster_template(self, template_name=None):
+ """Creates Cluster Template with optional name specified.
+
+ It creates a template and checks the response status, template name and
+ response body. Returns the id and name of the created template.
+ """
+ if not template_name:
+ # generate random name if it's not specified
+ template_name = data_utils.rand_name('sahara-cluster-template')
+
+ # create cluster template
+ resp, body = self.create_cluster_template(template_name,
+ **self.full_cluster_template)
+
+ # ensure that the template was created successfully
+ self.assertEqual(202, resp.status)
+ self.assertEqual(template_name, body['name'])
+ self.assertDictContainsSubset(self.cluster_template, body)
+
+ return body['id'], template_name
+
+ @test.attr(type='smoke')
+ def test_cluster_template_create(self):
+ self._create_cluster_template()
+
+ @test.attr(type='smoke')
+ def test_cluster_template_list(self):
+ template_info = self._create_cluster_template()
+
+ # check for cluster template in list
+ resp, templates = self.client.list_cluster_templates()
+ self.assertEqual(200, resp.status)
+ templates_info = [(template['id'], template['name'])
+ for template in templates]
+ self.assertIn(template_info, templates_info)
+
+ @test.attr(type='smoke')
+ def test_cluster_template_get(self):
+ template_id, template_name = self._create_cluster_template()
+
+ # check cluster template fetch by id
+ resp, template = self.client.get_cluster_template(template_id)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(template_name, template['name'])
+ self.assertDictContainsSubset(self.cluster_template, template)
+
+ @test.attr(type='smoke')
+ def test_cluster_template_delete(self):
+ template_id = self._create_cluster_template()[0]
+
+ # delete the cluster template by id
+ resp = self.client.delete_cluster_template(template_id)[0]
+ self.assertEqual(204, resp.status)
+ # TODO(ylobankov): check that the cluster template is really deleted
diff --git a/tempest/api/data_processing/test_job_binary_internals.py b/tempest/api/data_processing/test_job_binary_internals.py
new file mode 100644
index 0000000..6d59177
--- /dev/null
+++ b/tempest/api/data_processing/test_job_binary_internals.py
@@ -0,0 +1,88 @@
+# Copyright (c) 2014 Mirantis Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.data_processing import base as dp_base
+from tempest.common.utils import data_utils
+from tempest import test
+
+
+class JobBinaryInternalTest(dp_base.BaseDataProcessingTest):
+ """Link to the API documentation is http://docs.openstack.org/developer/
+ sahara/restapi/rest_api_v1.1_EDP.html#job-binary-internals
+ """
+ @classmethod
+ def setUpClass(cls):
+ super(JobBinaryInternalTest, cls).setUpClass()
+ cls.job_binary_internal_data = 'Some script may be data'
+
+ def _create_job_binary_internal(self, binary_name=None):
+ """Creates Job Binary Internal with optional name specified.
+
+ It puts data into the Sahara database and checks the response status and
+ the job binary internal name. Returns the id and name of the created job
+ binary internal.
+ """
+ if not binary_name:
+ # generate random name if it's not specified
+ binary_name = data_utils.rand_name('sahara-job-binary-internal')
+
+ # create job binary internal
+ resp, body = self.create_job_binary_internal(
+ binary_name, self.job_binary_internal_data)
+
+ # ensure that the job binary internal was created successfully
+ self.assertEqual(202, resp.status)
+ self.assertEqual(binary_name, body['name'])
+
+ return body['id'], binary_name
+
+ @test.attr(type='smoke')
+ def test_job_binary_internal_create(self):
+ self._create_job_binary_internal()
+
+ @test.attr(type='smoke')
+ def test_job_binary_internal_list(self):
+ binary_info = self._create_job_binary_internal()
+
+ # check for job binary internal in list
+ resp, binaries = self.client.list_job_binary_internals()
+ self.assertEqual(200, resp.status)
+ binaries_info = [(binary['id'], binary['name']) for binary in binaries]
+ self.assertIn(binary_info, binaries_info)
+
+ @test.attr(type='smoke')
+ def test_job_binary_internal_get(self):
+ binary_id, binary_name = self._create_job_binary_internal()
+
+ # check job binary internal fetch by id
+ resp, binary = self.client.get_job_binary_internal(binary_id)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(binary_name, binary['name'])
+
+ @test.attr(type='smoke')
+ def test_job_binary_internal_delete(self):
+ binary_id = self._create_job_binary_internal()[0]
+
+ # delete the job binary internal by id
+ resp = self.client.delete_job_binary_internal(binary_id)[0]
+ self.assertEqual(204, resp.status)
+
+ @test.attr(type='smoke')
+ def test_job_binary_internal_get_data(self):
+ binary_id = self._create_job_binary_internal()[0]
+
+ # get data of job binary internal by id
+ resp, data = self.client.get_job_binary_internal_data(binary_id)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(data, self.job_binary_internal_data)
diff --git a/tempest/api/identity/test_extension.py b/tempest/api/identity/test_extension.py
new file mode 100644
index 0000000..67f20f4
--- /dev/null
+++ b/tempest/api/identity/test_extension.py
@@ -0,0 +1,37 @@
+# Copyright 2014 NEC Corporation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.identity import base
+from tempest import test
+
+
+class ExtensionTestJSON(base.BaseIdentityV2AdminTest):
+ _interface = 'json'
+
+ @test.attr(type='gate')
+ def test_list_extensions(self):
+ # List all the extensions
+ resp, body = self.non_admin_client.list_extensions()
+ self.assertEqual(200, resp.status)
+ self.assertNotEmpty(body)
+ keys = ['name', 'updated', 'alias', 'links',
+ 'namespace', 'description']
+ for value in body:
+ for key in keys:
+ self.assertIn(key, value)
+
+
+class ExtensionTestXML(ExtensionTestJSON):
+ _interface = 'xml'
diff --git a/tempest/api/network/base.py b/tempest/api/network/base.py
index dcd9bff..0ccddcf 100644
--- a/tempest/api/network/base.py
+++ b/tempest/api/network/base.py
@@ -81,9 +81,13 @@
cls.metering_label_rules = []
cls.fw_rules = []
cls.fw_policies = []
+ cls.ipsecpolicies = []
@classmethod
def tearDownClass(cls):
+ # Clean up ipsec policies
+ for ipsecpolicy in cls.ipsecpolicies:
+ cls.client.delete_ipsecpolicy(ipsecpolicy['id'])
# Clean up firewall policies
for fw_policy in cls.fw_policies:
cls.client.delete_firewall_policy(fw_policy['id'])
@@ -342,6 +346,14 @@
router['id'], i['fixed_ips'][0]['subnet_id'])
cls.client.delete_router(router['id'])
+ @classmethod
+ def create_ipsecpolicy(cls, name):
+ """Wrapper utility that returns a test ipsec policy."""
+ _, body = cls.client.create_ipsecpolicy(name=name)
+ ipsecpolicy = body['ipsecpolicy']
+ cls.ipsecpolicies.append(ipsecpolicy)
+ return ipsecpolicy
+
class BaseAdminNetworkTest(BaseNetworkTest):
diff --git a/tempest/api/network/test_vpnaas_extensions.py b/tempest/api/network/test_vpnaas_extensions.py
index a49e944..d1fe15c 100644
--- a/tempest/api/network/test_vpnaas_extensions.py
+++ b/tempest/api/network/test_vpnaas_extensions.py
@@ -16,6 +16,7 @@
from tempest.api.network import base
from tempest.common.utils import data_utils
from tempest import config
+from tempest import exceptions
from tempest import test
CONF = config.CONF
@@ -53,6 +54,8 @@
cls.router['id'])
cls.ikepolicy = cls.create_ikepolicy(
data_utils.rand_name("ike-policy-"))
+ cls.ipsecpolicy = cls.create_ipsecpolicy(
+ data_utils.rand_name("ipsec-policy-"))
def _delete_ike_policy(self, ike_policy_id):
# Deletes a ike policy and verifies if it is deleted or not
@@ -70,6 +73,20 @@
ike_id_list.append(i['id'])
self.assertNotIn(ike_policy_id, ike_id_list)
+ def _delete_ipsec_policy(self, ipsec_policy_id):
+ # Deletes an ipsec policy if it exists
+ try:
+ self.client.delete_ipsecpolicy(ipsec_policy_id)
+
+ except exceptions.NotFound:
+ pass
+
+ def _assertExpected(self, expected, actual):
+ # Check that the expected keys/values exist in the actual response body
+ for key, value in expected.iteritems():
+ self.assertIn(key, actual)
+ self.assertEqual(value, actual[key])
+
@test.attr(type='smoke')
def test_list_vpn_services(self):
# Verify the VPN service exists in the list of all VPN services
@@ -177,6 +194,51 @@
self.assertEqual(self.ikepolicy['ike_version'],
ikepolicy['ike_version'])
+ @test.attr(type='smoke')
+ def test_list_ipsec_policies(self):
+ # Verify the ipsec policy exists in the list of all ipsec policies
+ resp, body = self.client.list_ipsecpolicies()
+ self.assertEqual('200', resp['status'])
+ ipsecpolicies = body['ipsecpolicies']
+ self.assertIn(self.ipsecpolicy['id'], [i['id'] for i in ipsecpolicies])
+
+ @test.attr(type='smoke')
+ def test_create_update_delete_ipsec_policy(self):
+ # Creates an ipsec policy
+ ipsec_policy_body = {'name': data_utils.rand_name('ipsec-policy'),
+ 'pfs': 'group5',
+ 'encryption_algorithm': "aes-128",
+ 'auth_algorithm': 'sha1'}
+ resp, resp_body = self.client.create_ipsecpolicy(**ipsec_policy_body)
+ self.assertEqual('201', resp['status'])
+ ipsecpolicy = resp_body['ipsecpolicy']
+ self.addCleanup(self._delete_ipsec_policy, ipsecpolicy['id'])
+ self._assertExpected(ipsec_policy_body, ipsecpolicy)
+ # Verification of ipsec policy update
+ new_ipsec = {'description': 'Updated ipsec policy',
+ 'pfs': 'group2',
+ 'name': data_utils.rand_name("New-IPSec"),
+ 'encryption_algorithm': "aes-256",
+ 'lifetime': {'units': "seconds", 'value': '2000'}}
+ resp, body = self.client.update_ipsecpolicy(ipsecpolicy['id'],
+ **new_ipsec)
+ self.assertEqual('200', resp['status'])
+ updated_ipsec_policy = body['ipsecpolicy']
+ self._assertExpected(new_ipsec, updated_ipsec_policy)
+ # Verification of ipsec policy delete
+ resp, _ = self.client.delete_ipsecpolicy(ipsecpolicy['id'])
+ self.assertEqual('204', resp['status'])
+ self.assertRaises(exceptions.NotFound,
+ self.client.delete_ipsecpolicy, ipsecpolicy['id'])
+
+ @test.attr(type='smoke')
+ def test_show_ipsec_policy(self):
+ # Verifies the details of an ipsec policy
+ resp, body = self.client.show_ipsecpolicy(self.ipsecpolicy['id'])
+ self.assertEqual('200', resp['status'])
+ ipsecpolicy = body['ipsecpolicy']
+ self._assertExpected(self.ipsecpolicy, ipsecpolicy)
+
class VPNaaSTestXML(VPNaaSTestJSON):
_interface = 'xml'
diff --git a/tempest/api/orchestration/stacks/templates/cinder_basic.yaml b/tempest/api/orchestration/stacks/templates/cinder_basic.yaml
index 3e03a30..ffff580 100644
--- a/tempest/api/orchestration/stacks/templates/cinder_basic.yaml
+++ b/tempest/api/orchestration/stacks/templates/cinder_basic.yaml
@@ -6,6 +6,7 @@
properties:
size: 1
description: a descriptive description
+ name: volume_name
outputs:
status:
@@ -20,5 +21,8 @@
description: display_description
value: { get_attr: ['volume', 'display_description'] }
+ display_name:
+ value: { get_attr: ['volume', 'display_name'] }
+
volume_id:
value: { get_resource: volume }
diff --git a/tempest/api/orchestration/stacks/templates/cinder_basic_delete_retain.yaml b/tempest/api/orchestration/stacks/templates/cinder_basic_delete_retain.yaml
index 08e3da4..b660c19 100644
--- a/tempest/api/orchestration/stacks/templates/cinder_basic_delete_retain.yaml
+++ b/tempest/api/orchestration/stacks/templates/cinder_basic_delete_retain.yaml
@@ -7,6 +7,7 @@
properties:
size: 1
description: a descriptive description
+ name: volume_name
outputs:
status:
@@ -21,5 +22,8 @@
description: display_description
value: { get_attr: ['volume', 'display_description'] }
+ display_name:
+ value: { get_attr: ['volume', 'display_name'] }
+
volume_id:
value: { get_resource: volume }
diff --git a/tempest/api/orchestration/stacks/templates/neutron_basic.yaml b/tempest/api/orchestration/stacks/templates/neutron_basic.yaml
index 63b03f4..878ff68 100644
--- a/tempest/api/orchestration/stacks/templates/neutron_basic.yaml
+++ b/tempest/api/orchestration/stacks/templates/neutron_basic.yaml
@@ -8,10 +8,12 @@
type: string
ImageId:
type: string
- ExternalRouterId:
+ SubNetCidr:
type: string
ExternalNetworkId:
type: string
+ DNSServers:
+ type: comma_delimited_list
timeout:
type: number
resources:
@@ -25,21 +27,19 @@
network_id: {Ref: Network}
name: NewSubnet
ip_version: 4
- cidr: 10.0.3.0/24
- dns_nameservers: ["8.8.8.8"]
- allocation_pools:
- - {end: 10.0.3.150, start: 10.0.3.20}
+ cidr: { get_param: SubNetCidr }
+ dns_nameservers: { get_param: DNSServers }
Router:
type: OS::Neutron::Router
properties:
name: NewRouter
- admin_state_up: false
+ admin_state_up: true
external_gateway_info:
network: {get_param: ExternalNetworkId}
RouterInterface:
type: OS::Neutron::RouterInterface
properties:
- router_id: {get_param: ExternalRouterId}
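+ # attach the interface to the router created in this template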
+ router_id: {get_resource: Router}
subnet_id: {get_resource: Subnet}
Server:
type: OS::Nova::Server
@@ -56,8 +56,8 @@
template: |
#!/bin/bash -v
- /opt/aws/bin/cfn-signal -e 0 -r "SmokeServerNeutron created" \
- 'wait_handle'
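+ # retry the signal every 3 seconds until it succeeds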
+ while ! /opt/aws/bin/cfn-signal -e 0 -r "SmokeServerNeutron created" \
+ 'wait_handle' ; do sleep 3; done
params:
wait_handle: {get_resource: WaitHandleNeutron}
WaitHandleNeutron:
diff --git a/tempest/api/orchestration/stacks/test_neutron_resources.py b/tempest/api/orchestration/stacks/test_neutron_resources.py
index 3086d78..e92b945 100644
--- a/tempest/api/orchestration/stacks/test_neutron_resources.py
+++ b/tempest/api/orchestration/stacks/test_neutron_resources.py
@@ -12,6 +12,7 @@
import logging
+import netaddr
from tempest.api.orchestration import base
from tempest import clients
@@ -41,9 +42,12 @@
template = cls.load_template('neutron_basic')
cls.keypair_name = (CONF.orchestration.keypair_name or
cls._create_keypair()['name'])
- cls.external_router_id = cls._get_external_router_id()
cls.external_network_id = CONF.network.public_network_id
+ tenant_cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
+ mask_bits = CONF.network.tenant_network_mask_bits
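+ # carve the first subnet of mask_bits length out of the tenant CIDR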
+ cls.subnet_cidr = tenant_cidr.subnet(mask_bits).next()
+
# create the stack
cls.stack_identifier = cls.create_stack(
cls.stack_name,
@@ -52,9 +56,10 @@
'KeyName': cls.keypair_name,
'InstanceType': CONF.orchestration.instance_type,
'ImageId': CONF.orchestration.image_ref,
- 'ExternalRouterId': cls.external_router_id,
'ExternalNetworkId': cls.external_network_id,
- 'timeout': CONF.orchestration.build_timeout
+ 'timeout': CONF.orchestration.build_timeout,
+ 'DNSServers': CONF.network.dns_servers,
+ 'SubNetCidr': str(cls.subnet_cidr)
})
cls.stack_id = cls.stack_identifier.split('/')[1]
try:
@@ -77,14 +82,6 @@
for resource in resources:
cls.test_resources[resource['logical_resource_id']] = resource
- @classmethod
- def _get_external_router_id(cls):
- resp, body = cls.network_client.list_ports()
- ports = body['ports']
- router_ports = filter(lambda port: port['device_owner'] ==
- 'network:router_interface', ports)
- return router_ports[0]['device_id']
-
@test.attr(type='slow')
def test_created_resources(self):
"""Verifies created neutron resources."""
@@ -121,11 +118,10 @@
self.assertEqual(subnet_id, subnet['id'])
self.assertEqual(network_id, subnet['network_id'])
self.assertEqual('NewSubnet', subnet['name'])
- self.assertEqual('8.8.8.8', subnet['dns_nameservers'][0])
- self.assertEqual('10.0.3.20', subnet['allocation_pools'][0]['start'])
- self.assertEqual('10.0.3.150', subnet['allocation_pools'][0]['end'])
+ self.assertEqual(sorted(CONF.network.dns_servers),
+ sorted(subnet['dns_nameservers']))
self.assertEqual(4, subnet['ip_version'])
- self.assertEqual('10.0.3.0/24', subnet['cidr'])
+ self.assertEqual(str(self.subnet_cidr), subnet['cidr'])
@test.attr(type='slow')
def test_created_router(self):
@@ -137,18 +133,19 @@
self.assertEqual('NewRouter', router['name'])
self.assertEqual(self.external_network_id,
router['external_gateway_info']['network_id'])
- self.assertEqual(False, router['admin_state_up'])
+ self.assertEqual(True, router['admin_state_up'])
@test.attr(type='slow')
def test_created_router_interface(self):
"""Verifies created router interface."""
+ router_id = self.test_resources.get('Router')['physical_resource_id']
network_id = self.test_resources.get('Network')['physical_resource_id']
subnet_id = self.test_resources.get('Subnet')['physical_resource_id']
resp, body = self.network_client.list_ports()
self.assertEqual('200', resp['status'])
ports = body['ports']
router_ports = filter(lambda port: port['device_id'] ==
- self.external_router_id, ports)
+ router_id, ports)
created_network_ports = filter(lambda port: port['network_id'] ==
network_id, router_ports)
self.assertEqual(1, len(created_network_ports))
@@ -158,7 +155,8 @@
subnet_id, fixed_ips)
self.assertEqual(1, len(subnet_fixed_ips))
router_interface_ip = subnet_fixed_ips[0]['ip_address']
- self.assertEqual('10.0.3.1', router_interface_ip)
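+ # the router interface should take the first host address in the subnet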
+ self.assertEqual(str(self.subnet_cidr.iter_hosts().next()),
+ router_interface_ip)
@test.attr(type='slow')
def test_created_server(self):
@@ -170,8 +168,4 @@
self.assertEqual('ACTIVE', server['status'])
network = server['addresses']['NewNetwork'][0]
self.assertEqual(4, network['version'])
- ip_addr_prefix = network['addr'][:7]
- ip_addr_suffix = int(network['addr'].split('.')[3])
- self.assertEqual('10.0.3.', ip_addr_prefix)
- self.assertTrue(ip_addr_suffix >= 20)
- self.assertTrue(ip_addr_suffix <= 150)
+ self.assertIn(netaddr.IPAddress(network['addr']), self.subnet_cidr)
diff --git a/tempest/api/orchestration/stacks/test_volumes.py b/tempest/api/orchestration/stacks/test_volumes.py
index 2544c41..5ac2a8d 100644
--- a/tempest/api/orchestration/stacks/test_volumes.py
+++ b/tempest/api/orchestration/stacks/test_volumes.py
@@ -39,6 +39,8 @@
self.assertEqual(1, volume.get('size'))
self.assertEqual('a descriptive description',
volume.get('display_description'))
+ self.assertEqual('volume_name',
+ volume.get('display_name'))
def _outputs_verify(self, stack_identifier):
self.assertEqual('available',
@@ -48,6 +50,9 @@
self.assertEqual('a descriptive description',
self.get_stack_output(stack_identifier,
'display_description'))
+ self.assertEqual('volume_name',
+ self.get_stack_output(stack_identifier,
+ 'display_name'))
@test.attr(type='gate')
def test_cinder_volume_create_delete(self):
diff --git a/tempest/api/volume/test_availability_zone.py b/tempest/api/volume/test_availability_zone.py
index 1db7b7b..fe8f96e 100644
--- a/tempest/api/volume/test_availability_zone.py
+++ b/tempest/api/volume/test_availability_zone.py
@@ -22,6 +22,7 @@
"""
Tests Availability Zone API List
"""
+ _interface = 'json'
@classmethod
def setUpClass(cls):
diff --git a/tempest/config.py b/tempest/config.py
index 94a725b..6d9fda6 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -415,6 +415,10 @@
default=1,
help="Time in seconds between network operation status "
"checks."),
+ cfg.ListOpt('dns_servers',
+ default=["8.8.8.8", "8.8.4.4"],
+ help="List of dns servers whichs hould be used"
+ " for subnet creation")
]
network_feature_group = cfg.OptGroup(name='network-feature-enabled',
diff --git a/tempest/services/data_processing/v1_1/client.py b/tempest/services/data_processing/v1_1/client.py
index 73e67c3..4465968 100644
--- a/tempest/services/data_processing/v1_1/client.py
+++ b/tempest/services/data_processing/v1_1/client.py
@@ -186,3 +186,9 @@
uri = 'job-binary-internals/%s' % job_binary_id
return self.delete(uri)
+
+ def get_job_binary_internal_data(self, job_binary_id):
+ """Returns data of a single job binary internal."""
+
+ uri = 'job-binary-internals/%s/data' % job_binary_id
+ return self.get(uri)
diff --git a/tempest/services/identity/json/identity_client.py b/tempest/services/identity/json/identity_client.py
index 479a289..b0cab8e 100644
--- a/tempest/services/identity/json/identity_client.py
+++ b/tempest/services/identity/json/identity_client.py
@@ -27,7 +27,8 @@
self.endpoint_url = 'adminURL'
# Needed for xml service client
- self.list_tags = ["roles", "tenants", "users", "services"]
+ self.list_tags = ["roles", "tenants", "users", "services",
+ "extensions"]
def has_admin_extensions(self):
"""
@@ -237,6 +238,12 @@
resp, body = self.put('users/%s/OS-KSADM/password' % user_id, put_body)
return resp, self._parse_resp(body)
+ def list_extensions(self):
+ """List all the extensions."""
+ resp, body = self.get('/extensions')
+ body = json.loads(body)
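+ # the v2 API returns the extension list nested under extensions -> values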
+ return resp, body['extensions']['values']
+
class TokenClientJSON(IdentityClientJSON):
diff --git a/tempest/services/identity/xml/identity_client.py b/tempest/services/identity/xml/identity_client.py
index b213c1a..886ce7b 100644
--- a/tempest/services/identity/xml/identity_client.py
+++ b/tempest/services/identity/xml/identity_client.py
@@ -127,6 +127,11 @@
str(xml.Document(put_body)))
return resp, self._parse_resp(body)
+ def list_extensions(self):
+ """List all the extensions."""
+ resp, body = self.get('/extensions')
+ return resp, self._parse_resp(body)
+
class TokenClientXML(identity_client.TokenClientJSON):
TYPE = "xml"
diff --git a/tempest/services/network/network_client_base.py b/tempest/services/network/network_client_base.py
index 2a797b2..81792c4 100644
--- a/tempest/services/network/network_client_base.py
+++ b/tempest/services/network/network_client_base.py
@@ -30,6 +30,7 @@
'members': 'lb',
'vpnservices': 'vpn',
'ikepolicies': 'vpn',
+ 'ipsecpolicies': 'vpn',
'metering_labels': 'metering',
'metering_label_rules': 'metering',
'firewall_rules': 'fw',
@@ -47,6 +48,7 @@
'security_groups': 'security_groups',
'security_group_rules': 'security_group_rules',
'ikepolicy': 'ikepolicies',
+ 'ipsecpolicy': 'ipsecpolicies',
'quotas': 'quotas',
'firewall_policy': 'firewall_policies'
}