Merge "Add client response checking for data processing service"
diff --git a/requirements.txt b/requirements.txt
index b86d179..9a3b74d 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -8,11 +8,11 @@
paramiko>=1.13.0
netaddr>=0.7.6
python-ceilometerclient>=1.0.6
-python-glanceclient>=0.9.0
+python-glanceclient>=0.13.1
python-keystoneclient>=0.9.0
python-novaclient>=2.17.0
-python-neutronclient>=2.3.4,<3
-python-cinderclient>=1.0.6
+python-neutronclient>=2.3.5,<3
+python-cinderclient>=1.0.7
python-heatclient>=0.2.9
python-ironicclient
python-saharaclient>=0.6.0
diff --git a/tempest/api/compute/admin/test_servers_negative.py b/tempest/api/compute/admin/test_servers_negative.py
index cccaf13..f4d010e 100644
--- a/tempest/api/compute/admin/test_servers_negative.py
+++ b/tempest/api/compute/admin/test_servers_negative.py
@@ -127,6 +127,8 @@
self.client.migrate_server,
str(uuid.uuid4()))
+ @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+ 'Resize not available.')
@testtools.skipUnless(CONF.compute_feature_enabled.suspend,
'Suspend is not available.')
@test.attr(type=['negative', 'gate'])
diff --git a/tempest/api/compute/images/test_image_metadata.py b/tempest/api/compute/images/test_image_metadata.py
index 91eb4c5..9036726 100644
--- a/tempest/api/compute/images/test_image_metadata.py
+++ b/tempest/api/compute/images/test_image_metadata.py
@@ -13,6 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import StringIO
+
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest import config
@@ -31,25 +33,21 @@
skip_msg = ("%s skipped as glance is not available" % cls.__name__)
raise cls.skipException(skip_msg)
+ cls.glance_client = cls.os.image_client
cls.client = cls.images_client
cls.image_id = None
- resp, server = cls.create_test_server(wait_until='ACTIVE')
- cls.server_id = server['id']
-
- # Snapshot the server once to save time
name = data_utils.rand_name('image')
- resp, _ = cls.client.create_image(cls.server_id, name, {})
- cls.image_id = resp['location'].rsplit('/', 1)[1]
-
+ resp, body = cls.glance_client.create_image(name=name,
+ container_format='bare',
+ disk_format='raw',
+ is_public=False)
+ cls.image_id = body['id']
+ cls.images.append(cls.image_id)
+ image_file = StringIO.StringIO(('*' * 1024))
+ cls.glance_client.update_image(cls.image_id, data=image_file)
cls.client.wait_for_image_status(cls.image_id, 'ACTIVE')
- @classmethod
- def tearDownClass(cls):
- if cls.image_id:
- cls.client.delete_image(cls.image_id)
- super(ImagesMetadataTestJSON, cls).tearDownClass()
-
def setUp(self):
super(ImagesMetadataTestJSON, self).setUp()
meta = {'key1': 'value1', 'key2': 'value2'}
diff --git a/tempest/api/compute/images/test_list_image_filters.py b/tempest/api/compute/images/test_list_image_filters.py
index 86ee4a4..f9350e1 100644
--- a/tempest/api/compute/images/test_list_image_filters.py
+++ b/tempest/api/compute/images/test_list_image_filters.py
@@ -13,7 +13,11 @@
# License for the specific language governing permissions and limitations
# under the License.
+import StringIO
+import time
+
from tempest.api.compute import base
+from tempest.common.utils import data_utils
from tempest import config
from tempest.openstack.common import log as logging
from tempest import test
@@ -32,7 +36,34 @@
skip_msg = ("%s skipped as glance is not available" % cls.__name__)
raise cls.skipException(skip_msg)
cls.client = cls.images_client
+ cls.glance_client = cls.os.image_client
+ def _create_image():
+ name = data_utils.rand_name('image')
+ _, body = cls.glance_client.create_image(name=name,
+ container_format='bare',
+ disk_format='raw',
+ is_public=False)
+ image_id = body['id']
+ cls.images.append(image_id)
+ # Wait 1 second between creation and upload to ensure a delta
+ # between created_at and updated_at.
+ time.sleep(1)
+ image_file = StringIO.StringIO(('*' * 1024))
+ cls.glance_client.update_image(image_id, data=image_file)
+ cls.client.wait_for_image_status(image_id, 'ACTIVE')
+ _, body = cls.client.get_image(image_id)
+ return body
+
+ # Create non-snapshot images via glance
+ cls.image1 = _create_image()
+ cls.image1_id = cls.image1['id']
+ cls.image2 = _create_image()
+ cls.image2_id = cls.image2['id']
+ cls.image3 = _create_image()
+ cls.image3_id = cls.image3['id']
+
+ # Create instances and snapshots via nova
try:
resp, cls.server1 = cls.create_test_server()
resp, cls.server2 = cls.create_test_server(wait_until='ACTIVE')
@@ -41,21 +72,21 @@
'ACTIVE')
# Create images to be used in the filter tests
- resp, cls.image1 = cls.create_image_from_server(
+ resp, cls.snapshot1 = cls.create_image_from_server(
cls.server1['id'], wait_until='ACTIVE')
- cls.image1_id = cls.image1['id']
+ cls.snapshot1_id = cls.snapshot1['id']
# Servers have a hidden property for when they are being imaged
# Performing back-to-back create image calls on a single
# server will sometimes cause failures
- resp, cls.image3 = cls.create_image_from_server(
+ resp, cls.snapshot3 = cls.create_image_from_server(
cls.server2['id'], wait_until='ACTIVE')
- cls.image3_id = cls.image3['id']
+ cls.snapshot3_id = cls.snapshot3['id']
# Wait for the server to be active after the image upload
- resp, cls.image2 = cls.create_image_from_server(
+ resp, cls.snapshot2 = cls.create_image_from_server(
cls.server1['id'], wait_until='ACTIVE')
- cls.image2_id = cls.image2['id']
+ cls.snapshot2_id = cls.snapshot2['id']
except Exception:
LOG.exception('setUpClass failed')
cls.tearDownClass()
@@ -89,11 +120,14 @@
params = {'server': self.server1['id']}
resp, images = self.client.list_images(params)
- self.assertTrue(any([i for i in images if i['id'] == self.image1_id]),
+ self.assertTrue(any([i for i in images
+ if i['id'] == self.snapshot1_id]),
"Failed to find image %s in images. Got images %s" %
(self.image1_id, images))
- self.assertTrue(any([i for i in images if i['id'] == self.image2_id]))
- self.assertFalse(any([i for i in images if i['id'] == self.image3_id]))
+ self.assertTrue(any([i for i in images
+ if i['id'] == self.snapshot2_id]))
+ self.assertFalse(any([i for i in images
+ if i['id'] == self.snapshot3_id]))
@test.attr(type='gate')
def test_list_images_filter_by_server_ref(self):
@@ -106,11 +140,11 @@
resp, images = self.client.list_images(params)
self.assertFalse(any([i for i in images
- if i['id'] == self.image1_id]))
+ if i['id'] == self.snapshot1_id]))
self.assertFalse(any([i for i in images
- if i['id'] == self.image2_id]))
+ if i['id'] == self.snapshot2_id]))
self.assertTrue(any([i for i in images
- if i['id'] == self.image3_id]))
+ if i['id'] == self.snapshot3_id]))
@test.attr(type='gate')
def test_list_images_filter_by_type(self):
@@ -118,10 +152,14 @@
params = {'type': 'snapshot'}
resp, images = self.client.list_images(params)
- self.assertTrue(any([i for i in images if i['id'] == self.image1_id]))
- self.assertTrue(any([i for i in images if i['id'] == self.image2_id]))
- self.assertTrue(any([i for i in images if i['id'] == self.image3_id]))
- self.assertFalse(any([i for i in images if i['id'] == self.image_ref]))
+ self.assertTrue(any([i for i in images
+ if i['id'] == self.snapshot1_id]))
+ self.assertTrue(any([i for i in images
+ if i['id'] == self.snapshot2_id]))
+ self.assertTrue(any([i for i in images
+ if i['id'] == self.snapshot3_id]))
+ self.assertFalse(any([i for i in images
+ if i['id'] == self.image_ref]))
@test.attr(type='gate')
def test_list_images_limit_results(self):
@@ -184,11 +222,11 @@
resp, images = self.client.list_images_with_detail(params)
self.assertFalse(any([i for i in images
- if i['id'] == self.image1_id]))
+ if i['id'] == self.snapshot1_id]))
self.assertFalse(any([i for i in images
- if i['id'] == self.image2_id]))
+ if i['id'] == self.snapshot2_id]))
self.assertTrue(any([i for i in images
- if i['id'] == self.image3_id]))
+ if i['id'] == self.snapshot3_id]))
@test.attr(type='gate')
def test_list_images_with_detail_filter_by_type(self):
@@ -197,10 +235,14 @@
resp, images = self.client.list_images_with_detail(params)
resp, image4 = self.client.get_image(self.image_ref)
- self.assertTrue(any([i for i in images if i['id'] == self.image1_id]))
- self.assertTrue(any([i for i in images if i['id'] == self.image2_id]))
- self.assertTrue(any([i for i in images if i['id'] == self.image3_id]))
- self.assertFalse(any([i for i in images if i['id'] == self.image_ref]))
+ self.assertTrue(any([i for i in images
+ if i['id'] == self.snapshot1_id]))
+ self.assertTrue(any([i for i in images
+ if i['id'] == self.snapshot2_id]))
+ self.assertTrue(any([i for i in images
+ if i['id'] == self.snapshot3_id]))
+ self.assertFalse(any([i for i in images
+ if i['id'] == self.image_ref]))
@test.attr(type='gate')
def test_list_images_with_detail_filter_by_changes_since(self):
diff --git a/tempest/api/compute/servers/test_list_server_filters.py b/tempest/api/compute/servers/test_list_server_filters.py
index f66020c..9d39c9f 100644
--- a/tempest/api/compute/servers/test_list_server_filters.py
+++ b/tempest/api/compute/servers/test_list_server_filters.py
@@ -203,11 +203,13 @@
params = {'status': 'active'}
resp, body = self.client.list_servers_with_detail(params)
servers = body['servers']
+ test_ids = [s['id'] for s in (self.s1, self.s2, self.s3)]
self.assertIn(self.s1['id'], map(lambda x: x['id'], servers))
self.assertIn(self.s2['id'], map(lambda x: x['id'], servers))
self.assertIn(self.s3['id'], map(lambda x: x['id'], servers))
- self.assertEqual(['ACTIVE'] * 3, [x['status'] for x in servers])
+ self.assertEqual(['ACTIVE'] * 3, [x['status'] for x in servers
+ if x['id'] in test_ids])
@test.attr(type='gate')
def test_list_servers_filtered_by_name_wildcard(self):
diff --git a/tempest/api/identity/admin/v3/test_trusts.py b/tempest/api/identity/admin/v3/test_trusts.py
index fed5171..1561a6e 100644
--- a/tempest/api/identity/admin/v3/test_trusts.py
+++ b/tempest/api/identity/admin/v3/test_trusts.py
@@ -150,7 +150,6 @@
self.assertNotIn('v3/roles/%s' % self.not_delegated_role_id,
role['links']['self'])
- @test.skip_because(bug='1334368')
def check_trust_roles(self):
# Check we find the delegated role
_, roles_get = self.trustor_client.get_trust_roles(
@@ -164,12 +163,6 @@
_, role_get = self.trustor_client.check_trust_role(
self.trust_id, self.delegated_role_id)
- # This tempest two-step change conflicted with the change
- # moving response checking to the client. This test should be
- # re-enabled by removing the following assert and changing
- # the response code in tempest/services/identity/v3/json/
- # identity_client.py in the check_trust_role_method.
- # self.assertEqual('200', resp['status'])
# And that we don't find not_delegated_role
self.assertRaises(exceptions.NotFound,
diff --git a/tempest/api/identity/admin/v3/test_users.py b/tempest/api/identity/admin/v3/test_users.py
index 558575e..3c25819 100644
--- a/tempest/api/identity/admin/v3/test_users.py
+++ b/tempest/api/identity/admin/v3/test_users.py
@@ -65,6 +65,28 @@
self.assertEqual('false', str(new_user_get['enabled']).lower())
@test.attr(type='gate')
+ def test_update_user_password(self):
+        # Create a user whose password will be updated
+ u_name = data_utils.rand_name('user')
+ original_password = data_utils.rand_name('pass')
+ _, user = self.client.create_user(
+ u_name, password=original_password)
+        # Delete the user at the end of all test methods
+ self.addCleanup(self.client.delete_user, user['id'])
+ # Update user with new password
+ new_password = data_utils.rand_name('pass1')
+ self.client.update_user_password(user['id'], new_password,
+ original_password)
+ resp, body = self.token.auth(user['id'], new_password)
+ self.assertEqual(201, resp.status)
+ subject_token = resp['x-subject-token']
+ # Perform GET Token to verify and confirm password is updated
+ _, token_details = self.client.get_token(subject_token)
+ self.assertEqual(resp['x-subject-token'], subject_token)
+ self.assertEqual(token_details['user']['id'], user['id'])
+ self.assertEqual(token_details['user']['name'], u_name)
+
+ @test.attr(type='gate')
def test_list_user_projects(self):
# List the projects that a user has access upon
assigned_project_ids = list()
diff --git a/tempest/api/image/v2/test_images.py b/tempest/api/image/v2/test_images.py
index ae777eb..4226815 100644
--- a/tempest/api/image/v2/test_images.py
+++ b/tempest/api/image/v2/test_images.py
@@ -86,7 +86,8 @@
# Verifying deletion
_, images = self.client.image_list()
- self.assertNotIn(image_id, images)
+ images_id = [item['id'] for item in images]
+ self.assertNotIn(image_id, images_id)
@test.attr(type='gate')
def test_update_image(self):
diff --git a/tempest/api/network/admin/test_quotas.py b/tempest/api/network/admin/test_quotas.py
index d1a8faf..9fa54b1 100644
--- a/tempest/api/network/admin/test_quotas.py
+++ b/tempest/api/network/admin/test_quotas.py
@@ -46,8 +46,7 @@
raise cls.skipException(msg)
cls.identity_admin_client = cls.os_adm.identity_client
- @test.attr(type='gate')
- def test_quotas(self):
+ def _check_quotas(self, new_quotas):
# Add a tenant to conduct the test
test_tenant = data_utils.rand_name('test_tenant_')
test_description = data_utils.rand_name('desc_')
@@ -56,14 +55,15 @@
description=test_description)
tenant_id = tenant['id']
self.addCleanup(self.identity_admin_client.delete_tenant, tenant_id)
+
# Change quotas for tenant
- new_quotas = {'network': 0, 'security_group': 0}
resp, quota_set = self.admin_client.update_quotas(tenant_id,
**new_quotas)
self.assertEqual('200', resp['status'])
self.addCleanup(self.admin_client.reset_quotas, tenant_id)
- self.assertEqual(0, quota_set['network'])
- self.assertEqual(0, quota_set['security_group'])
+ for key, value in new_quotas.iteritems():
+ self.assertEqual(value, quota_set[key])
+
# Confirm our tenant is listed among tenants with non default quotas
resp, non_default_quotas = self.admin_client.list_quotas()
self.assertEqual('200', resp['status'])
@@ -72,12 +72,14 @@
if qs['tenant_id'] == tenant_id:
found = True
self.assertTrue(found)
- # Confirm from APi quotas were changed as requested for tenant
+
+ # Confirm from API quotas were changed as requested for tenant
resp, quota_set = self.admin_client.show_quotas(tenant_id)
quota_set = quota_set['quota']
self.assertEqual('200', resp['status'])
- self.assertEqual(0, quota_set['network'])
- self.assertEqual(0, quota_set['security_group'])
+ for key, value in new_quotas.iteritems():
+ self.assertEqual(value, quota_set[key])
+
# Reset quotas to default and confirm
resp, body = self.admin_client.reset_quotas(tenant_id)
self.assertEqual('204', resp['status'])
@@ -86,49 +88,14 @@
for q in non_default_quotas['quotas']:
self.assertNotEqual(tenant_id, q['tenant_id'])
+ @test.attr(type='gate')
+ def test_quotas(self):
+ new_quotas = {'network': 0, 'security_group': 0}
+ self._check_quotas(new_quotas)
+
@test.requires_ext(extension='lbaas', service='network')
@test.attr(type='gate')
def test_lbaas_quotas(self):
- # Add a tenant to conduct the test
- test_tenant = data_utils.rand_name('test_tenant_')
- test_description = data_utils.rand_name('desc_')
- _, tenant = self.identity_admin_client.create_tenant(
- name=test_tenant,
- description=test_description)
- tenant_id = tenant['id']
- self.addCleanup(self.identity_admin_client.delete_tenant, tenant_id)
- # Change lbaas quotas for tenant
new_quotas = {'vip': 1, 'pool': 2,
'member': 3, 'health_monitor': 4}
-
- resp, quota_set = self.admin_client.update_quotas(tenant_id,
- **new_quotas)
- self.assertEqual('200', resp['status'])
- self.addCleanup(self.admin_client.reset_quotas, tenant_id)
- self.assertEqual(1, quota_set['vip'])
- self.assertEqual(2, quota_set['pool'])
- self.assertEqual(3, quota_set['member'])
- self.assertEqual(4, quota_set['health_monitor'])
- # Confirm our tenant is listed among tenants with non default quotas
- resp, non_default_quotas = self.admin_client.list_quotas()
- self.assertEqual('200', resp['status'])
- found = False
- for qs in non_default_quotas['quotas']:
- if qs['tenant_id'] == tenant_id:
- found = True
- self.assertTrue(found)
- # Confirm from APi quotas were changed as requested for tenant
- resp, quota_set = self.admin_client.show_quotas(tenant_id)
- quota_set = quota_set['quota']
- self.assertEqual('200', resp['status'])
- self.assertEqual(1, quota_set['vip'])
- self.assertEqual(2, quota_set['pool'])
- self.assertEqual(3, quota_set['member'])
- self.assertEqual(4, quota_set['health_monitor'])
- # Reset quotas to default and confirm
- resp, body = self.admin_client.reset_quotas(tenant_id)
- self.assertEqual('204', resp['status'])
- resp, non_default_quotas = self.admin_client.list_quotas()
- self.assertEqual('200', resp['status'])
- for q in non_default_quotas['quotas']:
- self.assertNotEqual(tenant_id, q['tenant_id'])
+ self._check_quotas(new_quotas)
diff --git a/tempest/api/orchestration/base.py b/tempest/api/orchestration/base.py
index 446f4ab..531df2d 100644
--- a/tempest/api/orchestration/base.py
+++ b/tempest/api/orchestration/base.py
@@ -11,6 +11,7 @@
# under the License.
import os.path
+import yaml
from tempest import clients
from tempest.common.utils import data_utils
@@ -84,11 +85,8 @@
pass
for stack_identifier in cls.stacks:
- try:
- cls.client.wait_for_stack_status(
- stack_identifier, 'DELETE_COMPLETE')
- except exceptions.NotFound:
- pass
+ cls.client.wait_for_stack_status(
+ stack_identifier, 'DELETE_COMPLETE')
@classmethod
def _create_keypair(cls, name_start='keypair-heat-'):
@@ -125,7 +123,7 @@
pass
@classmethod
- def load_template(cls, name, ext='yaml'):
+ def read_template(cls, name, ext='yaml'):
loc = ["stacks", "templates", "%s.%s" % (name, ext)]
fullpath = os.path.join(os.path.dirname(__file__), *loc)
@@ -134,6 +132,14 @@
return content
@classmethod
+ def load_template(cls, name, ext='yaml'):
+ loc = ["stacks", "templates", "%s.%s" % (name, ext)]
+ fullpath = os.path.join(os.path.dirname(__file__), *loc)
+
+ with open(fullpath, "r") as f:
+ return yaml.safe_load(f)
+
+ @classmethod
def tearDownClass(cls):
cls._clear_stacks()
cls._clear_keypairs()
diff --git a/tempest/api/orchestration/stacks/test_environment.py b/tempest/api/orchestration/stacks/test_environment.py
index 3911e72..bc46901 100644
--- a/tempest/api/orchestration/stacks/test_environment.py
+++ b/tempest/api/orchestration/stacks/test_environment.py
@@ -28,7 +28,7 @@
def test_environment_parameter(self):
"""Test passing a stack parameter via the environment."""
stack_name = data_utils.rand_name('heat')
- template = self.load_template('random_string')
+ template = self.read_template('random_string')
environment = {'parameters': {'random_length': 20}}
stack_identifier = self.create_stack(stack_name, template,
@@ -56,7 +56,7 @@
'''
environment = {'resource_registry':
{'My:Random::String': 'my_random.yaml'}}
- files = {'my_random.yaml': self.load_template('random_string')}
+ files = {'my_random.yaml': self.read_template('random_string')}
stack_identifier = self.create_stack(stack_name, template,
environment=environment,
@@ -82,7 +82,7 @@
random_value:
value: {get_attr: [random, random_value]}
'''
- files = {'my_random.yaml': self.load_template('random_string')}
+ files = {'my_random.yaml': self.read_template('random_string')}
stack_identifier = self.create_stack(stack_name, template,
files=files)
diff --git a/tempest/api/orchestration/stacks/test_neutron_resources.py b/tempest/api/orchestration/stacks/test_neutron_resources.py
index e92b945..27c6196 100644
--- a/tempest/api/orchestration/stacks/test_neutron_resources.py
+++ b/tempest/api/orchestration/stacks/test_neutron_resources.py
@@ -39,7 +39,7 @@
raise cls.skipException("Neutron support is required")
cls.network_client = os.network_client
cls.stack_name = data_utils.rand_name('heat')
- template = cls.load_template('neutron_basic')
+ template = cls.read_template('neutron_basic')
cls.keypair_name = (CONF.orchestration.keypair_name or
cls._create_keypair()['name'])
cls.external_network_id = CONF.network.public_network_id
diff --git a/tempest/api/orchestration/stacks/test_non_empty_stack.py b/tempest/api/orchestration/stacks/test_non_empty_stack.py
index 585c90b..a97c561 100644
--- a/tempest/api/orchestration/stacks/test_non_empty_stack.py
+++ b/tempest/api/orchestration/stacks/test_non_empty_stack.py
@@ -28,7 +28,7 @@
def setUpClass(cls):
super(StacksTestJSON, cls).setUpClass()
cls.stack_name = data_utils.rand_name('heat')
- template = cls.load_template('non_empty_stack')
+ template = cls.read_template('non_empty_stack')
image_id = (CONF.orchestration.image_ref or
cls._create_image()['id'])
# create the stack
diff --git a/tempest/api/orchestration/stacks/test_nova_keypair_resources.py b/tempest/api/orchestration/stacks/test_nova_keypair_resources.py
index a81a540..336fc99 100644
--- a/tempest/api/orchestration/stacks/test_nova_keypair_resources.py
+++ b/tempest/api/orchestration/stacks/test_nova_keypair_resources.py
@@ -28,7 +28,7 @@
def setUpClass(cls):
super(NovaKeyPairResourcesYAMLTest, cls).setUpClass()
cls.stack_name = data_utils.rand_name('heat')
- template = cls.load_template('nova_keypair', ext=cls._tpl_type)
+ template = cls.read_template('nova_keypair', ext=cls._tpl_type)
# create the stack, avoid any duplicated key.
cls.stack_identifier = cls.create_stack(
diff --git a/tempest/api/orchestration/stacks/test_swift_resources.py b/tempest/api/orchestration/stacks/test_swift_resources.py
index 6d53fb2..2ba2811 100644
--- a/tempest/api/orchestration/stacks/test_swift_resources.py
+++ b/tempest/api/orchestration/stacks/test_swift_resources.py
@@ -30,7 +30,7 @@
def setUpClass(cls):
super(SwiftResourcesTestJSON, cls).setUpClass()
cls.stack_name = data_utils.rand_name('heat')
- template = cls.load_template('swift_basic')
+ template = cls.read_template('swift_basic')
os = clients.Manager()
if not CONF.service_available.swift:
raise cls.skipException("Swift support is required")
diff --git a/tempest/api/orchestration/stacks/test_volumes.py b/tempest/api/orchestration/stacks/test_volumes.py
index 5ac2a8d..d422752 100644
--- a/tempest/api/orchestration/stacks/test_volumes.py
+++ b/tempest/api/orchestration/stacks/test_volumes.py
@@ -31,43 +31,44 @@
if not CONF.service_available.cinder:
raise cls.skipException('Cinder support is required')
- def _cinder_verify(self, volume_id):
+ def _cinder_verify(self, volume_id, template):
self.assertIsNotNone(volume_id)
resp, volume = self.volumes_client.get_volume(volume_id)
self.assertEqual(200, resp.status)
self.assertEqual('available', volume.get('status'))
- self.assertEqual(1, volume.get('size'))
- self.assertEqual('a descriptive description',
- volume.get('display_description'))
- self.assertEqual('volume_name',
- volume.get('display_name'))
+ self.assertEqual(template['resources']['volume']['properties'][
+ 'size'], volume.get('size'))
+ self.assertEqual(template['resources']['volume']['properties'][
+ 'description'], volume.get('display_description'))
+ self.assertEqual(template['resources']['volume']['properties'][
+ 'name'], volume.get('display_name'))
- def _outputs_verify(self, stack_identifier):
+ def _outputs_verify(self, stack_identifier, template):
self.assertEqual('available',
self.get_stack_output(stack_identifier, 'status'))
- self.assertEqual('1',
- self.get_stack_output(stack_identifier, 'size'))
- self.assertEqual('a descriptive description',
- self.get_stack_output(stack_identifier,
- 'display_description'))
- self.assertEqual('volume_name',
- self.get_stack_output(stack_identifier,
- 'display_name'))
+ self.assertEqual(str(template['resources']['volume']['properties'][
+ 'size']), self.get_stack_output(stack_identifier, 'size'))
+ self.assertEqual(template['resources']['volume']['properties'][
+ 'description'], self.get_stack_output(stack_identifier,
+ 'display_description'))
+ self.assertEqual(template['resources']['volume']['properties'][
+ 'name'], self.get_stack_output(stack_identifier, 'display_name'))
@test.attr(type='gate')
def test_cinder_volume_create_delete(self):
"""Create and delete a volume via OS::Cinder::Volume."""
stack_name = data_utils.rand_name('heat')
- template = self.load_template('cinder_basic')
+ template = self.read_template('cinder_basic')
stack_identifier = self.create_stack(stack_name, template)
self.client.wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
# Verify with cinder that the volume exists, with matching details
volume_id = self.get_stack_output(stack_identifier, 'volume_id')
- self._cinder_verify(volume_id)
+ cinder_basic_template = self.load_template('cinder_basic')
+ self._cinder_verify(volume_id, cinder_basic_template)
# Verify the stack outputs are as expected
- self._outputs_verify(stack_identifier)
+ self._outputs_verify(stack_identifier, cinder_basic_template)
# Delete the stack and ensure the volume is gone
self.client.delete_stack(stack_identifier)
@@ -86,21 +87,22 @@
def test_cinder_volume_create_delete_retain(self):
"""Ensure the 'Retain' deletion policy is respected."""
stack_name = data_utils.rand_name('heat')
- template = self.load_template('cinder_basic_delete_retain')
+ template = self.read_template('cinder_basic_delete_retain')
stack_identifier = self.create_stack(stack_name, template)
self.client.wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
# Verify with cinder that the volume exists, with matching details
volume_id = self.get_stack_output(stack_identifier, 'volume_id')
self.addCleanup(self._cleanup_volume, volume_id)
- self._cinder_verify(volume_id)
+ retain_template = self.load_template('cinder_basic_delete_retain')
+ self._cinder_verify(volume_id, retain_template)
# Verify the stack outputs are as expected
- self._outputs_verify(stack_identifier)
+ self._outputs_verify(stack_identifier, retain_template)
# Delete the stack and ensure the volume is *not* gone
self.client.delete_stack(stack_identifier)
self.client.wait_for_stack_status(stack_identifier, 'DELETE_COMPLETE')
- self._cinder_verify(volume_id)
+ self._cinder_verify(volume_id, retain_template)
# Volume cleanup happens via addCleanup calling _cleanup_volume
diff --git a/tempest/api/telemetry/test_telemetry_notification_api.py b/tempest/api/telemetry/test_telemetry_notification_api.py
index 148f5a3..f401b9b 100644
--- a/tempest/api/telemetry/test_telemetry_notification_api.py
+++ b/tempest/api/telemetry/test_telemetry_notification_api.py
@@ -32,6 +32,7 @@
@test.attr(type="gate")
@testtools.skipIf(not CONF.service_available.nova,
"Nova is not available.")
+ @test.skip_because(bug="1336755")
def test_check_nova_notification(self):
resp, body = self.create_server()
diff --git a/tempest/cli/__init__.py b/tempest/cli/__init__.py
index ba94c82..d7b4a16 100644
--- a/tempest/cli/__init__.py
+++ b/tempest/cli/__init__.py
@@ -133,7 +133,7 @@
raise CommandFailed(proc.returncode,
cmd,
result,
- stderr=result_err)
+ result_err)
return result
def assertTableStruct(self, items, field_names):
@@ -148,9 +148,15 @@
% lines[:3]))
-class CommandFailed(subprocess.CalledProcessError):
- # adds output attribute for python2.6
- def __init__(self, returncode, cmd, output, stderr=""):
- super(CommandFailed, self).__init__(returncode, cmd)
- self.output = output
+class CommandFailed(Exception):
+ def __init__(self, returncode, cmd, output, stderr):
+ super(CommandFailed, self).__init__()
+ self.returncode = returncode
+ self.cmd = cmd
+ self.stdout = output
self.stderr = stderr
+
+ def __str__(self):
+ return ("Command '%s' returned non-zero exit status %d.\n"
+ "stdout:\n%s\n"
+ "stderr:\n%s" % (self.cmd, self.returncode, self.stdout, self.stderr))
diff --git a/tempest/cli/simple_read_only/test_cinder.py b/tempest/cli/simple_read_only/test_cinder.py
index 946b89e..9a6b159 100644
--- a/tempest/cli/simple_read_only/test_cinder.py
+++ b/tempest/cli/simple_read_only/test_cinder.py
@@ -15,17 +15,16 @@
import logging
import re
-import subprocess
import testtools
-import tempest.cli
+from tempest import cli
from tempest import config
CONF = config.CONF
LOG = logging.getLogger(__name__)
-class SimpleReadOnlyCinderClientTest(tempest.cli.ClientTestBase):
+class SimpleReadOnlyCinderClientTest(cli.ClientTestBase):
"""Basic, read-only tests for Cinder CLI client.
Checks return values and output of read-only commands.
@@ -41,7 +40,7 @@
super(SimpleReadOnlyCinderClientTest, cls).setUpClass()
def test_cinder_fake_action(self):
- self.assertRaises(subprocess.CalledProcessError,
+ self.assertRaises(cli.CommandFailed,
self.cinder,
'this-does-not-exist')
@@ -66,7 +65,7 @@
'Attached to'])
self.cinder('list', params='--all-tenants 1')
self.cinder('list', params='--all-tenants 0')
- self.assertRaises(subprocess.CalledProcessError,
+ self.assertRaises(cli.CommandFailed,
self.cinder,
'list',
params='--all-tenants bad')
diff --git a/tempest/cli/simple_read_only/test_glance.py b/tempest/cli/simple_read_only/test_glance.py
index 9869483..3fb1120 100644
--- a/tempest/cli/simple_read_only/test_glance.py
+++ b/tempest/cli/simple_read_only/test_glance.py
@@ -14,9 +14,8 @@
# under the License.
import re
-import subprocess
-import tempest.cli
+from tempest import cli
from tempest import config
from tempest.openstack.common import log as logging
@@ -25,7 +24,7 @@
LOG = logging.getLogger(__name__)
-class SimpleReadOnlyGlanceClientTest(tempest.cli.ClientTestBase):
+class SimpleReadOnlyGlanceClientTest(cli.ClientTestBase):
"""Basic, read-only tests for Glance CLI client.
Checks return values and output of read-only commands.
@@ -41,7 +40,7 @@
super(SimpleReadOnlyGlanceClientTest, cls).setUpClass()
def test_glance_fake_action(self):
- self.assertRaises(subprocess.CalledProcessError,
+ self.assertRaises(cli.CommandFailed,
self.glance,
'this-does-not-exist')
@@ -76,7 +75,7 @@
commands = set(commands)
wanted_commands = set(('image-create', 'image-delete', 'help',
'image-download', 'image-show', 'image-update',
- 'member-add', 'member-create', 'member-delete',
+ 'member-create', 'member-delete',
'member-list'))
self.assertFalse(wanted_commands - commands)
diff --git a/tempest/cli/simple_read_only/test_keystone.py b/tempest/cli/simple_read_only/test_keystone.py
index dda65c1..f8dcdba 100644
--- a/tempest/cli/simple_read_only/test_keystone.py
+++ b/tempest/cli/simple_read_only/test_keystone.py
@@ -14,9 +14,8 @@
# under the License.
import re
-import subprocess
-import tempest.cli
+from tempest import cli
from tempest import config
from tempest.openstack.common import log as logging
@@ -26,7 +25,7 @@
LOG = logging.getLogger(__name__)
-class SimpleReadOnlyKeystoneClientTest(tempest.cli.ClientTestBase):
+class SimpleReadOnlyKeystoneClientTest(cli.ClientTestBase):
"""Basic, read-only tests for Keystone CLI client.
Checks return values and output of read-only commands.
@@ -35,7 +34,7 @@
"""
def test_admin_fake_action(self):
- self.assertRaises(subprocess.CalledProcessError,
+ self.assertRaises(cli.CommandFailed,
self.keystone,
'this-does-not-exist')
diff --git a/tempest/cli/simple_read_only/test_neutron.py b/tempest/cli/simple_read_only/test_neutron.py
index 49d079e..2643596 100644
--- a/tempest/cli/simple_read_only/test_neutron.py
+++ b/tempest/cli/simple_read_only/test_neutron.py
@@ -14,7 +14,6 @@
# under the License.
import re
-import subprocess
from tempest import cli
from tempest import config
@@ -43,7 +42,7 @@
@test.attr(type='smoke')
def test_neutron_fake_action(self):
- self.assertRaises(subprocess.CalledProcessError,
+ self.assertRaises(cli.CommandFailed,
self.neutron,
'this-does-not-exist')
diff --git a/tempest/cli/simple_read_only/test_nova.py b/tempest/cli/simple_read_only/test_nova.py
index 1c1ddf1..70eb9ef 100644
--- a/tempest/cli/simple_read_only/test_nova.py
+++ b/tempest/cli/simple_read_only/test_nova.py
@@ -13,11 +13,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-import subprocess
-
import testtools
-import tempest.cli
+from tempest import cli
from tempest import config
from tempest.openstack.common import log as logging
import tempest.test
@@ -27,7 +25,7 @@
LOG = logging.getLogger(__name__)
-class SimpleReadOnlyNovaClientTest(tempest.cli.ClientTestBase):
+class SimpleReadOnlyNovaClientTest(cli.ClientTestBase):
"""
This is a first pass at a simple read only python-novaclient test. This
@@ -49,7 +47,7 @@
super(SimpleReadOnlyNovaClientTest, cls).setUpClass()
def test_admin_fake_action(self):
- self.assertRaises(subprocess.CalledProcessError,
+ self.assertRaises(cli.CommandFailed,
self.nova,
'this-does-nova-exist')
@@ -86,11 +84,11 @@
self.nova('endpoints')
def test_admin_flavor_acces_list(self):
- self.assertRaises(subprocess.CalledProcessError,
+ self.assertRaises(cli.CommandFailed,
self.nova,
'flavor-access-list')
# Failed to get access list for public flavor type
- self.assertRaises(subprocess.CalledProcessError,
+ self.assertRaises(cli.CommandFailed,
self.nova,
'flavor-access-list',
params='--flavor m1.tiny')
@@ -127,7 +125,7 @@
self.nova('list')
self.nova('list', params='--all-tenants 1')
self.nova('list', params='--all-tenants 0')
- self.assertRaises(subprocess.CalledProcessError,
+ self.assertRaises(cli.CommandFailed,
self.nova,
'list',
params='--all-tenants bad')
diff --git a/tempest/cli/simple_read_only/test_nova_manage.py b/tempest/cli/simple_read_only/test_nova_manage.py
index f1fee2e..67c19d8 100644
--- a/tempest/cli/simple_read_only/test_nova_manage.py
+++ b/tempest/cli/simple_read_only/test_nova_manage.py
@@ -13,9 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import subprocess
-
-import tempest.cli
+from tempest import cli
from tempest import config
from tempest.openstack.common import log as logging
@@ -24,7 +22,7 @@
LOG = logging.getLogger(__name__)
-class SimpleReadOnlyNovaManageTest(tempest.cli.ClientTestBase):
+class SimpleReadOnlyNovaManageTest(cli.ClientTestBase):
"""
This is a first pass at a simple read only nova-manage test. This
@@ -48,7 +46,7 @@
super(SimpleReadOnlyNovaManageTest, cls).setUpClass()
def test_admin_fake_action(self):
- self.assertRaises(subprocess.CalledProcessError,
+ self.assertRaises(cli.CommandFailed,
self.nova_manage,
'this-does-nova-exist')
diff --git a/tempest/cli/simple_read_only/test_sahara.py b/tempest/cli/simple_read_only/test_sahara.py
index f00dcae..773921a 100644
--- a/tempest/cli/simple_read_only/test_sahara.py
+++ b/tempest/cli/simple_read_only/test_sahara.py
@@ -14,7 +14,6 @@
# limitations under the License.
import logging
import re
-import subprocess
from tempest import cli
from tempest import config
@@ -42,7 +41,7 @@
@test.attr(type='negative')
def test_sahara_fake_action(self):
- self.assertRaises(subprocess.CalledProcessError,
+ self.assertRaises(cli.CommandFailed,
self.sahara,
'this-does-not-exist')
diff --git a/tempest/cli/simple_read_only/test_swift.py b/tempest/cli/simple_read_only/test_swift.py
index 6d6caa7..c778542 100644
--- a/tempest/cli/simple_read_only/test_swift.py
+++ b/tempest/cli/simple_read_only/test_swift.py
@@ -14,15 +14,14 @@
# under the License.
import re
-import subprocess
-import tempest.cli
+from tempest import cli
from tempest import config
CONF = config.CONF
-class SimpleReadOnlySwiftClientTest(tempest.cli.ClientTestBase):
+class SimpleReadOnlySwiftClientTest(cli.ClientTestBase):
"""Basic, read-only tests for Swift CLI client.
Checks return values and output of read-only commands.
@@ -38,7 +37,7 @@
super(SimpleReadOnlySwiftClientTest, cls).setUpClass()
def test_swift_fake_action(self):
- self.assertRaises(subprocess.CalledProcessError,
+ self.assertRaises(cli.CommandFailed,
self.swift,
'this-does-not-exist')
diff --git a/tempest/cmd/javelin.py b/tempest/cmd/javelin.py
index 0b72b1c..c1a2e46 100755
--- a/tempest/cmd/javelin.py
+++ b/tempest/cmd/javelin.py
@@ -28,6 +28,7 @@
import argparse
import tempest.auth
+from tempest import config
from tempest import exceptions
from tempest.services.compute.json import flavors_client
from tempest.services.compute.json import servers_client
@@ -218,6 +219,8 @@
def check_objects(self):
"""Check that the objects created are still there."""
+ if 'objects' not in self.res:
+ return
LOG.info("checking objects")
for obj in self.res['objects']:
client = client_for_user(obj['owner'])
@@ -228,6 +231,8 @@
def check_servers(self):
"""Check that the servers are still up and running."""
+ if 'servers' not in self.res:
+ return
LOG.info("checking servers")
for server in self.res['servers']:
client = client_for_user(server['owner'])
@@ -239,12 +244,18 @@
r, found = client.servers.get_server(found['id'])
# get the ipv4 address
addr = found['addresses']['private'][0]['addr']
- self.assertEqual(os.system("ping -c 1 " + addr), 0,
- "Server %s is not pingable at %s" % (
- server['name'], addr))
+ for count in range(60):
+ return_code = os.system("ping -c1 " + addr)
+ if return_code is 0:
+ break
+ self.assertNotEqual(count, 59,
+ "Server %s is not pingable at %s" % (
+ server['name'], addr))
def check_volumes(self):
"""Check that the volumes are still there and attached."""
+ if 'volumes' not in self.res:
+ return
LOG.info("checking volumes")
for volume in self.res['volumes']:
client = client_for_user(volume['owner'])
@@ -273,6 +284,8 @@
def create_objects(objects):
+ if not objects:
+ return
LOG.info("Creating objects")
for obj in objects:
LOG.debug("Object %s" % obj)
@@ -297,6 +310,9 @@
def create_images(images):
+ if not images:
+ return
+ LOG.info("Creating images")
for image in images:
client = client_for_user(image['owner'])
@@ -304,6 +320,7 @@
r, body = client.images.image_list()
names = [x['name'] for x in body]
if image['name'] in names:
+ LOG.info("Image '%s' already exists" % image['name'])
continue
# special handling for 3 part image
@@ -359,15 +376,39 @@
def create_servers(servers):
+ if not servers:
+ return
+ LOG.info("Creating servers")
for server in servers:
client = client_for_user(server['owner'])
if _get_server_by_name(client, server['name']):
+ LOG.info("Server '%s' already exists" % server['name'])
continue
image_id = _get_image_by_name(client, server['image'])['id']
flavor_id = _get_flavor_by_name(client, server['flavor'])['id']
- client.servers.create_server(server['name'], image_id, flavor_id)
+ resp, body = client.servers.create_server(server['name'], image_id,
+ flavor_id)
+ server_id = body['id']
+ client.servers.wait_for_server_status(server_id, 'ACTIVE')
+
+
+def destroy_servers(servers):
+ if not servers:
+ return
+ LOG.info("Destroying servers")
+ for server in servers:
+ client = client_for_user(server['owner'])
+
+ response = _get_server_by_name(client, server['name'])
+ if not response:
+ LOG.info("Server '%s' does not exist" % server['name'])
+ continue
+
+ client.servers.delete_server(response['id'])
+ client.servers.wait_for_server_termination(response['id'],
+ ignore_error=True)
#######################
@@ -428,6 +469,23 @@
# attach_volumes(RES['volumes'])
+def destroy_resources():
+ LOG.info("Destroying Resources")
+ # Destroy in inverse order of create
+
+ # Future
+ # detach_volumes
+ # destroy_volumes
+
+ destroy_servers(RES['servers'])
+ LOG.warn("Destroy mode incomplete")
+ # destroy_images
+ # destroy_objects
+
+ # destroy_users
+ # destroy_tenants
+
+
def get_options():
global OPTS
parser = argparse.ArgumentParser(
@@ -440,11 +498,17 @@
required=True,
metavar='resourcefile.yaml',
help='Resources definition yaml file')
+
parser.add_argument(
'-d', '--devstack-base',
required=True,
metavar='/opt/stack/old',
help='Devstack base directory for retrieving artifacts')
+ parser.add_argument(
+ '-c', '--config-file',
+ metavar='/etc/tempest.conf',
+ help='path to javelin2(tempest) config file')
+
# auth bits, letting us also just source the devstack openrc
parser.add_argument('--os-username',
metavar='<auth-user-name>',
@@ -464,6 +528,8 @@
print("ERROR: Unknown mode -m %s\n" % OPTS.mode)
parser.print_help()
sys.exit(1)
+ if OPTS.config_file:
+ config.CONF.set_config_path(OPTS.config_file)
def setup_logging(debug=True):
@@ -491,15 +557,20 @@
if OPTS.mode == 'create':
create_resources()
+ # Make sure the resources we just created actually work
+ checker = JavelinCheck(USERS, RES)
+ checker.check()
elif OPTS.mode == 'check':
collect_users(RES['users'])
checker = JavelinCheck(USERS, RES)
checker.check()
elif OPTS.mode == 'destroy':
- LOG.warn("Destroy mode not yet implemented")
+ collect_users(RES['users'])
+ destroy_resources()
else:
LOG.error('Unknown mode %s' % OPTS.mode)
return 1
+ LOG.info('javelin2 successfully finished')
return 0
if __name__ == "__main__":
diff --git a/tempest/cmd/verify_tempest_config.py b/tempest/cmd/verify_tempest_config.py
index 0834cff..673da4f 100755
--- a/tempest/cmd/verify_tempest_config.py
+++ b/tempest/cmd/verify_tempest_config.py
@@ -29,13 +29,12 @@
CONF = config.CONF
RAW_HTTP = httplib2.Http()
-CONF_FILE = None
-OUTFILE = sys.stdout
+CONF_PARSER = None
def _get_config_file():
default_config_dir = os.path.join(os.path.abspath(
- os.path.dirname(os.path.dirname(__file__))), "etc")
+ os.path.dirname(os.path.dirname(os.path.dirname(__file__)))), "etc")
default_config_file = "tempest.conf"
conf_dir = os.environ.get('TEMPEST_CONFIG_DIR', default_config_dir)
@@ -46,14 +45,9 @@
def change_option(option, group, value):
- config_parse = moves.configparser.SafeConfigParser()
- config_parse.optionxform = str
- config_parse.readfp(CONF_FILE)
- if not config_parse.has_section(group):
- config_parse.add_section(group)
- config_parse.set(group, option, str(value))
- global OUTFILE
- config_parse.write(OUTFILE)
+ if not CONF_PARSER.has_section(group):
+ CONF_PARSER.add_section(group)
+ CONF_PARSER.set(group, option, str(value))
def print_and_or_update(option, group, value, update):
@@ -288,6 +282,9 @@
if update:
change_option(codename_match[cfgname],
'service_available', True)
+ # If we are going to enable this we should allow
+ # extension checks.
+ avail_services.append(codename_match[cfgname])
else:
avail_services.append(codename_match[cfgname])
return avail_services
@@ -321,12 +318,16 @@
opts = parse_args()
update = opts.update
replace = opts.replace_ext
- global CONF_FILE
- global OUTFILE
+ global CONF_PARSER
+
+ outfile = sys.stdout
if update:
- CONF_FILE = _get_config_file()
+ conf_file = _get_config_file()
if opts.output:
- OUTFILE = open(opts.output, 'w+')
+ outfile = open(opts.output, 'w+')
+ CONF_PARSER = moves.configparser.SafeConfigParser()
+ CONF_PARSER.optionxform = str
+ CONF_PARSER.readfp(conf_file)
os = clients.ComputeAdminManager(interface='json')
services = check_service_availability(os, update)
results = {}
@@ -341,9 +342,10 @@
verify_nova_api_versions(os, update)
verify_cinder_api_versions(os, update)
display_results(results, update, replace)
- if CONF_FILE:
- CONF_FILE.close()
- OUTFILE.close()
+ if update:
+ conf_file.close()
+ CONF_PARSER.write(outfile)
+ outfile.close()
if __name__ == "__main__":
diff --git a/tempest/common/cred_provider.py b/tempest/common/cred_provider.py
new file mode 100644
index 0000000..dc4f049
--- /dev/null
+++ b/tempest/common/cred_provider.py
@@ -0,0 +1,44 @@
+# (c) 2014 Deutsche Telekom AG
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import abc
+import six
+
+from tempest import config
+from tempest.openstack.common import log as logging
+
+CONF = config.CONF
+LOG = logging.getLogger(__name__)
+
+
+@six.add_metaclass(abc.ABCMeta)
+class CredentialProvider(object):
+ def __init__(self, name, tempest_client=True, interface='json',
+ password='pass', network_resources=None):
+ self.name = name
+
+ @abc.abstractmethod
+ def get_primary_creds(self):
+ return
+
+ @abc.abstractmethod
+ def get_admin_creds(self):
+ return
+
+ @abc.abstractmethod
+ def get_alt_creds(self):
+ return
+
+ @abc.abstractmethod
+ def clear_isolated_creds(self):
+ return
diff --git a/tempest/common/isolated_creds.py b/tempest/common/isolated_creds.py
index 208f42f..98b0116 100644
--- a/tempest/common/isolated_creds.py
+++ b/tempest/common/isolated_creds.py
@@ -16,6 +16,7 @@
from tempest import auth
from tempest import clients
+from tempest.common import cred_provider
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
@@ -25,15 +26,16 @@
LOG = logging.getLogger(__name__)
-class IsolatedCreds(object):
+class IsolatedCreds(cred_provider.CredentialProvider):
def __init__(self, name, tempest_client=True, interface='json',
password='pass', network_resources=None):
+ super(IsolatedCreds, self).__init__(name, tempest_client, interface,
+ password, network_resources)
self.network_resources = network_resources
self.isolated_creds = {}
self.isolated_net_resources = {}
self.ports = []
- self.name = name
self.tempest_client = tempest_client
self.interface = interface
self.password = password
diff --git a/tempest/config.py b/tempest/config.py
index 0796d98..c83f500 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -1085,18 +1085,22 @@
cfg.CONF.set_default('domain_name', self.identity.admin_domain_name,
group='compute-admin')
- def __init__(self, parse_conf=True):
+ def __init__(self, parse_conf=True, config_path=None):
"""Initialize a configuration from a conf directory and conf file."""
super(TempestConfigPrivate, self).__init__()
config_files = []
failsafe_path = "/etc/tempest/" + self.DEFAULT_CONFIG_FILE
- # Environment variables override defaults...
- conf_dir = os.environ.get('TEMPEST_CONFIG_DIR',
- self.DEFAULT_CONFIG_DIR)
- conf_file = os.environ.get('TEMPEST_CONFIG', self.DEFAULT_CONFIG_FILE)
+ if config_path:
+ path = config_path
+ else:
+ # Environment variables override defaults...
+ conf_dir = os.environ.get('TEMPEST_CONFIG_DIR',
+ self.DEFAULT_CONFIG_DIR)
+ conf_file = os.environ.get('TEMPEST_CONFIG',
+ self.DEFAULT_CONFIG_FILE)
- path = os.path.join(conf_dir, conf_file)
+ path = os.path.join(conf_dir, conf_file)
if not os.path.isfile(path):
path = failsafe_path
@@ -1118,6 +1122,7 @@
class TempestConfigProxy(object):
_config = None
+ _path = None
_extra_log_defaults = [
'keystoneclient.session=INFO',
@@ -1134,9 +1139,12 @@
def __getattr__(self, attr):
if not self._config:
self._fix_log_levels()
- self._config = TempestConfigPrivate()
+ self._config = TempestConfigPrivate(config_path=self._path)
return getattr(self._config, attr)
+ def set_config_path(self, path):
+ self._path = path
+
CONF = TempestConfigProxy()
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index ca79325..aa24c31 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -53,6 +53,32 @@
LOG_cinder_client.addHandler(log.NullHandler())
+class ScenarioTest(tempest.test.BaseTestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ super(ScenarioTest, cls).setUpClass()
+ cls.isolated_creds = isolated_creds.IsolatedCreds(
+ cls.__name__, tempest_client=True,
+ network_resources=cls.network_resources)
+ cls.manager = clients.Manager(
+ credentials=cls.credentials()
+ )
+
+ @classmethod
+ def _get_credentials(cls, get_creds, ctype):
+ if CONF.compute.allow_tenant_isolation:
+ creds = get_creds()
+ else:
+ creds = auth.get_default_credentials(ctype)
+ return creds
+
+ @classmethod
+ def credentials(cls):
+ return cls._get_credentials(cls.isolated_creds.get_primary_creds,
+ 'user')
+
+
class OfficialClientTest(tempest.test.BaseTestCase):
"""
Official Client test base class for scenario testing.
diff --git a/tempest/scenario/test_dashboard_basic_ops.py b/tempest/scenario/test_dashboard_basic_ops.py
index 6418a73..4fcc70a 100644
--- a/tempest/scenario/test_dashboard_basic_ops.py
+++ b/tempest/scenario/test_dashboard_basic_ops.py
@@ -24,7 +24,7 @@
CONF = config.CONF
-class TestDashboardBasicOps(manager.OfficialClientTest):
+class TestDashboardBasicOps(manager.ScenarioTest):
"""
This is a basic scenario test:
diff --git a/tempest/services/__init__.py b/tempest/services/__init__.py
index e7bec60..e69de29 100644
--- a/tempest/services/__init__.py
+++ b/tempest/services/__init__.py
@@ -1,37 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Base Service class, which acts as a descriptor for an OpenStack service
-in the test environment
-"""
-
-
-class Service(object):
-
- def __init__(self, config):
- """
- Initializes the service.
-
- :param config: `tempest.config.Config` object
- """
- self.config = config
-
- def get_client(self):
- """
- Returns a client object that may be used to query
- the service API.
- """
- raise NotImplementedError
diff --git a/tempest/services/identity/v3/json/identity_client.py b/tempest/services/identity/v3/json/identity_client.py
index 329f026..0188c2a 100644
--- a/tempest/services/identity/v3/json/identity_client.py
+++ b/tempest/services/identity/v3/json/identity_client.py
@@ -77,6 +77,17 @@
body = json.loads(body)
return resp, body['user']
+ def update_user_password(self, user_id, password, original_password):
+ """Updates a user password."""
+ update_user = {
+ 'password': password,
+ 'original_password': original_password
+ }
+ update_user = json.dumps({'user': update_user})
+ resp, _ = self.post('users/%s/password' % user_id, update_user)
+ self.expected_success(204, resp.status)
+ return resp
+
def list_user_projects(self, user_id):
"""Lists the projects on which a user has roles assigned."""
resp, body = self.get('users/%s/projects' % user_id)
@@ -502,10 +513,7 @@
"""HEAD Check if role is delegated by a trust."""
resp, body = self.head("OS-TRUST/trusts/%s/roles/%s"
% (trust_id, role_id))
- # This code needs to change to 200 when the keystone changes
- # for bug 1334368 merge and check_trust_roles test is
- # unskipped
- self.expected_success(204, resp.status)
+ self.expected_success(200, resp.status)
return resp, body
diff --git a/tempest/services/identity/v3/xml/identity_client.py b/tempest/services/identity/v3/xml/identity_client.py
index 3790f13..f3e084e 100644
--- a/tempest/services/identity/v3/xml/identity_client.py
+++ b/tempest/services/identity/v3/xml/identity_client.py
@@ -139,6 +139,17 @@
body = self._parse_body(etree.fromstring(body))
return resp, body
+ def update_user_password(self, user_id, password, original_password):
+ """Updates a user password."""
+ update_user = common.Element("user",
+ xmlns=XMLNS,
+ password=password,
+ original_password=original_password)
+ resp, _ = self.post('users/%s/password' % user_id,
+ str(common.Document(update_user)))
+ self.expected_success(204, resp.status)
+ return resp
+
def list_user_projects(self, user_id):
"""Lists the projects on which a user has roles assigned."""
resp, body = self.get('users/%s/projects' % user_id)
diff --git a/tempest/services/orchestration/json/orchestration_client.py b/tempest/services/orchestration/json/orchestration_client.py
index d325eb5..46b0ec4 100644
--- a/tempest/services/orchestration/json/orchestration_client.py
+++ b/tempest/services/orchestration/json/orchestration_client.py
@@ -181,7 +181,11 @@
fail_regexp = re.compile(failure_pattern)
while True:
- resp, body = self.get_stack(stack_identifier)
+ try:
+ resp, body = self.get_stack(stack_identifier)
+ except exceptions.NotFound:
+ if status == 'DELETE_COMPLETE':
+ return
stack_name = body['stack_name']
stack_status = body['stack_status']
if stack_status == status:
diff --git a/tempest/tests/cli/test_command_failed.py b/tempest/tests/cli/test_command_failed.py
new file mode 100644
index 0000000..c539ac6
--- /dev/null
+++ b/tempest/tests/cli/test_command_failed.py
@@ -0,0 +1,30 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest import cli
+from tempest.tests import base
+
+
+class TestOutputParser(base.TestCase):
+
+ def test_command_failed_exception(self):
+ returncode = 1
+ cmd = "foo"
+ stdout = "output"
+ stderr = "error"
+ try:
+ raise cli.CommandFailed(returncode, cmd, stdout, stderr)
+ except cli.CommandFailed as e:
+ self.assertIn(str(returncode), str(e))
+ self.assertIn(cmd, str(e))
+ self.assertIn(stdout, str(e))
+ self.assertIn(stderr, str(e))
diff --git a/tempest/tests/fake_config.py b/tempest/tests/fake_config.py
index 4bed0c2..536cbcf 100644
--- a/tempest/tests/fake_config.py
+++ b/tempest/tests/fake_config.py
@@ -58,6 +58,6 @@
class FakePrivate(config.TempestConfigPrivate):
- def __init__(self):
+ def __init__(self, parse_conf=True, config_path=None):
cfg.CONF([], default_config_files=[])
self._set_attrs()
diff --git a/tempest/tests/test_waiters.py b/tempest/tests/test_waiters.py
index a29cb46..1f9825e 100644
--- a/tempest/tests/test_waiters.py
+++ b/tempest/tests/test_waiters.py
@@ -15,7 +15,6 @@
import time
import mock
-import testtools
from tempest.common import waiters
from tempest import exceptions
@@ -48,221 +47,3 @@
self.assertRaises(exceptions.AddImageException,
waiters.wait_for_image_status,
self.client, 'fake_image_id', 'active')
-
-
-class TestServerWaiters(base.TestCase):
- def setUp(self):
- super(TestServerWaiters, self).setUp()
- self.client = mock.MagicMock()
- self.client.build_timeout = 1
- self.client.build_interval = 1
-
- def test_wait_for_server_status(self):
- self.client.get_server.return_value = (None, {'status':
- 'active'}
- )
- start_time = int(time.time())
- waiters.wait_for_server_status(self.client, 'fake_svr_id',
- 'active'
- )
- end_time = int(time.time())
- # Ensure waiter returns before build_timeout
- self.assertTrue((end_time - start_time) < 2)
-
- def test_wait_for_server_status_BUILD_from_not_UNKNOWN(self):
- self.client.get_server.return_value = (None, {'status': 'active'})
- start_time = int(time.time())
- waiters.wait_for_server_status(self.client, 'fake_svr_id',
- 'BUILD')
- end_time = int(time.time())
- # Ensure waiter returns before build_timeout
- self.assertTrue((end_time - start_time) < 2)
-
- def test_wait_for_server_status_ready_wait_with_BUILD(self):
- self.client.get_server.return_value = (None, {'status': 'BUILD'})
- start_time = int(time.time())
- waiters.wait_for_server_status(self.client, 'fake_svr_id',
- 'BUILD', True)
- end_time = int(time.time())
- # Ensure waiter returns before build_timeout
- self.assertTrue((end_time - start_time) < 2)
-
- def test_wait_for_server_status_ready_wait(self):
- self.client.get_server.return_value = (None, {'status':
- 'ERROR',
- 'OS-EXT-STS:task_state':
- 'n/a'
- }
- )
- self.client.get_console_output.return_value = (None,
- {'output': 'Server fake_svr_id failed to reach '
- 'active status and task state n/a within the '
- 'required time (1 s).\nCurrent status: SUSPENDED.'
- '\nCurrent task state: None.'}
- )
- self.assertRaises(exceptions.BuildErrorException,
- waiters.wait_for_server_status,
- self.client, 'fake_svr_id', 'active',
- ready_wait=True, extra_timeout=0,
- raise_on_error=True
- )
-
- def test_wait_for_server_status_no_ready_wait(self):
- self.client.get_server.return_value = (None, {'status':
- 'ERROR',
- 'OS-EXT-STS:task_state':
- 'n/a'
- }
- )
- start_time = int(time.time())
- waiters.wait_for_server_status(self.client, 'fake_svr_id',
- 'ERROR', ready_wait=False,
- extra_timeout=10, raise_on_error=True
- )
- end_time = int(time.time())
- # Ensure waiter returns before build_timeout + extra_timeout
- self.assertTrue((end_time - start_time) < 12)
-
- def test_wait_for_server_status_timeout(self):
- self.client.get_server.return_value = (None, {'status': 'SUSPENDED'})
- self.client.get_console_output.return_value = (None,
- {'output': 'Server fake_svr_id failed to reach '
- 'active status and task state n/a within the '
- 'required time (1 s).\nCurrent status: SUSPENDED.'
- '\nCurrent task state: None.'}
- )
- self.assertRaises(exceptions.TimeoutException,
- waiters.wait_for_server_status,
- self.client, 'fake_svr_id', 'active')
-
- def test_wait_for_server_status_extra_timeout(self):
- self.client.get_server.return_value = (None, {'status': 'SUSPENDED'})
- start_time = int(time.time())
- self.client.get_console_output.return_value = (None,
- {'output': 'Server fake_svr_id failed to reach '
- 'active status and task state n/a within the '
- 'required time (10 s). \nCurrent status: SUSPENDED.'
- '\nCurrent task state: None.'}
- )
- self.assertRaises(exceptions.TimeoutException,
- waiters.wait_for_server_status,
- self.client, 'fake_svr_id',
- 'active', ready_wait=True,
- extra_timeout=10, raise_on_error=True
- )
- end_time = int(time.time())
- # Ensure waiter returns after build_timeout but
- # before build_timeout+extra timeout
- self.assertTrue(10 < (end_time - start_time) < 12)
-
- def test_wait_for_server_status_error_on_server_create(self):
- self.client.get_server.return_value = (None, {'status': 'ERROR'})
- self.client.get_console_output.return_value = (None,
- {'output': 'Server fake_svr_id failed to reach '
- 'activestatus and task state n/a within the '
- 'required time (1 s).\nCurrent status: ERROR.'
- '\nCurrent task state: None.'}
- )
- self.assertRaises(exceptions.BuildErrorException,
- waiters.wait_for_server_status,
- self.client, 'fake_svr_id', 'active')
-
- def test_wait_for_server_status_no_raise_on_error(self):
- self.client.get_server.return_value = (None, {'status': 'ERROR'})
- self.client.get_console_output.return_value = (None,
- {'output': 'Server fake_svr_id failed to reach '
- 'activestatus and task state n/a within the '
- 'required time (1 s).\nCurrent status: ERROR.'
- '\nCurrent task state: None.'}
- )
- self.assertRaises(exceptions.TimeoutException,
- waiters.wait_for_server_status,
- self.client, 'fake_svr_id', 'active',
- ready_wait=True, extra_timeout=0,
- raise_on_error=False
- )
-
- def test_wait_for_server_status_no_ready_wait_timeout(self):
- self.client.get_server.return_value = (None, {'status': 'ERROR'})
- self.client.get_console_output.return_value = (None,
- {'output': 'Server fake_svr_id failed to reach '
- 'active status and task state n/a within the '
- 'required time (11 s).\nCurrent status: ERROR.'
- '\nCurrent task state: None.'}
- )
- expected_msg = '''Request timed out
-Details: (TestServerWaiters:test_wait_for_server_status_no_ready_wait_timeout)\
- Server fake_svr_id failed to reach active status and task state "n/a" within\
- the required time (11 s). Current status: ERROR. Current task state: None.\
-'''
- with testtools.ExpectedException(exceptions.TimeoutException,
- testtools.matchers.AfterPreprocessing(
- str,
- testtools.matchers.Equals(expected_msg)
- )
- ):
- waiters.wait_for_server_status(self.client, 'fake_svr_id',
- 'active', ready_wait=False,
- extra_timeout=10,
- raise_on_error=False
- )
-
- def test_wait_for_server_status_ready_wait_timeout(self):
- self.client.get_server.return_value = (None, {'status': 'ERROR'})
- self.client.get_console_output.return_value = (None,
- {'output': 'Server fake_svr_id failed to reach '
- 'activestatus and task state n/a within the '
- 'required time (11 s).\nCurrent status: ERROR.'
- '\nCurrent task state: None.'}
- )
- expected_msg = '''Request timed out
-Details: (TestServerWaiters:test_wait_for_server_status_ready_wait_timeout)\
- Server fake_svr_id failed to reach active status and task state "None" within\
- the required time (11 s). Current status: ERROR. Current task state: None.\
-'''
- with testtools.ExpectedException(exceptions.TimeoutException,
- testtools.matchers.AfterPreprocessing(
- str,
- testtools.matchers.Equals(expected_msg)
- )
- ):
- waiters.wait_for_server_status(self.client, 'fake_svr_id',
- 'active', ready_wait=True,
- extra_timeout=10,
- raise_on_error=False
- )
-
- def test_wait_for_changing_server_status(self):
- self.client.get_server.side_effect = [(None, {'status': 'BUILD'}),
- (None, {'status': 'active'})]
- start_time = int(time.time())
- waiters.wait_for_server_status(self.client, 'fake_svr_id',
- 'active', ready_wait=True,
- extra_timeout=10,
- raise_on_error=True
- )
- end_time = int(time.time())
- # Ensure waiter returns before build_timeout + extra_timeout
- self.assertTrue((end_time - start_time) < 12)
-
- def test_wait_for_changing_server_task_status(self):
- self.client.get_server.side_effect = [(None, {'status': 'BUILD',
- 'OS-EXT-STS:task_state':
- 'n/a'
- }
- ),
- (None, {'status': 'active',
- 'OS-EXT-STS:task_state':
- 'None'
- }
- )
- ]
- start_time = int(time.time())
- waiters.wait_for_server_status(self.client, 'fake_svr_id',
- 'active', ready_wait=True,
- extra_timeout=10,
- raise_on_error=True
- )
- end_time = int(time.time())
- # Ensure waiter returns before build_timeout + extra_timeout
- self.assertTrue((end_time - start_time) < 12)