Merge "Added test - conditional object download"
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index f9f02dc..12f7cc3 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -65,6 +65,10 @@
flavor_ref = 1
flavor_ref_alt = 2
+# User names used to authenticate to an instance for a given image.
+image_ssh_user = root
+image_alt_ssh_user = root
+
# Number of seconds to wait while looping to check the status of an
# instance that is building.
build_interval = 10
@@ -222,12 +226,12 @@
# Number of seconds to time out on waiting for a volume
# to be available or reach an expected status
build_timeout = 300
-# Runs Cinder multi-backend tests (requires 2 backend declared in cinder.conf)
+# Runs Cinder multi-backend tests (requires 2 backends declared in cinder.conf)
# They must have different volume_backend_name (backend1_name and backend2_name
# have to be different)
multi_backend_enabled = false
-backend1_name = LVM_iSCSI
-backend2_name = LVM_iSCSI_1
+backend1_name = BACKEND_1
+backend2_name = BACKEND_2
[object-storage]
# This section contains configuration options used when executing tests
@@ -314,3 +318,19 @@
# Name of existing keypair to launch servers with. The default is not to specify
# any key, which will generate a keypair for each test class
#keypair_name = heat_key
+
+[scenario]
+# Directory containing image files
+img_dir = /opt/stack/new/devstack/files/images/cirros-0.3.1-x86_64-uec
+
+# AMI image file name
+ami_img_file = cirros-0.3.1-x86_64-blank.img
+
+# ARI image file name
+ari_img_file = cirros-0.3.1-x86_64-initrd
+
+# AKI image file name
+aki_img_file = cirros-0.3.1-x86_64-vmlinuz
+
+# ssh username for the image file
+ssh_user = cirros
diff --git a/requirements.txt b/requirements.txt
index 4873d75..19d6e0b 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -12,6 +12,7 @@
python-keystoneclient>=0.2.0
python-novaclient>=2.10.0
python-quantumclient>=2.1
+python-cinderclient>=1.0.4,<2
testresources
keyring
testrepository
diff --git a/run_tests.sh b/run_tests.sh
index d5b2494..366564e 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -166,11 +166,14 @@
fi
run_tests
+retval=$?
if [ $nova_coverage -eq 1 ]; then
run_coverage_report
fi
if [ -z "$noseargs" ]; then
- run_pep8
+ run_pep8
fi
+
+exit $retval
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 9883c00..48ef296 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -186,15 +186,24 @@
flavor = kwargs.get('flavor', cls.flavor_ref)
image_id = kwargs.get('image_id', cls.image_ref)
- resp, server = cls.servers_client.create_server(
+ resp, body = cls.servers_client.create_server(
name, image_id, flavor, **kwargs)
- cls.servers.append(server)
+
+ # handle the case of multiple servers
+ servers = [body]
+ if 'min_count' in kwargs or 'max_count' in kwargs:
+ # Get servers created which name match with name param.
+ r, b = cls.servers_client.list_servers()
+ servers = [s for s in b['servers'] if s['name'].startswith(name)]
+
+ cls.servers.extend(servers)
if 'wait_until' in kwargs:
- cls.servers_client.wait_for_server_status(
- server['id'], kwargs['wait_until'])
+ for server in servers:
+ cls.servers_client.wait_for_server_status(
+ server['id'], kwargs['wait_until'])
- return resp, server
+ return resp, body
def wait_for(self, condition):
"""Repeatedly calls condition() until a timeout."""
diff --git a/tempest/api/compute/images/test_images_oneserver.py b/tempest/api/compute/images/test_images_oneserver.py
index c7f0b23..4163245 100644
--- a/tempest/api/compute/images/test_images_oneserver.py
+++ b/tempest/api/compute/images/test_images_oneserver.py
@@ -131,7 +131,7 @@
# Verify the image was deleted correctly
resp, body = self.client.delete_image(image_id)
self.assertEqual('204', resp['status'])
- self.assertRaises(exceptions.NotFound, self.client.get_image, image_id)
+ self.client.wait_for_resource_deletion(image_id)
@testtools.skipUnless(compute.MULTI_USER,
'Need multiple users for this test.')
diff --git a/tempest/api/compute/servers/test_list_servers_negative.py b/tempest/api/compute/servers/test_list_servers_negative.py
index 0f35ee5..db9bdc1 100644
--- a/tempest/api/compute/servers/test_list_servers_negative.py
+++ b/tempest/api/compute/servers/test_list_servers_negative.py
@@ -15,6 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import datetime
from tempest.api import compute
from tempest.api.compute import base
@@ -170,7 +171,8 @@
@attr(type='gate')
def test_list_servers_by_changes_since(self):
# Servers are listed by specifying changes-since date
- changes_since = {'changes-since': '2011-01-01T12:34:00Z'}
+ since = datetime.datetime.utcnow() - datetime.timedelta(minutes=2)
+ changes_since = {'changes-since': since.isoformat()}
resp, body = self.client.list_servers(changes_since)
self.assertEqual('200', resp['status'])
# changes-since returns all instances, including deleted.
diff --git a/tempest/api/compute/servers/test_multiple_create.py b/tempest/api/compute/servers/test_multiple_create.py
index 63bb86d..9fde618 100644
--- a/tempest/api/compute/servers/test_multiple_create.py
+++ b/tempest/api/compute/servers/test_multiple_create.py
@@ -25,16 +25,6 @@
_interface = 'json'
_name = 'multiple-create-test'
- def _get_created_servers(self, name):
- """Get servers created which name match with name param."""
- resp, body = self.servers_client.list_servers()
- servers = body['servers']
- servers_created = []
- for server in servers:
- if server['name'].startswith(name):
- servers_created.append(server)
- return servers_created
-
def _generate_name(self):
return rand_name(self._name)
@@ -45,18 +35,6 @@
"""
kwargs['name'] = kwargs.get('name', self._generate_name())
resp, body = self.create_server(**kwargs)
- created_servers = self._get_created_servers(kwargs['name'])
- # NOTE(maurosr): append it to cls.servers list from base.BaseCompute
- # class.
- self.servers.extend(created_servers)
- # NOTE(maurosr): get a server list, check status of the ones with names
- # that match and wait for them become active. At a first look, since
- # they are building in parallel, wait inside the for doesn't seem be
- # harmful to the performance
- if wait_until is not None:
- for server in created_servers:
- self.servers_client.wait_for_server_status(server['id'],
- wait_until)
return resp, body
diff --git a/tempest/api/compute/servers/test_servers_negative.py b/tempest/api/compute/servers/test_servers_negative.py
index 5f53080..bbe489c 100644
--- a/tempest/api/compute/servers/test_servers_negative.py
+++ b/tempest/api/compute/servers/test_servers_negative.py
@@ -236,7 +236,11 @@
# Create a server with a nonexistent security group
security_groups = [{'name': 'does_not_exist'}]
- self.assertRaises(exceptions.BadRequest,
+ if self.config.network.quantum_available:
+ expected_exception = exceptions.NotFound
+ else:
+ expected_exception = exceptions.BadRequest
+ self.assertRaises(expected_exception,
self.create_server,
security_groups=security_groups)
diff --git a/tempest/api/identity/admin/v3/test_domains.py b/tempest/api/identity/admin/v3/test_domains.py
new file mode 100644
index 0000000..8d019fe
--- /dev/null
+++ b/tempest/api/identity/admin/v3/test_domains.py
@@ -0,0 +1,54 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.identity import base
+from tempest.common.utils.data_utils import rand_name
+from tempest.test import attr
+
+
+class DomainsTestJSON(base.BaseIdentityAdminTest):
+ _interface = 'json'
+
+ def _delete_domain(self, domain_id):
+ # It is necessary to disable the domain before deleting,
+ # or else it would result in unauthorized error
+ _, body = self.v3_client.update_domain(domain_id, enabled=False)
+ resp, _ = self.v3_client.delete_domain(domain_id)
+ self.assertEqual(204, resp.status)
+
+ @attr(type='smoke')
+ def test_list_domains(self):
+ # Test to list domains
+ domain_ids = list()
+ fetched_ids = list()
+ for _ in range(3):
+ _, domain = self.v3_client.create_domain(
+ rand_name('domain-'), description=rand_name('domain-desc-'))
+ # Delete the domain at the end of this method
+ self.addCleanup(self._delete_domain, domain['id'])
+ domain_ids.append(domain['id'])
+ # List and Verify Domains
+ resp, body = self.v3_client.list_domains()
+ self.assertEqual(resp['status'], '200')
+ for d in body:
+ fetched_ids.append(d['id'])
+ missing_doms = [d for d in domain_ids if d not in fetched_ids]
+ self.assertEqual(0, len(missing_doms))
+
+
+class DomainsTestXML(DomainsTestJSON):
+ _interface = 'xml'
diff --git a/tempest/api/image/v1/test_images.py b/tempest/api/image/v1/test_images.py
index c5d3f93..640daa5 100644
--- a/tempest/api/image/v1/test_images.py
+++ b/tempest/api/image/v1/test_images.py
@@ -47,7 +47,6 @@
properties=properties)
self.assertTrue('id' in body)
image_id = body.get('id')
- self.created_images.append(image_id)
self.assertEqual('New Name', body.get('name'))
self.assertTrue(body.get('is_public'))
self.assertEqual('queued', body.get('status'))
@@ -71,8 +70,6 @@
properties={'key1': 'value1',
'key2': 'value2'})
self.assertTrue('id' in body)
- image_id = body.get('id')
- self.created_images.append(image_id)
self.assertEqual('New Remote Image', body.get('name'))
self.assertTrue(body.get('is_public'))
self.assertEqual('active', body.get('status'))
@@ -88,7 +85,6 @@
copy_from=self.config.images.http_image)
self.assertTrue('id' in body)
image_id = body.get('id')
- self.created_images.append(image_id)
self.assertEqual('New Http Image', body.get('name'))
self.assertTrue(body.get('is_public'))
self.client.wait_for_image_status(image_id, 'active')
@@ -106,8 +102,6 @@
min_ram=40,
properties=properties)
self.assertTrue('id' in body)
- image_id = body.get('id')
- self.created_images.append(image_id)
self.assertEqual('New_image_with_min_ram', body.get('name'))
self.assertTrue(body.get('is_public'))
self.assertEqual('queued', body.get('status'))
diff --git a/tempest/api/image/v2/test_images.py b/tempest/api/image/v2/test_images.py
index 966adc3..34db6e3 100644
--- a/tempest/api/image/v2/test_images.py
+++ b/tempest/api/image/v2/test_images.py
@@ -50,7 +50,6 @@
visibility='public')
self.assertTrue('id' in body)
image_id = body.get('id')
- self.created_images.append(image_id)
self.assertTrue('name' in body)
self.assertEqual('New Name', body.get('name'))
self.assertTrue('visibility' in body)
@@ -79,7 +78,7 @@
# We add a few images here to test the listing functionality of
# the images API
for x in xrange(0, 10):
- cls.created_images.append(cls._create_standard_image(x))
+ cls._create_standard_image(x)
@classmethod
def _create_standard_image(cls, number):
diff --git a/tempest/api/object_storage/test_account_services.py b/tempest/api/object_storage/test_account_services.py
index d7b87d1..b40774e 100644
--- a/tempest/api/object_storage/test_account_services.py
+++ b/tempest/api/object_storage/test_account_services.py
@@ -53,27 +53,25 @@
self.assertIn('x-account-bytes-used', resp)
@attr(type='smoke')
- def test_create_account_metadata(self):
+ def test_create_and_delete_account_metadata(self):
+ header = 'test-account-meta'
+ data = 'Meta!'
# add metadata to account
- metadata = {'test-account-meta': 'Meta!'}
- resp, _ = \
- self.account_client.create_account_metadata(metadata=metadata)
+ resp, _ = self.account_client.create_account_metadata(
+ metadata={header: data})
self.assertEqual(resp['status'], '204')
- resp, metadata = self.account_client.list_account_metadata()
- self.assertIn('x-account-meta-test-account-meta', resp)
- self.assertEqual(resp['x-account-meta-test-account-meta'], 'Meta!')
+ resp, _ = self.account_client.list_account_metadata()
+ self.assertIn('x-account-meta-' + header, resp)
+ self.assertEqual(resp['x-account-meta-' + header], data)
- @attr(type='smoke')
- def test_delete_account_metadata(self):
# delete metadata from account
- metadata = ['test-account-meta']
resp, _ = \
- self.account_client.delete_account_metadata(metadata=metadata)
+ self.account_client.delete_account_metadata(metadata=[header])
self.assertEqual(resp['status'], '204')
- resp, metadata = self.account_client.list_account_metadata()
- self.assertNotIn('x-account-meta-test-account-meta', resp)
+ resp, _ = self.account_client.list_account_metadata()
+ self.assertNotIn('x-account-meta-' + header, resp)
@attr(type=['negative', 'gate'])
def test_list_containers_with_non_authorized_user(self):
diff --git a/tempest/api/object_storage/test_object_services.py b/tempest/api/object_storage/test_object_services.py
index 8d6cf4b..a83e92c 100644
--- a/tempest/api/object_storage/test_object_services.py
+++ b/tempest/api/object_storage/test_object_services.py
@@ -18,8 +18,6 @@
import hashlib
import time
-import testtools
-
from tempest.api.object_storage import base
from tempest.common.utils.data_utils import arbitrary_string
from tempest.common.utils.data_utils import rand_name
@@ -299,7 +297,6 @@
self.container_name, object_name,
metadata=self.custom_headers)
- @testtools.skip('Until Bug #1097137 is resolved.')
@attr(type='gate')
def test_get_object_using_temp_url(self):
# access object using temporary URL within expiration time
@@ -469,71 +466,3 @@
except Exception as e:
self.fail("Failed to get public readable object with another"
" user creds raised exception is %s" % e)
-
- @testtools.skip('Until Bug #1020722 is resolved.')
- @attr(type='smoke')
- def test_write_public_object_without_using_creds(self):
- # make container public-writable, and create object anonymously, e.g.
- # without using credentials
- try:
- # update container metadata to make publicly writable
- cont_headers = {'X-Container-Write': '-*'}
- resp_meta, body = self.container_client.update_container_metadata(
- self.container_name, metadata=cont_headers, metadata_prefix='')
- self.assertEqual(resp_meta['status'], '204')
- # list container metadata
- resp, _ = self.container_client.list_container_metadata(
- self.container_name)
- self.assertEqual(resp['status'], '204')
- self.assertIn('x-container-write', resp)
- self.assertEqual(resp['x-container-write'], '-*')
-
- object_name = rand_name(name='Object')
- data = arbitrary_string(size=len(object_name),
- base_text=object_name)
- headers = {'Content-Type': 'application/json',
- 'Accept': 'application/json'}
- # create object as anonymous user
- resp, body = self.custom_object_client.create_object(
- self.container_name, object_name, data, metadata=headers)
- self.assertEqual(resp['status'], '201')
-
- except Exception as e:
- self.fail("Failed to create public writable object without using"
- " creds raised exception is %s" % e)
-
- @testtools.skip('Until Bug #1020722 is resolved.')
- @attr(type='smoke')
- def test_write_public_with_another_user_creds(self):
- # make container public-writable, and create object with another user's
- # credentials
- try:
- # update container metadata to make it publicly writable
- cont_headers = {'X-Container-Write': '-*'}
- resp_meta, body = self.container_client.update_container_metadata(
- self.container_name, metadata=cont_headers,
- metadata_prefix='')
- self.assertEqual(resp_meta['status'], '204')
- # list container metadata
- resp, _ = self.container_client.list_container_metadata(
- self.container_name)
- self.assertEqual(resp['status'], '204')
- self.assertIn('x-container-write', resp)
- self.assertEqual(resp['x-container-write'], '-*')
-
- # trying to get auth token of alternative user
- token = self.identity_client_alt.get_auth()
- headers = {'Content-Type': 'application/json',
- 'Accept': 'application/json',
- 'X-Auth-Token': token}
-
- # trying to create an object with another user's creds
- object_name = rand_name(name='Object')
- data = arbitrary_string(size=len(object_name),
- base_text=object_name)
- resp, body = self.custom_object_client.create_object(
- self.container_name, object_name, data, metadata=headers)
- self.assertEqual(resp['status'], '201')
- except Exception as e:
- self.fail("Failed to create public writable object with another"
- " user creds raised exception is %s" % e)
diff --git a/tempest/api/orchestration/stacks/test_instance_cfn_init.py b/tempest/api/orchestration/stacks/test_instance_cfn_init.py
new file mode 100644
index 0000000..2349830
--- /dev/null
+++ b/tempest/api/orchestration/stacks/test_instance_cfn_init.py
@@ -0,0 +1,152 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import logging
+
+from tempest.api.orchestration import base
+from tempest.common.utils.data_utils import rand_name
+from tempest.test import attr
+
+
+LOG = logging.getLogger(__name__)
+
+
+class InstanceCfnInitTestJSON(base.BaseOrchestrationTest):
+ _interface = 'json'
+
+ template = """
+HeatTemplateFormatVersion: '2012-12-12'
+Description: |
+ Template which uses a wait condition to confirm that a minimal
+ cfn-init and cfn-signal has worked
+Parameters:
+ KeyName:
+ Type: String
+ InstanceType:
+ Type: String
+ ImageId:
+ Type: String
+Resources:
+ CfnUser:
+ Type: AWS::IAM::User
+ SmokeKeys:
+ Type: AWS::IAM::AccessKey
+ Properties:
+ UserName: {Ref: CfnUser}
+ SmokeServer:
+ Type: AWS::EC2::Instance
+ Metadata:
+ AWS::CloudFormation::Init:
+ config:
+ files:
+ /tmp/smoke-status:
+ content: smoke test complete
+ /etc/cfn/cfn-credentials:
+ content:
+ Fn::Join:
+ - ''
+ - - AWSAccessKeyId=
+ - {Ref: SmokeKeys}
+ - '
+
+ '
+ - AWSSecretKey=
+ - Fn::GetAtt: [SmokeKeys, SecretAccessKey]
+ - '
+
+ '
+ mode: '000400'
+ owner: root
+ group: root
+ Properties:
+ ImageId: {Ref: ImageId}
+ InstanceType: {Ref: InstanceType}
+ KeyName: {Ref: KeyName}
+ UserData:
+ Fn::Base64:
+ Fn::Join:
+ - ''
+ - - |-
+ #!/bin/bash -v
+ /opt/aws/bin/cfn-init
+ - |-
+ || error_exit ''Failed to run cfn-init''
+ /opt/aws/bin/cfn-signal -e 0 --data "`cat /tmp/smoke-status`" '
+ - {Ref: WaitHandle}
+ - '''
+
+ '
+ WaitHandle:
+ Type: AWS::CloudFormation::WaitConditionHandle
+ WaitCondition:
+ Type: AWS::CloudFormation::WaitCondition
+ DependsOn: SmokeServer
+ Properties:
+ Handle: {Ref: WaitHandle}
+ Timeout: '600'
+Outputs:
+ WaitConditionStatus:
+ Description: Contents of /tmp/smoke-status on SmokeServer
+ Value:
+ Fn::GetAtt: [WaitCondition, Data]
+"""
+
+ @classmethod
+ def setUpClass(cls):
+ super(InstanceCfnInitTestJSON, cls).setUpClass()
+ if not cls.orchestration_cfg.image_ref:
+ raise cls.skipException("No image available to test")
+ cls.client = cls.orchestration_client
+
+ def setUp(self):
+ super(InstanceCfnInitTestJSON, self).setUp()
+ stack_name = rand_name('heat')
+ keypair_name = (self.orchestration_cfg.keypair_name or
+ self._create_keypair()['name'])
+
+ # create the stack
+ self.stack_identifier = self.create_stack(
+ stack_name,
+ self.template,
+ parameters={
+ 'KeyName': keypair_name,
+ 'InstanceType': self.orchestration_cfg.instance_type,
+ 'ImageId': self.orchestration_cfg.image_ref
+ })
+
+ @attr(type='gate')
+ def test_stack_wait_condition_data(self):
+
+ sid = self.stack_identifier
+
+ # wait for create to complete.
+ self.client.wait_for_stack_status(sid, 'CREATE_COMPLETE')
+
+ # fetch the stack
+ resp, body = self.client.get_stack(sid)
+ self.assertEqual('CREATE_COMPLETE', body['stack_status'])
+
+ # fetch the stack
+ resp, body = self.client.get_stack(sid)
+ self.assertEqual('CREATE_COMPLETE', body['stack_status'])
+
+ # This is an assert of great significance, as it means the following
+ # has happened:
+ # - cfn-init read the provided metadata and wrote out a file
+ # - a user was created and credentials written to the instance
+ # - a cfn-signal was built which was signed with provided credentials
+ # - the wait condition was fulfilled and the stack has changed state
+ wait_status = json.loads(body['outputs'][0]['output_value'])
+ self.assertEqual('smoke test complete', wait_status['00000'])
diff --git a/tempest/api/volume/admin/test_multi_backend.py b/tempest/api/volume/admin/test_multi_backend.py
index e278f59..086b981 100644
--- a/tempest/api/volume/admin/test_multi_backend.py
+++ b/tempest/api/volume/admin/test_multi_backend.py
@@ -1,8 +1,5 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -15,12 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-import testtools
-
from tempest.api.volume import base
from tempest.common import log as logging
from tempest.common.utils.data_utils import rand_name
-from tempest import config
from tempest.services.volume.json.admin import volume_types_client
from tempest.services.volume.json import volumes_client
from tempest.test import attr
@@ -31,66 +25,62 @@
class VolumeMultiBackendTest(base.BaseVolumeAdminTest):
_interface = "json"
- multi_backend_enabled = config.TempestConfig().volume.multi_backend_enabled
- backend1_name = config.TempestConfig().volume.backend1_name
- backend2_name = config.TempestConfig().volume.backend2_name
- backend_names_equal = False
- if (backend1_name == backend2_name):
- backend_names_equal = True
-
@classmethod
- @testtools.skipIf(not multi_backend_enabled,
- "Cinder multi-backend feature is not available")
def setUpClass(cls):
super(VolumeMultiBackendTest, cls).setUpClass()
+ if not cls.config.volume.multi_backend_enabled:
+ raise cls.skipException("Cinder multi-backend feature disabled")
+
+ cls.backend1_name = cls.config.volume.backend1_name
+ cls.backend2_name = cls.config.volume.backend2_name
adm_user = cls.config.identity.admin_username
adm_pass = cls.config.identity.admin_password
adm_tenant = cls.config.identity.admin_tenant_name
auth_url = cls.config.identity.uri
- cls.client = volumes_client.VolumesClientJSON(cls.config,
- adm_user,
- adm_pass,
- auth_url,
- adm_tenant)
- cls.client2 = volume_types_client.VolumeTypesClientJSON(cls.config,
- adm_user,
- adm_pass,
- auth_url,
- adm_tenant)
+ cls.volume_client = volumes_client.VolumesClientJSON(cls.config,
+ adm_user,
+ adm_pass,
+ auth_url,
+ adm_tenant)
+ cls.type_client = volume_types_client.VolumeTypesClientJSON(cls.config,
+ adm_user,
+ adm_pass,
+ auth_url,
+ adm_tenant)
- ## variables initialization
- type_name1 = rand_name('type-')
- type_name2 = rand_name('type-')
- cls.volume_type_list = []
-
- vol_name1 = rand_name('Volume-')
- vol_name2 = rand_name('Volume-')
+ cls.volume_type_id_list = []
cls.volume_id_list = []
-
try:
- ## Volume types creation
+ # Volume/Type creation (uses backend1_name)
+ type1_name = rand_name('Type-')
+ vol1_name = rand_name('Volume-')
extra_specs1 = {"volume_backend_name": cls.backend1_name}
- resp, cls.body1 = cls.client2.create_volume_type(
- type_name1, extra_specs=extra_specs1)
- cls.volume_type_list.append(cls.body1)
+ resp, cls.type1 = cls.type_client.create_volume_type(
+ type1_name, extra_specs=extra_specs1)
+ cls.volume_type_id_list.append(cls.type1['id'])
- extra_specs2 = {"volume_backend_name": cls.backend2_name}
- resp, cls.body2 = cls.client2.create_volume_type(
- type_name2, extra_specs=extra_specs2)
- cls.volume_type_list.append(cls.body2)
-
- ## Volumes creation
- resp, cls.volume1 = cls.client.create_volume(
- size=1, display_name=vol_name1, volume_type=type_name1)
- cls.client.wait_for_volume_status(cls.volume1['id'], 'available')
+ resp, cls.volume1 = cls.volume_client.create_volume(
+ size=1, display_name=vol1_name, volume_type=type1_name)
cls.volume_id_list.append(cls.volume1['id'])
+ cls.volume_client.wait_for_volume_status(cls.volume1['id'],
+ 'available')
- resp, cls.volume2 = cls.client.create_volume(
- size=1, display_name=vol_name2, volume_type=type_name2)
- cls.client.wait_for_volume_status(cls.volume2['id'], 'available')
- cls.volume_id_list.append(cls.volume2['id'])
+ if cls.backend1_name != cls.backend2_name:
+ # Volume/Type creation (uses backend2_name)
+ type2_name = rand_name('Type-')
+ vol2_name = rand_name('Volume-')
+ extra_specs2 = {"volume_backend_name": cls.backend2_name}
+ resp, cls.type2 = cls.type_client.create_volume_type(
+ type2_name, extra_specs=extra_specs2)
+ cls.volume_type_id_list.append(cls.type2['id'])
+
+ resp, cls.volume2 = cls.volume_client.create_volume(
+ size=1, display_name=vol2_name, volume_type=type2_name)
+ cls.volume_id_list.append(cls.volume2['id'])
+ cls.volume_client.wait_for_volume_status(cls.volume2['id'],
+ 'available')
except Exception:
LOG.exception("setup failed")
cls.tearDownClass()
@@ -100,60 +90,43 @@
def tearDownClass(cls):
## volumes deletion
for volume_id in cls.volume_id_list:
- cls.client.delete_volume(volume_id)
- cls.client.wait_for_resource_deletion(volume_id)
+ cls.volume_client.delete_volume(volume_id)
+ cls.volume_client.wait_for_resource_deletion(volume_id)
## volume types deletion
- for volume_type in cls.volume_type_list:
- cls.client2.delete_volume_type(volume_type)
+ for volume_type_id in cls.volume_type_id_list:
+ cls.type_client.delete_volume_type(volume_type_id)
super(VolumeMultiBackendTest, cls).tearDownClass()
@attr(type='smoke')
- def test_multi_backend_enabled(self):
- # this test checks that multi backend is enabled for at least the
- # computes where the volumes created in setUp were made
+ def test_backend_name_reporting(self):
+ # this test checks if os-vol-attr:host is populated correctly after
+ # the multi backend feature has been enabled
# if multi-backend is enabled: os-vol-attr:host should be like:
# host@backend_name
- # this test fails if:
- # - multi backend is not enabled
- resp, fetched_volume = self.client.get_volume(self.volume1['id'])
+ resp, volume = self.volume_client.get_volume(self.volume1['id'])
self.assertEqual(200, resp.status)
- volume_host1 = fetched_volume['os-vol-host-attr:host']
- msg = ("Multi-backend is not available for at least host "
- "%(volume_host1)s") % locals()
- self.assertTrue(len(volume_host1.split("@")) > 1, msg)
-
- resp, fetched_volume = self.client.get_volume(self.volume2['id'])
- self.assertEqual(200, resp.status)
-
- volume_host2 = fetched_volume['os-vol-host-attr:host']
- msg = ("Multi-backend is not available for at least host "
- "%(volume_host2)s") % locals()
- self.assertTrue(len(volume_host2.split("@")) > 1, msg)
+ volume1_host = volume['os-vol-host-attr:host']
+ msg = ("multi-backend reporting incorrect values for volume %s" %
+ self.volume1['id'])
+ self.assertTrue(len(volume1_host.split("@")) > 1, msg)
@attr(type='gate')
def test_backend_name_distinction(self):
- # this test checks that the two volumes created at setUp doesn't
- # belong to the same backend (if they are in the same backend, that
- # means, volume_backend_name distinction is not working properly)
- # this test fails if:
- # - tempest.conf is not well configured
- # - the two volumes belongs to the same backend
+ # this test checks that the two volumes created at setUp don't
+ # belong to the same backend (if they are, then the
+ # volume backend distinction is not working properly)
+ if self.backend1_name == self.backend2_name:
+ raise self.skipException("backends configured with same name")
- # checks tempest.conf
- msg = ("tempest.conf is not well configured, "
- "backend1_name and backend2_name are equal")
- self.assertEqual(self.backend_names_equal, False, msg)
+ resp, volume = self.volume_client.get_volume(self.volume1['id'])
+ volume1_host = volume['os-vol-host-attr:host']
- # checks the two volumes belongs to different backend
- resp, fetched_volume = self.client.get_volume(self.volume1['id'])
- volume_host1 = fetched_volume['os-vol-host-attr:host']
+ resp, volume = self.volume_client.get_volume(self.volume2['id'])
+ volume2_host = volume['os-vol-host-attr:host']
- resp, fetched_volume = self.client.get_volume(self.volume2['id'])
- volume_host2 = fetched_volume['os-vol-host-attr:host']
-
- msg = ("volume2 was created in the same backend as volume1: "
- "%(volume_host2)s.") % locals()
- self.assertNotEqual(volume_host2, volume_host1, msg)
+ msg = ("volumes %s and %s were created in the same backend" %
+ (self.volume1['id'], self.volume2['id']))
+ self.assertNotEqual(volume1_host, volume2_host, msg)
diff --git a/tempest/cli/simple_read_only/test_keystone.py b/tempest/cli/simple_read_only/test_keystone.py
index 067f58c..45d519b 100644
--- a/tempest/cli/simple_read_only/test_keystone.py
+++ b/tempest/cli/simple_read_only/test_keystone.py
@@ -107,3 +107,14 @@
def test_admin_bashcompletion(self):
self.keystone('bash-completion')
+
+ # Optional arguments:
+
+ def test_admin_version(self):
+ self.keystone('', flags='--version')
+
+ def test_admin_debug_list(self):
+ self.keystone('catalog', flags='--debug')
+
+ def test_admin_timeout(self):
+ self.keystone('catalog', flags='--timeout 15')
diff --git a/tempest/config.py b/tempest/config.py
index 7164b95..150d561 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -120,6 +120,13 @@
cfg.IntOpt('flavor_ref_alt',
default=2,
help='Valid secondary flavor to be used in tests.'),
+ cfg.StrOpt('image_ssh_user',
+ default="root",
+ help="User name used to authenticate to an instance."),
+ cfg.StrOpt('image_alt_ssh_user',
+ default="root",
+ help="User name used to authenticate to an instance using "
+ "the alternate image."),
cfg.BoolOpt('resize_available',
default=False,
help="Does the test environment support resizing?"),
@@ -318,12 +325,12 @@
help="Catalog type of the Volume Service"),
cfg.BoolOpt('multi_backend_enabled',
default=False,
- help="Runs Cinder multi-backend test (requires 2 backend)"),
+ help="Runs Cinder multi-backend test (requires 2 backends)"),
cfg.StrOpt('backend1_name',
- default='LVM_iSCSI',
+ default='BACKEND_1',
help="Name of the backend1 (must be declared in cinder.conf)"),
cfg.StrOpt('backend2_name',
- default='LVM_iSCSI_1',
+ default='BACKEND_2',
help="Name of the backend2 (must be declared in cinder.conf)"),
]
@@ -488,6 +495,34 @@
conf.register_opt(opt, group='stress')
+scenario_group = cfg.OptGroup(name='scenario', title='Scenario Test Options')
+
+ScenarioGroup = [
+ cfg.StrOpt('img_dir',
+ default='/opt/stack/new/devstack/files/images/'
+ 'cirros-0.3.1-x86_64-uec',
+ help='Directory containing image files'),
+ cfg.StrOpt('ami_img_file',
+ default='cirros-0.3.1-x86_64-blank.img',
+ help='AMI image file name'),
+ cfg.StrOpt('ari_img_file',
+ default='cirros-0.3.1-x86_64-initrd',
+ help='ARI image file name'),
+ cfg.StrOpt('aki_img_file',
+ default='cirros-0.3.1-x86_64-vmlinuz',
+ help='AKI image file name'),
+ cfg.StrOpt('ssh_user',
+ default='cirros',
+ help='ssh username for the image file')
+]
+
+
+def register_scenario_opts(conf):
+ conf.register_group(scenario_group)
+ for opt in ScenarioGroup:
+ conf.register_opt(opt, group='scenario')
+
+
@singleton
class TempestConfig:
"""Provides OpenStack configuration information."""
@@ -537,6 +572,7 @@
register_boto_opts(cfg.CONF)
register_compute_admin_opts(cfg.CONF)
register_stress_opts(cfg.CONF)
+ register_scenario_opts(cfg.CONF)
self.compute = cfg.CONF.compute
self.whitebox = cfg.CONF.whitebox
self.identity = cfg.CONF.identity
@@ -548,6 +584,7 @@
self.boto = cfg.CONF.boto
self.compute_admin = cfg.CONF['compute-admin']
self.stress = cfg.CONF.stress
+ self.scenario = cfg.CONF.scenario
if not self.compute_admin.username:
self.compute_admin.username = self.identity.admin_username
self.compute_admin.password = self.identity.admin_password
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index a358f20..b62e8bb 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -20,6 +20,7 @@
import subprocess
# Default client libs
+import cinderclient.client
import glanceclient
import keystoneclient.v2_0.client
import netaddr
@@ -33,6 +34,7 @@
pass
from tempest.api.network import common as net_common
+from tempest.common import ssh
from tempest.common.utils.data_utils import rand_name
from tempest import exceptions
import tempest.manager
@@ -49,6 +51,7 @@
"""
NOVACLIENT_VERSION = '2'
+ CINDERCLIENT_VERSION = '1'
def __init__(self):
super(OfficialClientManager, self).__init__()
@@ -56,11 +59,13 @@
self.image_client = self._get_image_client()
self.identity_client = self._get_identity_client()
self.network_client = self._get_network_client()
+ self.volume_client = self._get_volume_client()
self.client_attr_names = [
'compute_client',
'image_client',
'identity_client',
'network_client',
+ 'volume_client'
]
def _get_compute_client(self, username=None, password=None,
@@ -103,6 +108,22 @@
return glanceclient.Client('1', endpoint=endpoint, token=token,
insecure=dscv)
+ def _get_volume_client(self, username=None, password=None,
+ tenant_name=None):
+ if not username:
+ username = self.config.identity.username
+ if not password:
+ password = self.config.identity.password
+ if not tenant_name:
+ tenant_name = self.config.identity.tenant_name
+
+ auth_url = self.config.identity.uri
+ return cinderclient.client.Client(self.CINDERCLIENT_VERSION,
+ username,
+ password,
+ tenant_name,
+ auth_url)
+
def _get_identity_client(self, username=None, password=None,
tenant_name=None):
# This identity client is not intended to check the security
@@ -263,6 +284,11 @@
self.fail("SecurityGroup object not successfully created.")
# Add rules to the security group
+
+ # These rules are intended to permit inbound ssh and icmp
+ # traffic from all sources, so no group_id is provided.
+ # Setting a group_id would only permit traffic from ports
+ # belonging to the same security group.
rulesets = [
{
# ssh
@@ -270,7 +296,6 @@
'from_port': 22,
'to_port': 22,
'cidr': '0.0.0.0/0',
- 'group_id': secgroup.id
},
{
# ping
@@ -278,7 +303,6 @@
'from_port': -1,
'to_port': -1,
'cidr': '0.0.0.0/0',
- 'group_id': secgroup.id
}
]
for ruleset in rulesets:
@@ -420,3 +444,22 @@
# TODO(mnewby) Allow configuration of execution and sleep duration.
return tempest.test.call_until_true(ping, 20, 1)
+
+ def _is_reachable_via_ssh(self, ip_address, username, private_key,
+ timeout=120):
+ ssh_client = ssh.Client(ip_address, username,
+ pkey=private_key,
+ timeout=timeout)
+ return ssh_client.test_connection_auth()
+
+ def _check_vm_connectivity(self, ip_address, username, private_key,
+ timeout=120):
+ self.assertTrue(self._ping_ip_address(ip_address),
+ "Timed out waiting for %s to become "
+ "reachable" % ip_address)
+ self.assertTrue(self._is_reachable_via_ssh(ip_address,
+ username,
+ private_key,
+ timeout=timeout),
+ 'Auth failure in connecting to %s@%s via ssh' %
+ (username, ip_address))
diff --git a/tempest/scenario/test_minimum_basic.py b/tempest/scenario/test_minimum_basic.py
new file mode 100644
index 0000000..a55bbb2
--- /dev/null
+++ b/tempest/scenario/test_minimum_basic.py
@@ -0,0 +1,208 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 NEC Corporation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+
+from tempest.common.utils.data_utils import rand_name
+from tempest.common.utils.linux.remote_client import RemoteClient
+from tempest.scenario import manager
+
+
+LOG = logging.getLogger(__name__)
+
+
+class TestMinimumBasicScenario(manager.OfficialClientTest):
+
+ """
+ This is a basic minimum scenario test.
+
+ This test works:
+ * across multiple components
+ * as a regular user
+ * with and without optional parameters
+ * checking command outputs
+
+ """
+
+ def _wait_for_server_status(self, status):
+ server_id = self.server.id
+ self.status_timeout(
+ self.compute_client.servers, server_id, status)
+
+ def _wait_for_volume_status(self, status):
+ volume_id = self.volume.id
+ self.status_timeout(
+ self.volume_client.volumes, volume_id, status)
+
+ def _image_create(self, name, fmt, path, properties={}):
+ name = rand_name('%s-' % name)
+ image_file = open(path, 'rb')
+ self.addCleanup(image_file.close)
+ params = {
+ 'name': name,
+ 'container_format': fmt,
+ 'disk_format': fmt,
+ 'is_public': 'True',
+ }
+ params.update(properties)
+ image = self.image_client.images.create(**params)
+ self.addCleanup(self.image_client.images.delete, image)
+ self.assertEqual("queued", image.status)
+ image.update(data=image_file)
+ return image.id
+
+ def glance_image_create(self):
+ aki_img_path = self.config.scenario.img_dir + "/" + \
+ self.config.scenario.aki_img_file
+ ari_img_path = self.config.scenario.img_dir + "/" + \
+ self.config.scenario.ari_img_file
+ ami_img_path = self.config.scenario.img_dir + "/" + \
+ self.config.scenario.ami_img_file
+ LOG.debug("paths: ami: %s, ari: %s, aki: %s"
+ % (ami_img_path, ari_img_path, aki_img_path))
+ kernel_id = self._image_create('scenario-aki', 'aki', aki_img_path)
+ ramdisk_id = self._image_create('scenario-ari', 'ari', ari_img_path)
+ properties = {
+ 'properties': {'kernel_id': kernel_id, 'ramdisk_id': ramdisk_id}
+ }
+ self.image = self._image_create('scenario-ami', 'ami',
+ path=ami_img_path,
+ properties=properties)
+
+ def nova_keypair_add(self):
+ name = rand_name('scenario-keypair-')
+
+ self.keypair = self.compute_client.keypairs.create(name=name)
+ self.addCleanup(self.compute_client.keypairs.delete, self.keypair)
+ self.assertEqual(name, self.keypair.name)
+
+ def nova_boot(self):
+ name = rand_name('scenario-server-')
+ client = self.compute_client
+ flavor_id = self.config.compute.flavor_ref
+ self.server = client.servers.create(name=name, image=self.image,
+ flavor=flavor_id,
+ key_name=self.keypair.name)
+ self.addCleanup(self.compute_client.servers.delete, self.server)
+ self.assertEqual(name, self.server.name)
+ self._wait_for_server_status('ACTIVE')
+
+ def nova_list(self):
+ servers = self.compute_client.servers.list()
+ LOG.debug("server_list:%s" % servers)
+ self.assertTrue(self.server in servers)
+
+ def nova_show(self):
+ got_server = self.compute_client.servers.get(self.server)
+ LOG.debug("got server:%s" % got_server)
+ self.assertEqual(self.server, got_server)
+
+ def cinder_create(self):
+ name = rand_name('scenario-volume-')
+ LOG.debug("volume display-name:%s" % name)
+ self.volume = self.volume_client.volumes.create(size=1,
+ display_name=name)
+ LOG.debug("volume created:%s" % self.volume.display_name)
+ self._wait_for_volume_status('available')
+
+ self.addCleanup(self.volume_client.volumes.delete, self.volume)
+ self.assertEqual(name, self.volume.display_name)
+
+ def cinder_list(self):
+ volumes = self.volume_client.volumes.list()
+ self.assertTrue(self.volume in volumes)
+
+ def cinder_show(self):
+ volume = self.volume_client.volumes.get(self.volume.id)
+ self.assertEqual(self.volume, volume)
+
+ def nova_volume_attach(self):
+ attach_volume_client = self.compute_client.volumes.create_server_volume
+ volume = attach_volume_client(self.server.id,
+ self.volume.id,
+ '/dev/vdb')
+ self.assertEqual(self.volume.id, volume.id)
+ self._wait_for_volume_status('in-use')
+
+ def nova_reboot(self):
+ self.server.reboot()
+ self._wait_for_server_status('ACTIVE')
+
+ def nova_floating_ip_create(self):
+ self.floating_ip = self.compute_client.floating_ips.create()
+ self.addCleanup(self.floating_ip.delete)
+
+ def nova_floating_ip_add(self):
+ self.server.add_floating_ip(self.floating_ip)
+
+ def nova_security_group_rule_create(self):
+ sgs = self.compute_client.security_groups.list()
+ for sg in sgs:
+ if sg.name == 'default':
+ secgroup = sg
+
+ ruleset = {
+ # ssh
+ 'ip_protocol': 'tcp',
+ 'from_port': 22,
+ 'to_port': 22,
+ 'cidr': '0.0.0.0/0',
+ 'group_id': None
+ }
+ sg_rule = self.compute_client.security_group_rules.create(secgroup.id,
+ **ruleset)
+ self.addCleanup(self.compute_client.security_group_rules.delete,
+ sg_rule.id)
+
+ def ssh_to_server(self):
+ username = self.config.scenario.ssh_user
+ self.linux_client = RemoteClient(self.floating_ip.ip,
+ username,
+ pkey=self.keypair.private_key)
+
+ def check_partitions(self):
+ partitions = self.linux_client.get_partitions()
+ self.assertEqual(1, partitions.count('vdb'))
+
+ def nova_volume_detach(self):
+ detach_volume_client = self.compute_client.volumes.delete_server_volume
+ detach_volume_client(self.server.id, self.volume.id)
+ self._wait_for_volume_status('available')
+
+ volume = self.volume_client.volumes.get(self.volume.id)
+ self.assertEqual('available', volume.status)
+
+ def test_minimum_basic_scenario(self):
+ self.glance_image_create()
+ self.nova_keypair_add()
+ self.nova_boot()
+ self.nova_list()
+ self.nova_show()
+ self.cinder_create()
+ self.cinder_list()
+ self.cinder_show()
+ self.nova_volume_attach()
+ self.cinder_show()
+ self.nova_reboot()
+
+ self.nova_floating_ip_create()
+ self.nova_floating_ip_add()
+ self.nova_security_group_rule_create()
+ self.ssh_to_server()
+ self.check_partitions()
+
+ self.nova_volume_detach()
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index 5ccfd52..b94caaa 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -31,10 +31,15 @@
* For a freshly-booted VM with an IP address ("port") on a given network:
- - the Tempest host can ping the IP address. This implies that
- the VM has been assigned the correct IP address and has
+ - the Tempest host can ping the IP address. This implies, but
+ does not guarantee (see the ssh check that follows), that the
+ VM has been assigned the correct IP address and has
connectivity to the Tempest host.
+ - the Tempest host can perform key-based authentication to an
+ ssh server hosted at the IP address. This check guarantees
+ that the IP address is associated with the target VM.
+
#TODO(mnewby) - Need to implement the following:
- the Tempest host can ssh into the VM via the IP address and
successfully execute the following:
@@ -214,12 +219,15 @@
raise self.skipTest(msg)
if not self.servers:
raise self.skipTest("No VM's have been created")
+ # The target login is assumed to have been configured for
+ # key-based authentication by cloud-init.
+ ssh_login = self.config.compute.image_ssh_user
+ private_key = self.keypairs[self.tenant_id].private_key
for server in self.servers:
for net_name, ip_addresses in server.networks.iteritems():
for ip_address in ip_addresses:
- self.assertTrue(self._ping_ip_address(ip_address),
- "Timed out waiting for %s's ip to become "
- "reachable" % server.name)
+ self._check_vm_connectivity(ip_address, ssh_login,
+ private_key)
@attr(type='smoke')
def test_007_assign_floating_ips(self):
@@ -237,9 +245,11 @@
def test_008_check_public_network_connectivity(self):
if not self.floating_ips:
raise self.skipTest('No floating ips have been allocated.')
+ # The target login is assumed to have been configured for
+ # key-based authentication by cloud-init.
+ ssh_login = self.config.compute.image_ssh_user
+ private_key = self.keypairs[self.tenant_id].private_key
for server, floating_ips in self.floating_ips.iteritems():
for floating_ip in floating_ips:
ip_address = floating_ip.floating_ip_address
- self.assertTrue(self._ping_ip_address(ip_address),
- "Timed out waiting for %s's ip to become "
- "reachable" % server.name)
+ self._check_vm_connectivity(ip_address, ssh_login, private_key)
diff --git a/tempest/scenario/test_server_advanced_ops.py b/tempest/scenario/test_server_advanced_ops.py
index 9ac0cc0..6202e91 100644
--- a/tempest/scenario/test_server_advanced_ops.py
+++ b/tempest/scenario/test_server_advanced_ops.py
@@ -28,6 +28,7 @@
This test case stresses some advanced server instance operations:
* Resizing an instance
+ * Sequence of suspend and resume operations
"""
@classmethod
@@ -44,11 +45,6 @@
msg = "Skipping test - flavor_ref and flavor_ref_alt are identical"
raise cls.skipException(msg)
- @classmethod
- def tearDownClass(cls):
- for thing in cls.resources:
- thing.delete()
-
def test_resize_server_confirm(self):
# We create an instance for use in this test
i_name = rand_name('instance')
@@ -56,12 +52,8 @@
base_image_id = self.config.compute.image_ref
self.instance = self.compute_client.servers.create(
i_name, base_image_id, flavor_id)
- try:
- self.assertEqual(self.instance.name, i_name)
- self.set_resource('instance', self.instance)
- except AttributeError:
- self.fail("Instance not successfully created.")
-
+ self.assertEqual(self.instance.name, i_name)
+ self.set_resource('instance', self.instance)
self.assertEqual(self.instance.status, 'BUILD')
instance_id = self.get_resource('instance').id
self.status_timeout(
@@ -77,5 +69,42 @@
LOG.debug("Confirming resize of instance %s", instance_id)
instance.confirm_resize()
+
self.status_timeout(
self.compute_client.servers, instance_id, 'ACTIVE')
+
+ def test_server_sequence_suspend_resume(self):
+ # We create an instance for use in this test
+ i_name = rand_name('instance')
+ flavor_id = self.config.compute.flavor_ref
+ base_image_id = self.config.compute.image_ref
+ self.instance = self.compute_client.servers.create(
+ i_name, base_image_id, flavor_id)
+ self.assertEqual(self.instance.name, i_name)
+ self.set_resource('instance', self.instance)
+ self.assertEqual(self.instance.status, 'BUILD')
+ instance_id = self.get_resource('instance').id
+ self.status_timeout(
+ self.compute_client.servers, instance_id, 'ACTIVE')
+ instance = self.get_resource('instance')
+ instance_id = instance.id
+ LOG.debug("Suspending instance %s. Current status: %s",
+ instance_id, instance.status)
+ instance.suspend()
+ self.status_timeout(self.compute_client.servers, instance_id,
+ 'SUSPENDED')
+ LOG.debug("Resuming instance %s. Current status: %s",
+ instance_id, instance.status)
+ instance.resume()
+ self.status_timeout(self.compute_client.servers, instance_id,
+ 'ACTIVE')
+ LOG.debug("Suspending instance %s. Current status: %s",
+ instance_id, instance.status)
+ instance.suspend()
+ self.status_timeout(self.compute_client.servers, instance_id,
+ 'SUSPENDED')
+ LOG.debug("Resuming instance %s. Current status: %s",
+ instance_id, instance.status)
+ instance.resume()
+ self.status_timeout(self.compute_client.servers, instance_id,
+ 'ACTIVE')
diff --git a/tempest/services/compute/json/images_client.py b/tempest/services/compute/json/images_client.py
index 376dafc..b13d0f1 100644
--- a/tempest/services/compute/json/images_client.py
+++ b/tempest/services/compute/json/images_client.py
@@ -150,3 +150,10 @@
resp, body = self.delete("images/%s/metadata/%s" %
(str(image_id), key))
return resp, body
+
+ def is_resource_deleted(self, id):
+ try:
+ self.get_image(id)
+ except exceptions.NotFound:
+ return True
+ return False
diff --git a/tempest/services/compute/xml/images_client.py b/tempest/services/compute/xml/images_client.py
index c7e337b..cc13aa1 100644
--- a/tempest/services/compute/xml/images_client.py
+++ b/tempest/services/compute/xml/images_client.py
@@ -226,3 +226,10 @@
"""Deletes a single image metadata key/value pair."""
return self.delete("images/%s/metadata/%s" % (str(image_id), key),
self.headers)
+
+ def is_resource_deleted(self, id):
+ try:
+ self.get_image(id)
+ except exceptions.NotFound:
+ return True
+ return False
diff --git a/tempest/services/identity/v3/json/endpoints_client.py b/tempest/services/identity/v3/json/endpoints_client.py
old mode 100755
new mode 100644
diff --git a/tempest/services/identity/v3/json/identity_client.py b/tempest/services/identity/v3/json/identity_client.py
index 014df1e..adbdc83 100644
--- a/tempest/services/identity/v3/json/identity_client.py
+++ b/tempest/services/identity/v3/json/identity_client.py
@@ -160,3 +160,51 @@
(project_id, user_id, role_id), None,
self.headers)
return resp, body
+
+ def create_domain(self, name, **kwargs):
+ """Creates a domain."""
+ description = kwargs.get('description', None)
+ en = kwargs.get('enabled', True)
+ post_body = {
+ 'description': description,
+ 'enabled': en,
+ 'name': name
+ }
+ post_body = json.dumps({'domain': post_body})
+ resp, body = self.post('domains', post_body, self.headers)
+ body = json.loads(body)
+ return resp, body['domain']
+
+ def delete_domain(self, domain_id):
+ """Delete a domain."""
+ resp, body = self.delete('domains/%s' % str(domain_id))
+ return resp, body
+
+ def list_domains(self):
+ """List Domains."""
+ resp, body = self.get('domains')
+ body = json.loads(body)
+ return resp, body['domains']
+
+ def update_domain(self, domain_id, **kwargs):
+ """Updates a domain."""
+ resp, body = self.get_domain(domain_id)
+ description = kwargs.get('description', body['description'])
+ en = kwargs.get('enabled', body['enabled'])
+ name = kwargs.get('name', body['name'])
+ post_body = {
+ 'description': description,
+ 'enabled': en,
+ 'name': name
+ }
+ post_body = json.dumps({'domain': post_body})
+ resp, body = self.patch('domains/%s' % domain_id, post_body,
+ self.headers)
+ body = json.loads(body)
+ return resp, body['domain']
+
+ def get_domain(self, domain_id):
+ """Get Domain details."""
+ resp, body = self.get('domains/%s' % domain_id)
+ body = json.loads(body)
+ return resp, body['domain']
diff --git a/tempest/services/identity/v3/xml/endpoints_client.py b/tempest/services/identity/v3/xml/endpoints_client.py
old mode 100755
new mode 100644
diff --git a/tempest/services/identity/v3/xml/identity_client.py b/tempest/services/identity/v3/xml/identity_client.py
index 92151dd..708ee28 100644
--- a/tempest/services/identity/v3/xml/identity_client.py
+++ b/tempest/services/identity/v3/xml/identity_client.py
@@ -44,6 +44,14 @@
array.append(xml_to_json(child))
return array
+ def _parse_domains(self, node):
+ array = []
+ for child in node.getchildren():
+ tag_list = child.tag.split('}', 1)
+ if tag_list[1] == "domain":
+ array.append(xml_to_json(child))
+ return array
+
def _parse_array(self, node):
array = []
for child in node.getchildren():
@@ -185,3 +193,51 @@
resp, body = self.put('projects/%s/users/%s/roles/%s' %
(project_id, user_id, role_id), '', self.headers)
return resp, body
+
+ def create_domain(self, name, **kwargs):
+ """Creates a domain."""
+ description = kwargs.get('description', None)
+ en = kwargs.get('enabled', True)
+ post_body = Element("domain",
+ xmlns=XMLNS,
+ name=name,
+ description=description,
+ enabled=str(en).lower())
+ resp, body = self.post('domains', str(Document(post_body)),
+ self.headers)
+ body = self._parse_body(etree.fromstring(body))
+ return resp, body
+
+ def list_domains(self):
+ """Get the list of domains."""
+ resp, body = self.get("domains", self.headers)
+ body = self._parse_domains(etree.fromstring(body))
+ return resp, body
+
+ def delete_domain(self, domain_id):
+ """Delete a domain."""
+ resp, body = self.delete('domains/%s' % domain_id, self.headers)
+ return resp, body
+
+ def update_domain(self, domain_id, **kwargs):
+ """Updates a domain."""
+ resp, body = self.get_domain(domain_id)
+ description = kwargs.get('description', body['description'])
+ en = kwargs.get('enabled', body['enabled'])
+ name = kwargs.get('name', body['name'])
+ post_body = Element("domain",
+ xmlns=XMLNS,
+ name=name,
+ description=description,
+ enabled=str(en).lower())
+ resp, body = self.patch('domains/%s' % domain_id,
+ str(Document(post_body)),
+ self.headers)
+ body = self._parse_body(etree.fromstring(body))
+ return resp, body
+
+ def get_domain(self, domain_id):
+ """Get Domain details."""
+ resp, body = self.get('domains/%s' % domain_id, self.headers)
+ body = self._parse_body(etree.fromstring(body))
+ return resp, body
diff --git a/tempest/thirdparty/boto/test_s3_ec2_images.py b/tempest/thirdparty/boto/test_s3_ec2_images.py
index 2c0d8ae..0f836d0 100644
--- a/tempest/thirdparty/boto/test_s3_ec2_images.py
+++ b/tempest/thirdparty/boto/test_s3_ec2_images.py
@@ -72,21 +72,22 @@
retrieved_image = self.images_client.get_image(image["image_id"])
self.assertTrue(retrieved_image.name == image["name"])
self.assertTrue(retrieved_image.id == image["image_id"])
- if retrieved_image.state != "available":
+ state = retrieved_image.state
+ if state != "available":
def _state():
retr = self.images_client.get_image(image["image_id"])
return retr.state
state = state_wait(_state, "available")
self.assertEqual("available", state)
self.images_client.deregister_image(image["image_id"])
- #TODO(afazekas): double deregister ?
+ self.assertNotIn(image["image_id"], str(
+ self.images_client.get_all_images()))
self.cancelResourceCleanUp(image["cleanUp"])
- @testtools.skip("Skipped until the Bug #1074904 is resolved")
def test_register_get_deregister_aki_image(self):
# Register and deregister aki image
image = {"name": rand_name("aki-name-"),
- "location": self.bucket_name + "/" + self.ari_manifest,
+ "location": self.bucket_name + "/" + self.aki_manifest,
"type": "aki"}
image["image_id"] = self.images_client.register_image(
name=image["name"],
@@ -102,9 +103,8 @@
if retrieved_image.state != "available":
self.assertImageStateWait(retrieved_image, "available")
self.images_client.deregister_image(image["image_id"])
- #TODO(afazekas): verify deregister in a better way
- retrieved_image = self.images_client.get_image(image["image_id"])
- self.assertIn(retrieved_image.state, self.valid_image_state)
+ self.assertNotIn(image["image_id"], str(
+ self.images_client.get_all_images()))
self.cancelResourceCleanUp(image["cleanUp"])
@testtools.skip("Skipped until the Bug #1074908 and #1074904 is resolved")
diff --git a/tools/skip_tracker.py b/tools/skip_tracker.py
index a4cf394..c7b0033 100755
--- a/tools/skip_tracker.py
+++ b/tools/skip_tracker.py
@@ -28,7 +28,7 @@
from launchpadlib import launchpad
BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
-TESTDIR = os.path.join(BASEDIR, 'tempest', 'tests')
+TESTDIR = os.path.join(BASEDIR, 'tempest')
LPCACHEDIR = os.path.expanduser('~/.launchpadlib/cache')