Merge "Unskipped object storage test"
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index f9f02dc..282e455 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -65,6 +65,10 @@
flavor_ref = 1
flavor_ref_alt = 2
+# User names used to authenticate to an instance for a given image.
+image_ssh_user = root
+image_alt_ssh_user = root
+
# Number of seconds to wait while looping to check the status of an
# instance that is building.
build_interval = 10
@@ -314,3 +318,19 @@
# Name of existing keypair to launch servers with. The default is not to specify
# any key, which will generate a keypair for each test class
#keypair_name = heat_key
+
+[scenario]
+# Directory containing image files
+img_dir = /opt/stack/new/devstack/files/images/cirros-0.3.1-x86_64-uec
+
+# AMI image file name
+ami_img_file = cirros-0.3.1-x86_64-blank.img
+
+# ARI image file name
+ari_img_file = cirros-0.3.1-x86_64-initrd
+
+# AKI image file name
+aki_img_file = cirros-0.3.1-x86_64-vmlinuz
+
+# ssh username for the image file
+ssh_user = cirros
diff --git a/tempest/api/identity/admin/v3/test_domains.py b/tempest/api/identity/admin/v3/test_domains.py
new file mode 100644
index 0000000..8d019fe
--- /dev/null
+++ b/tempest/api/identity/admin/v3/test_domains.py
@@ -0,0 +1,54 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.identity import base
+from tempest.common.utils.data_utils import rand_name
+from tempest.test import attr
+
+
+class DomainsTestJSON(base.BaseIdentityAdminTest):
+ _interface = 'json'
+
+ def _delete_domain(self, domain_id):
+        # It is necessary to disable the domain before deleting,
+ # or else it would result in unauthorized error
+ _, body = self.v3_client.update_domain(domain_id, enabled=False)
+ resp, _ = self.v3_client.delete_domain(domain_id)
+ self.assertEqual(204, resp.status)
+
+ @attr(type='smoke')
+ def test_list_domains(self):
+        # Test to list domains
+ domain_ids = list()
+ fetched_ids = list()
+ for _ in range(3):
+ _, domain = self.v3_client.create_domain(
+ rand_name('domain-'), description=rand_name('domain-desc-'))
+            # Delete the domain at the end of this method
+ self.addCleanup(self._delete_domain, domain['id'])
+ domain_ids.append(domain['id'])
+ # List and Verify Domains
+ resp, body = self.v3_client.list_domains()
+ self.assertEqual(resp['status'], '200')
+ for d in body:
+ fetched_ids.append(d['id'])
+ missing_doms = [d for d in domain_ids if d not in fetched_ids]
+ self.assertEqual(0, len(missing_doms))
+
+
+class DomainsTestXML(DomainsTestJSON):
+ _interface = 'xml'
diff --git a/tempest/config.py b/tempest/config.py
index 7164b95..89a3614 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -120,6 +120,13 @@
cfg.IntOpt('flavor_ref_alt',
default=2,
help='Valid secondary flavor to be used in tests.'),
+ cfg.StrOpt('image_ssh_user',
+ default="root",
+ help="User name used to authenticate to an instance."),
+ cfg.StrOpt('image_alt_ssh_user',
+ default="root",
+ help="User name used to authenticate to an instance using "
+ "the alternate image."),
cfg.BoolOpt('resize_available',
default=False,
help="Does the test environment support resizing?"),
@@ -488,6 +495,34 @@
conf.register_opt(opt, group='stress')
+scenario_group = cfg.OptGroup(name='scenario', title='Scenario Test Options')
+
+ScenarioGroup = [
+ cfg.StrOpt('img_dir',
+ default='/opt/stack/new/devstack/files/images/'
+ 'cirros-0.3.1-x86_64-uec',
+ help='Directory containing image files'),
+ cfg.StrOpt('ami_img_file',
+ default='cirros-0.3.1-x86_64-blank.img',
+ help='AMI image file name'),
+ cfg.StrOpt('ari_img_file',
+ default='cirros-0.3.1-x86_64-initrd',
+ help='ARI image file name'),
+ cfg.StrOpt('aki_img_file',
+ default='cirros-0.3.1-x86_64-vmlinuz',
+ help='AKI image file name'),
+ cfg.StrOpt('ssh_user',
+ default='cirros',
+ help='ssh username for the image file')
+]
+
+
+def register_scenario_opts(conf):
+ conf.register_group(scenario_group)
+ for opt in ScenarioGroup:
+ conf.register_opt(opt, group='scenario')
+
+
@singleton
class TempestConfig:
"""Provides OpenStack configuration information."""
@@ -537,6 +572,7 @@
register_boto_opts(cfg.CONF)
register_compute_admin_opts(cfg.CONF)
register_stress_opts(cfg.CONF)
+ register_scenario_opts(cfg.CONF)
self.compute = cfg.CONF.compute
self.whitebox = cfg.CONF.whitebox
self.identity = cfg.CONF.identity
@@ -548,6 +584,7 @@
self.boto = cfg.CONF.boto
self.compute_admin = cfg.CONF['compute-admin']
self.stress = cfg.CONF.stress
+ self.scenario = cfg.CONF.scenario
if not self.compute_admin.username:
self.compute_admin.username = self.identity.admin_username
self.compute_admin.password = self.identity.admin_password
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index a358f20..b62e8bb 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -20,6 +20,7 @@
import subprocess
# Default client libs
+import cinderclient.client
import glanceclient
import keystoneclient.v2_0.client
import netaddr
@@ -33,6 +34,7 @@
pass
from tempest.api.network import common as net_common
+from tempest.common import ssh
from tempest.common.utils.data_utils import rand_name
from tempest import exceptions
import tempest.manager
@@ -49,6 +51,7 @@
"""
NOVACLIENT_VERSION = '2'
+ CINDERCLIENT_VERSION = '1'
def __init__(self):
super(OfficialClientManager, self).__init__()
@@ -56,11 +59,13 @@
self.image_client = self._get_image_client()
self.identity_client = self._get_identity_client()
self.network_client = self._get_network_client()
+ self.volume_client = self._get_volume_client()
self.client_attr_names = [
'compute_client',
'image_client',
'identity_client',
'network_client',
+ 'volume_client'
]
def _get_compute_client(self, username=None, password=None,
@@ -103,6 +108,22 @@
return glanceclient.Client('1', endpoint=endpoint, token=token,
insecure=dscv)
+ def _get_volume_client(self, username=None, password=None,
+ tenant_name=None):
+ if not username:
+ username = self.config.identity.username
+ if not password:
+ password = self.config.identity.password
+ if not tenant_name:
+ tenant_name = self.config.identity.tenant_name
+
+ auth_url = self.config.identity.uri
+ return cinderclient.client.Client(self.CINDERCLIENT_VERSION,
+ username,
+ password,
+ tenant_name,
+ auth_url)
+
def _get_identity_client(self, username=None, password=None,
tenant_name=None):
# This identity client is not intended to check the security
@@ -263,6 +284,11 @@
self.fail("SecurityGroup object not successfully created.")
# Add rules to the security group
+
+ # These rules are intended to permit inbound ssh and icmp
+ # traffic from all sources, so no group_id is provided.
+ # Setting a group_id would only permit traffic from ports
+ # belonging to the same security group.
rulesets = [
{
# ssh
@@ -270,7 +296,6 @@
'from_port': 22,
'to_port': 22,
'cidr': '0.0.0.0/0',
- 'group_id': secgroup.id
},
{
# ping
@@ -278,7 +303,6 @@
'from_port': -1,
'to_port': -1,
'cidr': '0.0.0.0/0',
- 'group_id': secgroup.id
}
]
for ruleset in rulesets:
@@ -420,3 +444,22 @@
# TODO(mnewby) Allow configuration of execution and sleep duration.
return tempest.test.call_until_true(ping, 20, 1)
+
+ def _is_reachable_via_ssh(self, ip_address, username, private_key,
+ timeout=120):
+ ssh_client = ssh.Client(ip_address, username,
+ pkey=private_key,
+ timeout=timeout)
+ return ssh_client.test_connection_auth()
+
+ def _check_vm_connectivity(self, ip_address, username, private_key,
+ timeout=120):
+ self.assertTrue(self._ping_ip_address(ip_address),
+ "Timed out waiting for %s to become "
+ "reachable" % ip_address)
+ self.assertTrue(self._is_reachable_via_ssh(ip_address,
+ username,
+ private_key,
+ timeout=timeout),
+ 'Auth failure in connecting to %s@%s via ssh' %
+ (username, ip_address))
diff --git a/tempest/scenario/test_minimum_basic.py b/tempest/scenario/test_minimum_basic.py
new file mode 100644
index 0000000..a55bbb2
--- /dev/null
+++ b/tempest/scenario/test_minimum_basic.py
@@ -0,0 +1,208 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 NEC Corporation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+
+from tempest.common.utils.data_utils import rand_name
+from tempest.common.utils.linux.remote_client import RemoteClient
+from tempest.scenario import manager
+
+
+LOG = logging.getLogger(__name__)
+
+
+class TestMinimumBasicScenario(manager.OfficialClientTest):
+
+ """
+ This is a basic minimum scenario test.
+
+ This test below:
+ * across the multiple components
+ * as a regular user
+ * with and without optional parameters
+ * check command outputs
+
+ """
+
+ def _wait_for_server_status(self, status):
+ server_id = self.server.id
+ self.status_timeout(
+ self.compute_client.servers, server_id, status)
+
+ def _wait_for_volume_status(self, status):
+ volume_id = self.volume.id
+ self.status_timeout(
+ self.volume_client.volumes, volume_id, status)
+
+ def _image_create(self, name, fmt, path, properties={}):
+ name = rand_name('%s-' % name)
+ image_file = open(path, 'rb')
+ self.addCleanup(image_file.close)
+ params = {
+ 'name': name,
+ 'container_format': fmt,
+ 'disk_format': fmt,
+ 'is_public': 'True',
+ }
+ params.update(properties)
+ image = self.image_client.images.create(**params)
+ self.addCleanup(self.image_client.images.delete, image)
+ self.assertEqual("queued", image.status)
+ image.update(data=image_file)
+ return image.id
+
+ def glance_image_create(self):
+ aki_img_path = self.config.scenario.img_dir + "/" + \
+ self.config.scenario.aki_img_file
+ ari_img_path = self.config.scenario.img_dir + "/" + \
+ self.config.scenario.ari_img_file
+ ami_img_path = self.config.scenario.img_dir + "/" + \
+ self.config.scenario.ami_img_file
+ LOG.debug("paths: ami: %s, ari: %s, aki: %s"
+ % (ami_img_path, ari_img_path, aki_img_path))
+ kernel_id = self._image_create('scenario-aki', 'aki', aki_img_path)
+ ramdisk_id = self._image_create('scenario-ari', 'ari', ari_img_path)
+ properties = {
+ 'properties': {'kernel_id': kernel_id, 'ramdisk_id': ramdisk_id}
+ }
+ self.image = self._image_create('scenario-ami', 'ami',
+ path=ami_img_path,
+ properties=properties)
+
+ def nova_keypair_add(self):
+ name = rand_name('scenario-keypair-')
+
+ self.keypair = self.compute_client.keypairs.create(name=name)
+ self.addCleanup(self.compute_client.keypairs.delete, self.keypair)
+ self.assertEqual(name, self.keypair.name)
+
+ def nova_boot(self):
+ name = rand_name('scenario-server-')
+ client = self.compute_client
+ flavor_id = self.config.compute.flavor_ref
+ self.server = client.servers.create(name=name, image=self.image,
+ flavor=flavor_id,
+ key_name=self.keypair.name)
+ self.addCleanup(self.compute_client.servers.delete, self.server)
+ self.assertEqual(name, self.server.name)
+ self._wait_for_server_status('ACTIVE')
+
+ def nova_list(self):
+ servers = self.compute_client.servers.list()
+ LOG.debug("server_list:%s" % servers)
+ self.assertTrue(self.server in servers)
+
+ def nova_show(self):
+ got_server = self.compute_client.servers.get(self.server)
+ LOG.debug("got server:%s" % got_server)
+ self.assertEqual(self.server, got_server)
+
+ def cinder_create(self):
+ name = rand_name('scenario-volume-')
+ LOG.debug("volume display-name:%s" % name)
+ self.volume = self.volume_client.volumes.create(size=1,
+ display_name=name)
+ LOG.debug("volume created:%s" % self.volume.display_name)
+ self._wait_for_volume_status('available')
+
+ self.addCleanup(self.volume_client.volumes.delete, self.volume)
+ self.assertEqual(name, self.volume.display_name)
+
+ def cinder_list(self):
+ volumes = self.volume_client.volumes.list()
+ self.assertTrue(self.volume in volumes)
+
+ def cinder_show(self):
+ volume = self.volume_client.volumes.get(self.volume.id)
+ self.assertEqual(self.volume, volume)
+
+ def nova_volume_attach(self):
+ attach_volume_client = self.compute_client.volumes.create_server_volume
+ volume = attach_volume_client(self.server.id,
+ self.volume.id,
+ '/dev/vdb')
+ self.assertEqual(self.volume.id, volume.id)
+ self._wait_for_volume_status('in-use')
+
+ def nova_reboot(self):
+ self.server.reboot()
+ self._wait_for_server_status('ACTIVE')
+
+ def nova_floating_ip_create(self):
+ self.floating_ip = self.compute_client.floating_ips.create()
+ self.addCleanup(self.floating_ip.delete)
+
+ def nova_floating_ip_add(self):
+ self.server.add_floating_ip(self.floating_ip)
+
+ def nova_security_group_rule_create(self):
+ sgs = self.compute_client.security_groups.list()
+ for sg in sgs:
+ if sg.name == 'default':
+ secgroup = sg
+
+ ruleset = {
+ # ssh
+ 'ip_protocol': 'tcp',
+ 'from_port': 22,
+ 'to_port': 22,
+ 'cidr': '0.0.0.0/0',
+ 'group_id': None
+ }
+ sg_rule = self.compute_client.security_group_rules.create(secgroup.id,
+ **ruleset)
+ self.addCleanup(self.compute_client.security_group_rules.delete,
+ sg_rule.id)
+
+ def ssh_to_server(self):
+ username = self.config.scenario.ssh_user
+ self.linux_client = RemoteClient(self.floating_ip.ip,
+ username,
+ pkey=self.keypair.private_key)
+
+ def check_partitions(self):
+ partitions = self.linux_client.get_partitions()
+ self.assertEqual(1, partitions.count('vdb'))
+
+ def nova_volume_detach(self):
+ detach_volume_client = self.compute_client.volumes.delete_server_volume
+ detach_volume_client(self.server.id, self.volume.id)
+ self._wait_for_volume_status('available')
+
+ volume = self.volume_client.volumes.get(self.volume.id)
+ self.assertEqual('available', volume.status)
+
+ def test_minimum_basic_scenario(self):
+ self.glance_image_create()
+ self.nova_keypair_add()
+ self.nova_boot()
+ self.nova_list()
+ self.nova_show()
+ self.cinder_create()
+ self.cinder_list()
+ self.cinder_show()
+ self.nova_volume_attach()
+ self.cinder_show()
+ self.nova_reboot()
+
+ self.nova_floating_ip_create()
+ self.nova_floating_ip_add()
+ self.nova_security_group_rule_create()
+ self.ssh_to_server()
+ self.check_partitions()
+
+ self.nova_volume_detach()
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index 5ccfd52..b94caaa 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -31,10 +31,15 @@
* For a freshly-booted VM with an IP address ("port") on a given network:
- - the Tempest host can ping the IP address. This implies that
- the VM has been assigned the correct IP address and has
+ - the Tempest host can ping the IP address. This implies, but
+ does not guarantee (see the ssh check that follows), that the
+ VM has been assigned the correct IP address and has
connectivity to the Tempest host.
+ - the Tempest host can perform key-based authentication to an
+ ssh server hosted at the IP address. This check guarantees
+ that the IP address is associated with the target VM.
+
#TODO(mnewby) - Need to implement the following:
- the Tempest host can ssh into the VM via the IP address and
successfully execute the following:
@@ -214,12 +219,15 @@
raise self.skipTest(msg)
if not self.servers:
raise self.skipTest("No VM's have been created")
+ # The target login is assumed to have been configured for
+ # key-based authentication by cloud-init.
+ ssh_login = self.config.compute.image_ssh_user
+ private_key = self.keypairs[self.tenant_id].private_key
for server in self.servers:
for net_name, ip_addresses in server.networks.iteritems():
for ip_address in ip_addresses:
- self.assertTrue(self._ping_ip_address(ip_address),
- "Timed out waiting for %s's ip to become "
- "reachable" % server.name)
+ self._check_vm_connectivity(ip_address, ssh_login,
+ private_key)
@attr(type='smoke')
def test_007_assign_floating_ips(self):
@@ -237,9 +245,11 @@
def test_008_check_public_network_connectivity(self):
if not self.floating_ips:
raise self.skipTest('No floating ips have been allocated.')
+ # The target login is assumed to have been configured for
+ # key-based authentication by cloud-init.
+ ssh_login = self.config.compute.image_ssh_user
+ private_key = self.keypairs[self.tenant_id].private_key
for server, floating_ips in self.floating_ips.iteritems():
for floating_ip in floating_ips:
ip_address = floating_ip.floating_ip_address
- self.assertTrue(self._ping_ip_address(ip_address),
- "Timed out waiting for %s's ip to become "
- "reachable" % server.name)
+ self._check_vm_connectivity(ip_address, ssh_login, private_key)
diff --git a/tempest/services/identity/v3/json/identity_client.py b/tempest/services/identity/v3/json/identity_client.py
index 014df1e..adbdc83 100644
--- a/tempest/services/identity/v3/json/identity_client.py
+++ b/tempest/services/identity/v3/json/identity_client.py
@@ -160,3 +160,51 @@
(project_id, user_id, role_id), None,
self.headers)
return resp, body
+
+ def create_domain(self, name, **kwargs):
+ """Creates a domain."""
+ description = kwargs.get('description', None)
+ en = kwargs.get('enabled', True)
+ post_body = {
+ 'description': description,
+ 'enabled': en,
+ 'name': name
+ }
+ post_body = json.dumps({'domain': post_body})
+ resp, body = self.post('domains', post_body, self.headers)
+ body = json.loads(body)
+ return resp, body['domain']
+
+ def delete_domain(self, domain_id):
+ """Delete a domain."""
+ resp, body = self.delete('domains/%s' % str(domain_id))
+ return resp, body
+
+ def list_domains(self):
+ """List Domains."""
+ resp, body = self.get('domains')
+ body = json.loads(body)
+ return resp, body['domains']
+
+ def update_domain(self, domain_id, **kwargs):
+ """Updates a domain."""
+ resp, body = self.get_domain(domain_id)
+ description = kwargs.get('description', body['description'])
+ en = kwargs.get('enabled', body['enabled'])
+ name = kwargs.get('name', body['name'])
+ post_body = {
+ 'description': description,
+ 'enabled': en,
+ 'name': name
+ }
+ post_body = json.dumps({'domain': post_body})
+ resp, body = self.patch('domains/%s' % domain_id, post_body,
+ self.headers)
+ body = json.loads(body)
+ return resp, body['domain']
+
+ def get_domain(self, domain_id):
+ """Get Domain details."""
+ resp, body = self.get('domains/%s' % domain_id)
+ body = json.loads(body)
+ return resp, body['domain']
diff --git a/tempest/services/identity/v3/xml/identity_client.py b/tempest/services/identity/v3/xml/identity_client.py
index 92151dd..708ee28 100644
--- a/tempest/services/identity/v3/xml/identity_client.py
+++ b/tempest/services/identity/v3/xml/identity_client.py
@@ -44,6 +44,14 @@
array.append(xml_to_json(child))
return array
+ def _parse_domains(self, node):
+ array = []
+ for child in node.getchildren():
+ tag_list = child.tag.split('}', 1)
+ if tag_list[1] == "domain":
+ array.append(xml_to_json(child))
+ return array
+
def _parse_array(self, node):
array = []
for child in node.getchildren():
@@ -185,3 +193,51 @@
resp, body = self.put('projects/%s/users/%s/roles/%s' %
(project_id, user_id, role_id), '', self.headers)
return resp, body
+
+ def create_domain(self, name, **kwargs):
+ """Creates a domain."""
+ description = kwargs.get('description', None)
+ en = kwargs.get('enabled', True)
+ post_body = Element("domain",
+ xmlns=XMLNS,
+ name=name,
+ description=description,
+ enabled=str(en).lower())
+ resp, body = self.post('domains', str(Document(post_body)),
+ self.headers)
+ body = self._parse_body(etree.fromstring(body))
+ return resp, body
+
+ def list_domains(self):
+ """Get the list of domains."""
+ resp, body = self.get("domains", self.headers)
+ body = self._parse_domains(etree.fromstring(body))
+ return resp, body
+
+ def delete_domain(self, domain_id):
+ """Delete a domain."""
+ resp, body = self.delete('domains/%s' % domain_id, self.headers)
+ return resp, body
+
+ def update_domain(self, domain_id, **kwargs):
+ """Updates a domain."""
+ resp, body = self.get_domain(domain_id)
+ description = kwargs.get('description', body['description'])
+ en = kwargs.get('enabled', body['enabled'])
+ name = kwargs.get('name', body['name'])
+ post_body = Element("domain",
+ xmlns=XMLNS,
+ name=name,
+ description=description,
+ enabled=str(en).lower())
+ resp, body = self.patch('domains/%s' % domain_id,
+ str(Document(post_body)),
+ self.headers)
+ body = self._parse_body(etree.fromstring(body))
+ return resp, body
+
+ def get_domain(self, domain_id):
+ """Get Domain details."""
+ resp, body = self.get('domains/%s' % domain_id, self.headers)
+ body = self._parse_body(etree.fromstring(body))
+ return resp, body