Merge "Volume xml client translate json attribute names"
diff --git a/.coveragerc b/.coveragerc
new file mode 100644
index 0000000..c9b6467
--- /dev/null
+++ b/.coveragerc
@@ -0,0 +1,4 @@
+[run]
+branch = True
+source = tempest
+omit = tempest/tests/*,tempest/openstack/*
diff --git a/.gitignore b/.gitignore
index 8d2b281..1777cb9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,5 +15,6 @@
dist
build
.testrepository
-.coverage
+.coverage*
+!.coveragerc
cover/
diff --git a/.testr.conf b/.testr.conf
index abaf14a..4f6e0b3 100644
--- a/.testr.conf
+++ b/.testr.conf
@@ -2,6 +2,7 @@
test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-500} \
+ OS_TEST_LOCK_PATH=${OS_TEST_LOCK_PATH:-${TMPDIR:-'/tmp'}} \
${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./tempest/test_discover} $LISTOPT $IDOPTION
test_id_option=--load-list $IDFILE
test_list_option=--list
diff --git a/HACKING.rst b/HACKING.rst
index c0df0fb..8652971 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -120,13 +120,14 @@
- A json schema: defines properties for a request.
After that a test class must be added to automatically generate test scenarios
-out of the given interface description:
+out of the given interface description::
+
+ load_tests = test.NegativeAutoTest.load_tests
class SampeTestNegativeTestJSON(<your base class>, test.NegativeAutoTest):
_interface = 'json'
_service = 'compute'
- _schema_file = 'compute/servers/get_console_output.json'
- scenarios = test.NegativeAutoTest.generate_scenario(_schema_file)
+ _schema_file = <your Schema file>
Negative tests must be marked with a negative attribute::
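For illustration, a complete negative test class following this pattern (modelled on the FlavorCreateNegativeTestJSON change later in this diff; the base class and schema path are placeholders) might look like::

    from tempest.api.compute import base
    from tempest import test

    load_tests = test.NegativeAutoTest.load_tests


    @test.SimpleNegativeAutoTest
    class SampleNegativeTestJSON(base.BaseV2ComputeTest,
                                 test.NegativeAutoTest):
        _interface = 'json'
        _service = 'compute'
        _schema_file = 'compute/servers/get_console_output.json'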
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index d75de90..28a4d1c 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -101,14 +101,32 @@
# Options defined in tempest.config
#
-# Catalog type of the baremetal provisioning service. (string
+# Catalog type of the baremetal provisioning service (string
# value)
#catalog_type=baremetal
+# Whether the Ironic nova-compute driver is enabled (boolean
+# value)
+#driver_enabled=false
+
# The endpoint type to use for the baremetal provisioning
-# service. (string value)
+# service (string value)
#endpoint_type=publicURL
+# Timeout for Ironic node to completely provision (integer
+# value)
+#active_timeout=300
+
+# Timeout for association of Nova instance and Ironic node
+# (integer value)
+#association_timeout=10
+
+# Timeout for Ironic power transitions. (integer value)
+#power_timeout=20
+
+# Timeout for unprovisioning an Ironic node. (integer value)
+#unprovision_timeout=20
+
[boto]
@@ -128,6 +146,9 @@
# AWS Access Key (string value)
#aws_access=<None>
+# AWS Zone for EC2 tests (string value)
+#aws_zone=nova
+
# S3 Materials Path (string value)
#s3_materials_path=/opt/stack/devstack/files/images/s3-materials/cirros-0.3.0
@@ -190,7 +211,7 @@
# admin credentials are known. (boolean value)
#allow_tenant_isolation=false
-# Valid secondary image reference to be used in tests. (string
+# Valid primary image reference to be used in tests. (string
# value)
#image_ref={$IMAGE_ID}
@@ -229,6 +250,19 @@
# Should the tests ssh to instances? (boolean value)
#run_ssh=false
+# Auth method used to authenticate to the instance. Valid
+# choices are: keypair, configured, adminpass, disabled.
+# keypair: start the servers with an ssh keypair. configured:
+# use the configured user and password. adminpass: use the
+# injected adminPass. disabled: avoid using ssh when it is an
+# option. (string value)
+#ssh_auth_method=keypair
+
+# How to connect to the instance? fixed: use the first ip
+# belonging to the fixed network. floating: create and use a
+# floating ip. (string value)
+#ssh_connect_method=fixed
+
# User name used to authenticate to an instance. (string
# value)
#ssh_user=root
@@ -258,7 +292,7 @@
# IP version used for SSH connections. (integer value)
#ip_version_for_ssh=4
-# Dose the SSH uses Floating IP? (boolean value)
+# Does SSH use Floating IPs? (boolean value)
#use_floatingip_for_ssh=true
# Catalog type of the Compute service. (string value)
@@ -307,14 +341,18 @@
# Administrative Username to use for Nova API requests.
# (string value)
-#username=admin
+#username=<None>
# Administrative Tenant name to use for Nova API requests.
# (string value)
-#tenant_name=admin
+#tenant_name=<None>
# API key to use when authenticating as admin. (string value)
-#password=pass
+#password=<None>
+
+# Domain name for authentication as admin (Keystone V3). The
+# same domain applies to user and project (string value)
+#domain_name=<None>
[compute-feature-enabled]
@@ -341,12 +379,16 @@
# password? (boolean value)
#change_password=false
-# Does the test environment support snapshots? (boolean value)
-#create_image=false
-
# Does the test environment support resizing? (boolean value)
#resize=false
+# Does the test environment support pausing? (boolean value)
+#pause=true
+
+# Does the test environment support suspend/resume? (boolean
+# value)
+#suspend=true
+
# Does the test environment support live migration available?
# (boolean value)
#live_migration=false
@@ -363,6 +405,14 @@
# as [nova.vnc]->vnc_enabled in nova.conf (boolean value)
#vnc_console=false
+# Enable Spice console. This configuration value should be the
+# same as [nova.spice]->enabled in nova.conf (boolean value)
+#spice_console=false
+
+# Enable RDP console. This configuration value should be the
+# same as [nova.rdp]->enabled in nova.conf (boolean value)
+#rdp_console=false
+
[dashboard]
@@ -404,6 +454,10 @@
# value)
#db_flavor_ref=1
+# Current database version to use in database tests. (string
+# value)
+#db_current_version=v1.0
+
[debug]
@@ -414,6 +468,22 @@
# Enable diagnostic commands (boolean value)
#enable=true
+# A regex to determine which requests should be traced. This
+# is a regex to match the caller for rest client requests to
+# be able to selectively trace calls out of specific classes
+# and methods. It largely exists for test development, and is
+# not expected to be used in a real deploy of tempest. This
+# will be matched against the discovered ClassName:method in
+# the test environment. Expected values for this field are:
+#  * ClassName:test_method_name - traces one test_method
+#  * ClassName:setUp(Class) - traces specific setup functions
+#  * ClassName:tearDown(Class) - traces specific teardown functions
+#  * ClassName:_run_cleanups - traces the cleanup functions
+# If nothing is specified, this feature is not enabled. To
+# trace everything specify .* as the regex. (string value)
+#trace_requests=
+
[identity]
@@ -451,16 +521,20 @@
#endpoint_type=publicURL
# Username to use for Nova API requests. (string value)
-#username=demo
+#username=<None>
# Tenant name to use for Nova API requests. (string value)
-#tenant_name=demo
+#tenant_name=<None>
# Role required to administrate keystone. (string value)
#admin_role=admin
# API key to use when authenticating. (string value)
-#password=pass
+#password=<None>
+
+# Domain name for authentication (Keystone V3). The same
+# domain applies to user and project (string value)
+#domain_name=<None>
# Username of alternate user to use for Nova API requests.
# (string value)
@@ -474,16 +548,24 @@
# (string value)
#alt_password=<None>
+# Alternate domain name for authentication (Keystone V3). The
+# same domain applies to user and project (string value)
+#alt_domain_name=<None>
+
# Administrative Username to use for Keystone API requests.
# (string value)
-#admin_username=admin
+#admin_username=<None>
# Administrative Tenant name to use for Keystone API requests.
# (string value)
-#admin_tenant_name=admin
+#admin_tenant_name=<None>
# API key to use when authenticating as admin. (string value)
-#admin_password=pass
+#admin_password=<None>
+
+# Admin domain name for authentication (Keystone V3). The
+# same domain applies to user and project (string value)
+#admin_domain_name=<None>
[identity-feature-enabled]
@@ -711,7 +793,7 @@
# Timeout in seconds to wait for a stack to build. (integer
# value)
-#build_timeout=600
+#build_timeout=1200
# Instance type for tests. Needs to be big enough for a full
# OS plus the test workload (string value)
@@ -729,6 +811,10 @@
# (integer value)
#max_template_size=524288
+# Value must match heat configuration of the same name.
+# (integer value)
+#max_resources_per_stack=1000
+
[queuing]
@@ -807,9 +893,9 @@
# value)
#horizon=true
-# Whether or not Savanna is expected to be available (boolean
+# Whether or not Sahara is expected to be available (boolean
# value)
-#savanna=false
+#sahara=false
# Whether or not Ironic is expected to be available (boolean
# value)
@@ -933,6 +1019,10 @@
# value)
#disk_format=raw
+# Default size in GB for volumes created by volumes tests
+# (integer value)
+#volume_size=1
+
[volume-feature-enabled]
@@ -947,6 +1037,9 @@
# Runs Cinder volumes backup test (boolean value)
#backup=true
+# Runs Cinder volume snapshot test (boolean value)
+#snapshot=true
+
# A list of enabled volume extensions with a special entry all
# which indicates every extension is enabled (list value)
#api_extensions=all
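The new [compute] ssh options above are read through tempest's config wrapper just like the existing run_ssh flag. A minimal sketch of how a test might branch on them (assuming the option names map directly onto CONF.compute attributes, as the surrounding options do):

    from tempest import config

    CONF = config.CONF


    def ssh_is_usable():
        # 'disabled' means callers should fall back to non-ssh validation.
        return (CONF.compute.run_ssh and
                CONF.compute.ssh_auth_method != 'disabled')


    def needs_floating_ip():
        # 'fixed' connects to the first address on the fixed network;
        # 'floating' expects the test to allocate and associate a
        # floating IP before connecting.
        return (ssh_is_usable() and
                CONF.compute.ssh_connect_method == 'floating')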
diff --git a/etc/whitelist.yaml b/etc/whitelist.yaml
index 2d8b741..e69de29 100644
--- a/etc/whitelist.yaml
+++ b/etc/whitelist.yaml
@@ -1,233 +0,0 @@
-n-cpu:
- - module: "nova.virt.libvirt.driver"
- message: "During wait destroy, instance disappeared"
- - module: "glanceclient.common.http"
- message: "Request returned failure status"
- - module: "nova.openstack.common.periodic_task"
- message: "Error during ComputeManager\\.update_available_resource: \
- 'NoneType' object is not iterable"
- - module: "nova.compute.manager"
- message: "Possibly task preempted"
- - module: "nova.openstack.common.rpc.amqp"
- message: "Exception during message handling"
- - module: "nova.network.api"
- message: "Failed storing info cache"
- - module: "nova.compute.manager"
- message: "Error while trying to clean up image"
- - module: "nova.virt.libvirt.driver"
- message: "Error injecting data into image.*\\(Unexpected error while \
- running command"
- - module: "nova.compute.manager"
- message: "Instance failed to spawn"
- - module: "nova.compute.manager"
- message: "Error: Unexpected error while running command"
- - module: "nova.virt.libvirt.driver"
- message: "Error from libvirt during destroy"
- - module: "nova.virt.libvirt.vif"
- message: "Failed while unplugging vif"
- - module: "nova.openstack.common.loopingcal"
- message: "in fixed duration looping call"
- - module: "nova.virt.libvirt.driver"
- message: "Getting disk size of instance"
- - module: "nova.virt.libvirt.driver"
- message: "No such file or directory: '/opt/stack/data/nova/instances"
- - module: "nova.virt.libvirt.driver"
- message: "Nova requires libvirt version 0\\.9\\.11 or greater"
- - module: "nova.compute.manager"
- message: "error during stop\\(\\) in sync_power_state"
- - module: "nova.compute.manager"
- message: "Instance failed network setup after 1 attempt"
- - module: "nova.compute.manager"
- message: "Periodic sync_power_state task had an error"
- - module: "nova.virt.driver"
- message: "Info cache for instance .* could not be found"
-
-g-api:
- - module: "glance.store.sheepdog"
- message: "Error in store configuration: Unexpected error while \
- running command"
- - module: "swiftclient"
- message: "Container HEAD failed: .*404 Not Found"
- - module: "glance.api.middleware.cache"
- message: "however the registry did not contain metadata for that image"
- - module: "oslo.messaging.notify._impl_messaging"
- message: ".*"
-
-ceilometer-acompute:
- - module: "ceilometer.compute.pollsters.disk"
- message: "Unable to read from monitor: Connection reset by peer"
- - module: "ceilometer.compute.pollsters.disk"
- message: "Requested operation is not valid: domain is not running"
- - module: "ceilometer.compute.pollsters.net"
- message: "Requested operation is not valid: domain is not running"
- - module: "ceilometer.compute.pollsters.disk"
- message: "Domain not found: no domain with matching uuid"
- - module: "ceilometer.compute.pollsters.net"
- message: "Domain not found: no domain with matching uuid"
- - module: "ceilometer.compute.pollsters.net"
- message: "No module named libvirt"
- - module: "ceilometer.compute.pollsters.net"
- message: "Unable to write to monitor: Broken pipe"
- - module: "ceilometer.compute.pollsters.cpu"
- message: "Domain not found: no domain with matching uuid"
- - module: "ceilometer.compute.pollsters.net"
- message: ".*"
- - module: "ceilometer.compute.pollsters.disk"
- message: ".*"
-
-ceilometer-acentral:
- - module: "ceilometer.central.manager"
- message: "403 Forbidden"
- - module: "ceilometer.central.manager"
- message: "get_samples\\(\\) got an unexpected keyword argument 'resources'"
-
-ceilometer-alarm-evaluator:
- - module: "ceilometer.alarm.service"
- message: "alarm evaluation cycle failed"
- - module: "ceilometer.alarm.evaluator.threshold"
- message: ".*"
-
-ceilometer-api:
- - module: "wsme.api"
- message: ".*"
-
-h-api:
- - module: "root"
- message: "Returning 400 to user: The server could not comply with \
- the request since it is either malformed or otherwise incorrect"
- - module: "root"
- message: "Unexpected error occurred serving API: Request limit \
- exceeded: Template exceeds maximum allowed size"
- - module: "root"
- message: "Unexpected error occurred serving API: The Stack \
- .*could not be found"
-
-h-eng:
- - module: "heat.openstack.common.rpc.amqp"
- message: "Exception during message handling"
- - module: "heat.openstack.common.rpc.common"
- message: "The Stack .* could not be found"
-
-n-api:
- - module: "glanceclient.common.http"
- message: "Request returned failure status"
- - module: "nova.api.openstack"
- message: "Caught error: Quota exceeded for"
- - module: "nova.compute.api"
- message: "ServerDiskConfigTest"
- - module: "nova.compute.api"
- message: "ServersTest"
- - module: "nova.compute.api"
- message: "\\{u'kernel_id'.*u'ramdisk_id':"
- - module: "nova.api.openstack.wsgi"
- message: "takes exactly 4 arguments"
- - module: "nova.api.openstack"
- message: "Caught error: Instance .* could not be found"
- - module: "nova.api.metadata.handler"
- message: "Failed to get metadata for instance id:"
-
-n-cond:
- - module: "nova.notifications"
- message: "Failed to send state update notification"
- - module: "nova.openstack.common.rpc.amqp"
- message: "Exception during message handling"
- - module: "nova.openstack.common.rpc.common"
- message: "but the actual state is deleting to caller"
- - module: "nova.openstack.common.rpc.common"
- message: "Traceback \\(most recent call last"
- - module: "nova.openstack.common.threadgroup"
- message: "Service with host .* topic conductor exists."
-
-n-sch:
- - module: "nova.scheduler.filter_scheduler"
- message: "Error from last host: "
-
-n-net:
- - module: "nova.openstack.common.rpc.amqp"
- message: "Exception during message handling"
- - module: "nova.openstack.common.rpc.common"
- message: "'NoneType' object has no attribute '__getitem__'"
- - module: "nova.openstack.common.rpc.common"
- message: "Instance .* could not be found"
-
-c-api:
- - module: "cinder.api.middleware.fault"
- message: "Caught error: Volume .* could not be found"
- - module: "cinder.api.middleware.fault"
- message: "Caught error: Snapshot .* could not be found"
- - module: "cinder.api.openstack.wsgi"
- message: "argument must be a string or a number, not 'NoneType'"
- - module: "cinder.volume.api"
- message: "Volume status must be available to reserve"
-
-c-vol:
- - module: "cinder.brick.iscsi.iscsi"
- message: "Failed to create iscsi target for volume id"
- - module: "cinder.brick.local_dev.lvm"
- message: "stat failed: No such file or directory"
- - module: "cinder.brick.local_dev.lvm"
- message: "LV stack-volumes.*in use: not deactivating"
- - module: "cinder.brick.local_dev.lvm"
- message: "Can't remove open logical volume"
-
-ceilometer-collector:
- - module: "stevedore.extension"
- message: ".*"
- - module: "ceilometer.collector.dispatcher.database"
- message: "duplicate key value violates unique constraint"
- - module: "ceilometer.collector.dispatcher.database"
- message: "Failed to record metering data: QueuePool limit"
- - module: "ceilometer.dispatcher.database"
- message: "\\(DataError\\) integer out of range"
- - module: "ceilometer.collector.dispatcher.database"
- message: "Failed to record metering data: .* integer out of range"
- - module: "ceilometer.collector.dispatcher.database"
- message: "Failed to record metering data: .* integer out of range"
- - module: "ceilometer.openstack.common.db.sqlalchemy.session"
- message: "DB exception wrapped"
-
-q-agt:
- - module: "neutron.agent.linux.ovs_lib"
- message: "Unable to execute.*Exception:"
-
-q-dhcp:
- - module: "neutron.common.legacy"
- message: "Skipping unknown group key: firewall_driver"
- - module: "neutron.agent.dhcp_agent"
- message: "Unable to enable dhcp"
- - module: "neutron.agent.dhcp_agent"
- message: "Network .* RPC info call failed"
-
-q-l3:
- - module: "neutron.common.legacy"
- message: "Skipping unknown group key: firewall_driver"
- - module: "neutron.agent.l3_agent"
- message: "Failed synchronizing routers"
-
-q-vpn:
- - module: "neutron.common.legacy"
- message: "Skipping unknown group key: firewall_driver"
-
-q-lbaas:
- - module: "neutron.common.legacy"
- message: "Skipping unknown group key: firewall_driver"
- - module: "neutron.services.loadbalancer.drivers.haproxy.agent_manager"
- message: "Error upating stats"
- - module: "neutron.services.loadbalancer.drivers.haproxy.agent_manager"
- message: "Unable to destroy device for pool"
-
-q-svc:
- - module: "neutron.common.legacy"
- message: "Skipping unknown group key: firewall_driver"
- - module: "neutron.openstack.common.rpc.amqp"
- message: "Exception during message handling"
- - module: "neutron.openstack.common.rpc.common"
- message: "(Network|Pool|Subnet|Agent|Port) .* could not be found"
- - module: "neutron.api.v2.resource"
- message: ".* failed"
- - module: ".*"
- message: ".*"
-
-s-proxy:
- - module: "proxy-server"
- message: "Timeout talking to memcached"
diff --git a/requirements.txt b/requirements.txt
index 48d1b12..75a61e7 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,4 @@
-pbr>=0.6,<1.0
+pbr>=0.6,!=0.7,<1.0
anyjson>=0.3.3
httplib2>=0.7.5
jsonschema>=2.0.0,<3.0.0
@@ -8,18 +8,19 @@
paramiko>=1.9.0
netaddr>=0.7.6
python-glanceclient>=0.9.0
-python-keystoneclient>=0.6.0
+python-keystoneclient>=0.8.0
python-novaclient>=2.17.0
python-neutronclient>=2.3.4,<3
python-cinderclient>=1.0.6
python-heatclient>=0.2.3
-python-savannaclient>=0.5.0
-python-swiftclient>=1.6
+python-ironicclient
+python-saharaclient>=0.6.0
+python-swiftclient>=2.0.2
testresources>=0.2.4
-keyring>=1.6.1,<2.0,>=2.1
+keyring>=2.1
testrepository>=0.0.18
oslo.config>=1.2.0
-six>=1.5.2
-iso8601>=0.1.8
+six>=1.6.0
+iso8601>=0.1.9
fixtures>=0.3.14
testscenarios>=0.4
diff --git a/setup.cfg b/setup.cfg
index a701572..f4aa3e1 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -17,6 +17,10 @@
Programming Language :: Python :: 2
Programming Language :: Python :: 2.7
+[entry_points]
+console_scripts =
+ verify-tempest-config = tempest.cmd.verify_tempest_config:main
+
[build_sphinx]
all_files = 1
build-dir = doc/build
diff --git a/setup.py b/setup.py
index 70c2b3f..7363757 100755
--- a/setup.py
+++ b/setup.py
@@ -17,6 +17,14 @@
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools
+# In python < 2.7.4, a lazy loading of package `pbr` will break
+# setuptools if some other modules registered functions in `atexit`.
+# solution from: http://bugs.python.org/issue15881#msg170215
+try:
+ import multiprocessing # noqa
+except ImportError:
+ pass
+
setuptools.setup(
setup_requires=['pbr'],
pbr=True)
diff --git a/tempest/api/baremetal/base.py b/tempest/api/baremetal/base.py
index 2e745f8..021adaf 100644
--- a/tempest/api/baremetal/base.py
+++ b/tempest/api/baremetal/base.py
@@ -106,17 +106,20 @@
@classmethod
@creates('port')
- def create_port(cls, node_id, address=None):
+ def create_port(cls, node_id, address, extra=None, uuid=None):
"""
Wrapper utility for creating test ports.
- :param address: MAC address of the port. If not supplied, a random
- value will be generated.
+ :param address: MAC address of the port.
+ :param extra: Meta data of the port. If not supplied, an empty
+ dictionary will be created.
+ :param uuid: UUID of the port.
:return: Created port.
"""
- address = address or data_utils.rand_mac_address()
- resp, body = cls.client.create_port(address=address, node_id=node_id)
+ extra = extra or {}
+ resp, body = cls.client.create_port(address=address, node_id=node_id,
+ extra=extra, uuid=uuid)
return {'port': body, 'response': resp}
@@ -170,3 +173,12 @@
cls.created_objects['port'].remove(port_id)
return resp
+
+ def validate_self_link(self, resource, uuid, link):
+ """Check whether the given self link formatted correctly."""
+ expected_link = "{base}/{pref}/{res}/{uuid}".format(
+ base=self.client.base_url,
+ pref=self.client.uri_prefix,
+ res=resource,
+ uuid=uuid)
+ self.assertEqual(expected_link, link)
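A minimal sketch (not part of this change) showing how a test built on the updated base class would use the new create_port() signature and validate_self_link() helper, mirroring the port tests below:

    from tempest.api.baremetal import base
    from tempest.common.utils import data_utils


    class PortSelfLinkExample(base.BaseBaremetalTest):

        def test_port_self_link(self):
            chassis = self.create_chassis()['chassis']
            node = self.create_node(chassis['uuid'])['node']
            # address is now mandatory; extra and uuid stay optional.
            port = self.create_port(node_id=node['uuid'],
                                    address=data_utils.rand_mac_address(),
                                    extra={'key': 'value'})['port']
            resp, body = self.client.show_port(port['uuid'])
            # The helper checks <base_url>/<uri_prefix>/ports/<uuid>.
            self.validate_self_link('ports', body['uuid'],
                                    body['links'][0]['href'])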
diff --git a/tempest/api/baremetal/test_drivers.py b/tempest/api/baremetal/test_drivers.py
new file mode 100644
index 0000000..445ca60
--- /dev/null
+++ b/tempest/api/baremetal/test_drivers.py
@@ -0,0 +1,26 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.baremetal import base
+from tempest import test
+
+
+class TestDrivers(base.BaseBaremetalTest):
+ """Tests for drivers."""
+
+ @test.attr(type="smoke")
+ def test_list_drivers(self):
+ resp, drivers = self.client.list_drivers()
+ self.assertEqual('200', resp['status'])
+ self.assertIn('fake', [d['name'] for d in drivers['drivers']])
diff --git a/tempest/api/baremetal/test_nodestates.py b/tempest/api/baremetal/test_nodestates.py
new file mode 100644
index 0000000..c658d7f
--- /dev/null
+++ b/tempest/api/baremetal/test_nodestates.py
@@ -0,0 +1,33 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.baremetal import base
+from tempest import test
+
+
+class TestNodeStates(base.BaseBaremetalTest):
+ """Tests for baremetal NodeStates."""
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestNodeStates, cls).setUpClass()
+ chassis = cls.create_chassis()['chassis']
+ cls.node = cls.create_node(chassis['uuid'])['node']
+
+ @test.attr(type='smoke')
+ def test_list_nodestates(self):
+ resp, nodestates = self.client.list_nodestates(self.node['uuid'])
+ self.assertEqual('200', resp['status'])
+ for key in nodestates:
+ self.assertEqual(nodestates[key], self.node[key])
diff --git a/tempest/api/baremetal/test_ports.py b/tempest/api/baremetal/test_ports.py
index fb2acc7..8b76811 100644
--- a/tempest/api/baremetal/test_ports.py
+++ b/tempest/api/baremetal/test_ports.py
@@ -30,54 +30,268 @@
node_id = self.node['uuid']
address = data_utils.rand_mac_address()
- port = self.create_port(node_id=node_id, address=address)['port']
+ result = self.create_port(node_id=node_id, address=address)
- self.assertEqual(port['address'], address)
- self.assertEqual(port['node_uuid'], node_id)
+ port = result['port']
+
+ resp, body = self.client.show_port(port['uuid'])
+
+ self.assertEqual(200, resp.status)
+ self.assertEqual(port['uuid'], body['uuid'])
+ self.assertEqual(address, body['address'])
+ self.assertEqual({}, body['extra'])
+ self.assertEqual(node_id, body['node_uuid'])
+
+ @test.attr(type='smoke')
+ def test_create_port_specifying_uuid(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+ uuid = data_utils.rand_uuid()
+
+ self.create_port(node_id=node_id, address=address, uuid=uuid)
+
+ resp, body = self.client.show_port(uuid)
+
+ self.assertEqual(200, resp.status)
+ self.assertEqual(uuid, body['uuid'])
+ self.assertEqual(address, body['address'])
+ self.assertEqual({}, body['extra'])
+ self.assertEqual(node_id, body['node_uuid'])
+
+ @test.attr(type='smoke')
+ def test_create_port_with_extra(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+ extra = {'key': 'value'}
+
+ result = self.create_port(node_id=node_id, address=address,
+ extra=extra)
+ port = result['port']
+
+ resp, body = self.client.show_port(port['uuid'])
+
+ self.assertEqual(200, resp.status)
+ self.assertEqual(port['uuid'], body['uuid'])
+ self.assertEqual(address, body['address'])
+ self.assertEqual(extra, body['extra'])
+ self.assertEqual(node_id, body['node_uuid'])
@test.attr(type='smoke')
def test_delete_port(self):
node_id = self.node['uuid']
- port_id = self.create_port(node_id=node_id)['port']['uuid']
+ address = data_utils.rand_mac_address()
+ port_id = self.create_port(node_id=node_id, address=address)['port'][
+ 'uuid']
resp = self.delete_port(port_id)
- self.assertEqual(resp['status'], '204')
+ self.assertEqual(204, resp.status)
self.assertRaises(exc.NotFound, self.client.show_port, port_id)
@test.attr(type='smoke')
def test_show_port(self):
node_id = self.node['uuid']
address = data_utils.rand_mac_address()
+ extra = {'key': 'value'}
- port_id = self.create_port(node_id=node_id,
- address=address)['port']['uuid']
+ port_id = self.create_port(node_id=node_id, address=address,
+ extra=extra)['port']['uuid']
resp, port = self.client.show_port(port_id)
- self.assertEqual(port['uuid'], port_id)
- self.assertEqual(port['address'], address)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(port_id, port['uuid'])
+ self.assertEqual(address, port['address'])
+ self.assertEqual(extra, port['extra'])
+
+ @test.attr(type='smoke')
+ def test_show_port_with_links(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+
+ port_id = self.create_port(node_id=node_id, address=address)['port'][
+ 'uuid']
+
+ resp, body = self.client.show_port(port_id)
+
+ self.assertEqual(200, resp.status)
+ self.assertIn('links', body.keys())
+ self.assertEqual(2, len(body['links']))
+ self.assertIn(port_id, body['links'][0]['href'])
@test.attr(type='smoke')
def test_list_ports(self):
node_id = self.node['uuid']
- uuids = [self.create_port(node_id=node_id)['port']['uuid']
- for i in range(0, 5)]
+ uuids = [self.create_port(node_id=node_id,
+ address=data_utils.rand_mac_address())
+ ['port']['uuid'] for i in xrange(5)]
resp, body = self.client.list_ports()
+ self.assertEqual(200, resp.status)
loaded_uuids = [p['uuid'] for p in body['ports']]
- for u in uuids:
- self.assertIn(u, loaded_uuids)
+ for uuid in uuids:
+ self.assertIn(uuid, loaded_uuids)
+
+ # Verify self links.
+ for port in body['ports']:
+ self.validate_self_link('ports', port['uuid'],
+ port['links'][0]['href'])
@test.attr(type='smoke')
- def test_update_port(self):
+ def test_list_with_limit(self):
node_id = self.node['uuid']
- port_id = self.create_port(node_id=node_id)['port']['uuid']
+
+ for i in xrange(5):
+ self.create_port(node_id=node_id,
+ address=data_utils.rand_mac_address())
+
+ resp, body = self.client.list_ports(limit=3)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(3, len(body['ports']))
+
+ next_marker = body['ports'][-1]['uuid']
+ self.assertIn(next_marker, body['next'])
+
+ def test_list_ports_details(self):
+ node_id = self.node['uuid']
+
+ uuids = [
+ self.create_port(node_id=node_id,
+ address=data_utils.rand_mac_address())
+ ['port']['uuid'] for i in range(0, 5)]
+
+ resp, body = self.client.list_ports_detail()
+ self.assertEqual(200, resp.status)
+
+ ports_dict = {port['uuid']: port for port in body['ports']
+ if port['uuid'] in uuids}
+
+ for uuid in uuids:
+ self.assertIn(uuid, ports_dict)
+ port = ports_dict[uuid]
+ self.assertIn('extra', port)
+ self.assertIn('node_uuid', port)
+ # never expose the node_id
+ self.assertNotIn('node_id', port)
+ # Verify self link.
+ self.validate_self_link('ports', port['uuid'],
+ port['links'][0]['href'])
+
+ @test.attr(type='smoke')
+ def test_update_port_replace(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+ extra = {'key1': 'value1', 'key2': 'value2', 'key3': 'value3'}
+
+ port_id = self.create_port(node_id=node_id, address=address,
+ extra=extra)['port']['uuid']
new_address = data_utils.rand_mac_address()
- self.client.update_port(port_id, address=new_address)
+ new_extra = {'key1': 'new-value1', 'key2': 'new-value2',
+ 'key3': 'new-value3'}
+
+ patch = [{'path': '/address',
+ 'op': 'replace',
+ 'value': new_address},
+ {'path': '/extra/key1',
+ 'op': 'replace',
+ 'value': new_extra['key1']},
+ {'path': '/extra/key2',
+ 'op': 'replace',
+ 'value': new_extra['key2']},
+ {'path': '/extra/key3',
+ 'op': 'replace',
+ 'value': new_extra['key3']}]
+
+ self.client.update_port(port_id, patch)
resp, body = self.client.show_port(port_id)
- self.assertEqual(body['address'], new_address)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(new_address, body['address'])
+ self.assertEqual(new_extra, body['extra'])
+
+ @test.attr(type='smoke')
+ def test_update_port_remove(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+ extra = {'key1': 'value1', 'key2': 'value2', 'key3': 'value3'}
+
+ port_id = self.create_port(node_id=node_id, address=address,
+ extra=extra)['port']['uuid']
+
+ # Removing one item from the collection
+ resp, _ = self.client.update_port(port_id, [{'path': '/extra/key2',
+ 'op': 'remove'}])
+ self.assertEqual(200, resp.status)
+ extra.pop('key2')
+ resp, body = self.client.show_port(port_id)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(extra, body['extra'])
+
+ # Removing the collection
+ resp, _ = self.client.update_port(port_id, [{'path': '/extra',
+ 'op': 'remove'}])
+ self.assertEqual(200, resp.status)
+ resp, body = self.client.show_port(port_id)
+ self.assertEqual(200, resp.status)
+ self.assertEqual({}, body['extra'])
+
+ # Assert nothing else was changed
+ self.assertEqual(node_id, body['node_uuid'])
+ self.assertEqual(address, body['address'])
+
+ @test.attr(type='smoke')
+ def test_update_port_add(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+
+ port_id = self.create_port(node_id=node_id, address=address)['port'][
+ 'uuid']
+
+ extra = {'key1': 'value1', 'key2': 'value2'}
+
+ patch = [{'path': '/extra/key1',
+ 'op': 'add',
+ 'value': extra['key1']},
+ {'path': '/extra/key2',
+ 'op': 'add',
+ 'value': extra['key2']}]
+
+ self.client.update_port(port_id, patch)
+
+ resp, body = self.client.show_port(port_id)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(extra, body['extra'])
+
+ @test.attr(type='smoke')
+ def test_update_port_mixed_ops(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+ extra = {'key1': 'value1', 'key2': 'value2'}
+
+ port_id = self.create_port(node_id=node_id, address=address,
+ extra=extra)['port']['uuid']
+
+ new_address = data_utils.rand_mac_address()
+ new_extra = {'key1': 'new-value1', 'key3': 'new-value3'}
+
+ patch = [{'path': '/address',
+ 'op': 'replace',
+ 'value': new_address},
+ {'path': '/extra/key1',
+ 'op': 'replace',
+ 'value': new_extra['key1']},
+ {'path': '/extra/key2',
+ 'op': 'remove'},
+ {'path': '/extra/key3',
+ 'op': 'add',
+ 'value': new_extra['key3']}]
+
+ self.client.update_port(port_id, patch)
+
+ resp, body = self.client.show_port(port_id)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(new_address, body['address'])
+ self.assertEqual(new_extra, body['extra'])
diff --git a/tempest/api/baremetal/test_ports_negative.py b/tempest/api/baremetal/test_ports_negative.py
index 6cb8812..4cbe00e 100644
--- a/tempest/api/baremetal/test_ports_negative.py
+++ b/tempest/api/baremetal/test_ports_negative.py
@@ -25,16 +25,346 @@
chassis = self.create_chassis()['chassis']
self.node = self.create_node(chassis['uuid'])['node']
- @test.attr(type='negative')
- def test_create_port_invalid_mac(self):
+ @test.attr(type=['negative', 'smoke'])
+ def test_create_port_malformed_mac(self):
node_id = self.node['uuid']
- address = 'not an uuid'
+ address = 'malformed:mac'
self.assertRaises(exc.BadRequest,
self.create_port, node_id=node_id, address=address)
- @test.attr(type='negative')
- def test_create_port_wrong_node_id(self):
- node_id = str(data_utils.rand_uuid())
+ @test.attr(type=['negative', 'smoke'])
+ def test_create_port_malformed_extra(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+ extra = {'key': 0.123}
+ self.assertRaises(exc.BadRequest,
+ self.create_port, node_id=node_id,
+ address=address, extra=extra)
- self.assertRaises(exc.BadRequest, self.create_port, node_id=node_id)
+ @test.attr(type=['negative', 'smoke'])
+ def test_create_port_nonexistent_node_id(self):
+ node_id = str(data_utils.rand_uuid())
+ address = data_utils.rand_mac_address()
+ self.assertRaises(exc.BadRequest, self.create_port, node_id=node_id,
+ address=address)
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_show_port_malformed_uuid(self):
+ self.assertRaises(exc.BadRequest, self.client.show_port,
+ 'malformed:uuid')
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_show_port_nonexistent_uuid(self):
+ self.assertRaises(exc.NotFound, self.client.show_port,
+ data_utils.rand_uuid())
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_show_port_by_mac_not_allowed(self):
+ self.assertRaises(exc.BadRequest, self.client.show_port,
+ data_utils.rand_mac_address())
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_create_port_duplicated_port_uuid(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+ uuid = data_utils.rand_uuid()
+
+ self.create_port(node_id=node_id, address=address, uuid=uuid)
+ self.assertRaises(exc.Conflict, self.create_port, node_id=node_id,
+ address=address, uuid=uuid)
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_create_port_no_mandatory_field_node_id(self):
+ address = data_utils.rand_mac_address()
+
+ self.assertRaises(exc.BadRequest, self.create_port, node_id=None,
+ address=address)
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_create_port_no_mandatory_field_mac(self):
+ node_id = self.node['uuid']
+
+ self.assertRaises(exc.BadRequest, self.create_port, node_id=node_id,
+ address=None)
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_create_port_malformed_port_uuid(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+ uuid = 'malformed:uuid'
+
+ self.assertRaises(exc.BadRequest, self.create_port, node_id=node_id,
+ address=address, uuid=uuid)
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_create_port_malformed_node_id(self):
+ address = data_utils.rand_mac_address()
+ self.assertRaises(exc.BadRequest, self.create_port,
+ node_id='malformed:nodeid', address=address)
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_create_port_duplicated_mac(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+ self.create_port(node_id=node_id, address=address)
+ self.assertRaises(exc.Conflict,
+ self.create_port, node_id=node_id,
+ address=address)
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_update_port_by_mac_not_allowed(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+ extra = {'key': 'value'}
+
+ self.create_port(node_id=node_id, address=address, extra=extra)
+
+ patch = [{'path': '/extra/key',
+ 'op': 'replace',
+ 'value': 'new-value'}]
+
+ self.assertRaises(exc.BadRequest,
+ self.client.update_port, address,
+ patch)
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_update_port_nonexistent(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+ extra = {'key': 'value'}
+
+ port_id = self.create_port(node_id=node_id, address=address,
+ extra=extra)['port']['uuid']
+ self.client.delete_port(port_id)
+
+ patch = [{'path': '/extra/key',
+ 'op': 'replace',
+ 'value': 'new-value'}]
+ self.assertRaises(exc.NotFound,
+ self.client.update_port, port_id, patch)
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_update_port_malformed_port_uuid(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+
+ self.create_port(node_id=node_id, address=address)
+
+ new_address = data_utils.rand_mac_address()
+ self.assertRaises(exc.BadRequest, self.client.update_port,
+ uuid='malformed:uuid',
+ patch=[{'path': '/address', 'op': 'replace',
+ 'value': new_address}])
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_update_port_add_malformed_extra(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+
+ port_id = self.create_port(node_id=node_id, address=address)['port'][
+ 'uuid']
+
+ self.assertRaises(exc.BadRequest, self.client.update_port, port_id,
+ [{'path': '/extra/key', 'op': 'add',
+ 'value': 0.123}])
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_update_port_add_whole_malformed_extra(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+
+ port_id = self.create_port(node_id=node_id, address=address)['port'][
+ 'uuid']
+
+ self.assertRaises(exc.BadRequest, self.client.update_port, port_id,
+ [{'path': '/extra',
+ 'op': 'add',
+ 'value': [1, 2, 3, 4, 'a']}])
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_update_port_add_nonexistent_property(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+
+ port_id = self.create_port(node_id=node_id, address=address)['port'][
+ 'uuid']
+ self.assertRaises(exc.BadRequest, self.client.update_port, port_id,
+ [{'path': '/nonexistent', 'op': 'add',
+ 'value': 'value'}])
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_update_port_replace_node_id_with_malformed(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+
+ port_id = self.create_port(node_id=node_id,
+ address=address)['port']['uuid']
+
+ patch = [{'path': '/node_uuid',
+ 'op': 'replace',
+ 'value': 'malformed:node_uuid'}]
+ self.assertRaises(exc.BadRequest,
+ self.client.update_port, port_id, patch)
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_update_port_replace_mac_with_duplicated(self):
+ node_id = self.node['uuid']
+ address1 = data_utils.rand_mac_address()
+ address2 = data_utils.rand_mac_address()
+
+ self.create_port(node_id=node_id, address=address1)
+ port_id = self.create_port(node_id=node_id,
+ address=address2)['port']['uuid']
+ patch = [{'path': '/address',
+ 'op': 'replace',
+ 'value': address1}]
+ self.assertRaises(exc.Conflict,
+ self.client.update_port, port_id, patch)
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_update_port_replace_node_id_with_nonexistent(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+
+ port_id = self.create_port(node_id=node_id,
+ address=address)['port']['uuid']
+
+ patch = [{'path': '/node_uuid',
+ 'op': 'replace',
+ 'value': data_utils.rand_uuid()}]
+ self.assertRaises(exc.BadRequest,
+ self.client.update_port, port_id, patch)
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_update_port_replace_mac_with_malformed(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+
+ port_id = self.create_port(node_id=node_id,
+ address=address)['port']['uuid']
+ patch = [{'path': '/address',
+ 'op': 'replace',
+ 'value': 'malformed:mac'}]
+
+ self.assertRaises(exc.BadRequest,
+ self.client.update_port, port_id, patch)
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_update_port_replace_extra_item_with_malformed(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+ extra = {'key': 'value'}
+
+ port_id = self.create_port(node_id=node_id,
+ address=address,
+ extra=extra)['port']['uuid']
+ patch = [{'path': '/extra/key',
+ 'op': 'replace',
+ 'value': 0.123}]
+
+ self.assertRaises(exc.BadRequest,
+ self.client.update_port, port_id, patch)
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_update_port_replace_whole_extra_with_malformed(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+ extra = {'key': 'value'}
+
+ port_id = self.create_port(node_id=node_id,
+ address=address,
+ extra=extra)['port']['uuid']
+ patch = [{'path': '/extra',
+ 'op': 'replace',
+ 'value': [1, 2, 3, 4, 'a']}]
+
+ self.assertRaises(exc.BadRequest,
+ self.client.update_port, port_id, patch)
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_update_port_replace_nonexistent_property(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+
+ port_id = self.create_port(node_id=node_id,
+ address=address)['port']['uuid']
+
+ patch = [{'path': '/nonexistent', 'op': 'replace', 'value': 'value'}]
+
+ self.assertRaises(exc.BadRequest,
+ self.client.update_port, port_id, patch)
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_update_port_remove_mandatory_field_mac(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+
+ port_id = self.create_port(node_id=node_id, address=address)['port'][
+ 'uuid']
+ self.assertRaises(exc.BadRequest, self.client.update_port, port_id,
+ [{'path': '/address', 'op': 'remove'}])
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_update_port_remove_mandatory_field_port_uuid(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+
+ port_id = self.create_port(node_id=node_id, address=address)['port'][
+ 'uuid']
+ self.assertRaises(exc.BadRequest, self.client.update_port, port_id,
+ [{'path': '/uuid', 'op': 'remove'}])
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_update_port_remove_nonexistent_property(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+
+ port_id = self.create_port(node_id=node_id, address=address)['port'][
+ 'uuid']
+ self.assertRaises(exc.BadRequest, self.client.update_port, port_id,
+ [{'path': '/nonexistent', 'op': 'remove'}])
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_delete_port_by_mac_not_allowed(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+
+ self.create_port(node_id=node_id, address=address)
+ self.assertRaises(exc.BadRequest, self.client.delete_port, address)
+
+ @test.attr(type=['negative', 'smoke'])
+ def test_update_port_mixed_ops_integrity(self):
+ node_id = self.node['uuid']
+ address = data_utils.rand_mac_address()
+ extra = {'key1': 'value1', 'key2': 'value2'}
+
+ port_id = self.create_port(node_id=node_id, address=address,
+ extra=extra)['port']['uuid']
+
+ new_address = data_utils.rand_mac_address()
+ new_extra = {'key1': 'new-value1', 'key3': 'new-value3'}
+
+ patch = [{'path': '/address',
+ 'op': 'replace',
+ 'value': new_address},
+ {'path': '/extra/key1',
+ 'op': 'replace',
+ 'value': new_extra['key1']},
+ {'path': '/extra/key2',
+ 'op': 'remove'},
+ {'path': '/extra/key3',
+ 'op': 'add',
+ 'value': new_extra['key3']},
+ {'path': '/nonexistent',
+ 'op': 'replace',
+ 'value': 'value'}]
+
+ self.assertRaises(exc.BadRequest, self.client.update_port, port_id,
+ patch)
+
+ # patch should not be applied
+ resp, body = self.client.show_port(port_id)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(address, body['address'])
+ self.assertEqual(extra, body['extra'])
diff --git a/tempest/api/compute/admin/test_agents.py b/tempest/api/compute/admin/test_agents.py
new file mode 100644
index 0000000..4808601
--- /dev/null
+++ b/tempest/api/compute/admin/test_agents.py
@@ -0,0 +1,123 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.compute import base
+from tempest.common.utils import data_utils
+from tempest import exceptions
+from tempest.openstack.common import log
+from tempest import test
+
+LOG = log.getLogger(__name__)
+
+
+class AgentsAdminTestJSON(base.BaseV2ComputeAdminTest):
+ """
+ Tests Agents API
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ super(AgentsAdminTestJSON, cls).setUpClass()
+ cls.client = cls.os_adm.agents_client
+
+ def setUp(self):
+ super(AgentsAdminTestJSON, self).setUp()
+ params = self._param_helper(
+ hypervisor='common', os='linux', architecture='x86_64',
+ version='7.0', url='xxx://xxxx/xxx/xxx',
+ md5hash='add6bb58e139be103324d04d82d8f545')
+ resp, body = self.client.create_agent(**params)
+ self.assertEqual(200, resp.status)
+ self.agent_id = body['agent_id']
+
+ def tearDown(self):
+ try:
+ self.client.delete_agent(self.agent_id)
+ except exceptions.NotFound:
+ pass
+ except Exception:
+ LOG.exception('Exception raised deleting agent %s', self.agent_id)
+ super(AgentsAdminTestJSON, self).tearDown()
+
+ def _param_helper(self, **kwargs):
+ rand_key = 'architecture'
+ if rand_key in kwargs:
+ # NOTE: The rand_name is for avoiding agent conflicts.
+ # If you try to create an agent with the same hypervisor,
+ # os and architecture as an existing agent, Nova will return
+ # an HTTPConflict or HTTPServerError.
+ kwargs[rand_key] = data_utils.rand_name(kwargs[rand_key])
+ return kwargs
+
+ @test.attr(type='gate')
+ def test_create_agent(self):
+ # Create an agent.
+ params = self._param_helper(
+ hypervisor='kvm', os='win', architecture='x86',
+ version='7.0', url='xxx://xxxx/xxx/xxx',
+ md5hash='add6bb58e139be103324d04d82d8f545')
+ resp, body = self.client.create_agent(**params)
+ self.assertEqual(200, resp.status)
+ self.addCleanup(self.client.delete_agent, body['agent_id'])
+ for expected_item, value in params.items():
+ self.assertEqual(value, body[expected_item])
+
+ @test.attr(type='gate')
+ def test_update_agent(self):
+ # Update an agent.
+ params = self._param_helper(
+ version='8.0', url='xxx://xxxx/xxx/xxx2',
+ md5hash='add6bb58e139be103324d04d82d8f547')
+ resp, body = self.client.update_agent(self.agent_id, **params)
+ self.assertEqual(200, resp.status)
+ for expected_item, value in params.items():
+ self.assertEqual(value, body[expected_item])
+
+ @test.attr(type='gate')
+ def test_delete_agent(self):
+ # Delete an agent.
+ resp, _ = self.client.delete_agent(self.agent_id)
+ self.assertEqual(200, resp.status)
+
+ # Verify the list doesn't contain the deleted agent.
+ resp, agents = self.client.list_agents()
+ self.assertEqual(200, resp.status)
+ self.assertNotIn(self.agent_id, map(lambda x: x['agent_id'], agents))
+
+ @test.attr(type='gate')
+ def test_list_agents(self):
+ # List all agents.
+ resp, agents = self.client.list_agents()
+ self.assertEqual(200, resp.status)
+ self.assertTrue(len(agents) > 0, 'Cannot get any agents.(%s)' % agents)
+ self.assertIn(self.agent_id, map(lambda x: x['agent_id'], agents))
+
+ @test.attr(type='gate')
+ def test_list_agents_with_filter(self):
+ # List the agent builds by the filter.
+ params = self._param_helper(
+ hypervisor='xen', os='linux', architecture='x86',
+ version='7.0', url='xxx://xxxx/xxx/xxx1',
+ md5hash='add6bb58e139be103324d04d82d8f546')
+ resp, agent_xen = self.client.create_agent(**params)
+ self.assertEqual(200, resp.status)
+ self.addCleanup(self.client.delete_agent, agent_xen['agent_id'])
+
+ agent_id_xen = agent_xen['agent_id']
+ params_filter = {'hypervisor': agent_xen['hypervisor']}
+ resp, agents = self.client.list_agents(params_filter)
+ self.assertEqual(200, resp.status)
+ self.assertTrue(len(agents) > 0, 'Cannot get any agents.(%s)' % agents)
+ self.assertIn(agent_id_xen, map(lambda x: x['agent_id'], agents))
+ self.assertNotIn(self.agent_id, map(lambda x: x['agent_id'], agents))
diff --git a/tempest/api/compute/admin/test_aggregates.py b/tempest/api/compute/admin/test_aggregates.py
index fb249e5..c2376c9 100644
--- a/tempest/api/compute/admin/test_aggregates.py
+++ b/tempest/api/compute/admin/test_aggregates.py
@@ -16,6 +16,7 @@
from tempest.api.compute import base
from tempest.common import tempest_fixtures as fixtures
from tempest.common.utils import data_utils
+from tempest import exceptions
from tempest import test
@@ -39,11 +40,20 @@
filter(lambda y: y['service'] == 'compute', hosts_all))
cls.host = hosts[0]
+ def _try_delete_aggregate(self, aggregate_id):
+ # delete aggregate, if it exists
+ try:
+ self.client.delete_aggregate(aggregate_id)
+ # if the aggregate is not found, it was already deleted by the test
+ except exceptions.NotFound:
+ pass
+
@test.attr(type='gate')
def test_aggregate_create_delete(self):
# Create and delete an aggregate.
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
resp, aggregate = self.client.create_aggregate(name=aggregate_name)
+ self.addCleanup(self._try_delete_aggregate, aggregate['id'])
self.assertEqual(200, resp.status)
self.assertEqual(aggregate_name, aggregate['name'])
self.assertIsNone(aggregate['availability_zone'])
@@ -59,6 +69,7 @@
az_name = data_utils.rand_name(self.az_name_prefix)
resp, aggregate = self.client.create_aggregate(
name=aggregate_name, availability_zone=az_name)
+ self.addCleanup(self._try_delete_aggregate, aggregate['id'])
self.assertEqual(200, resp.status)
self.assertEqual(aggregate_name, aggregate['name'])
self.assertEqual(az_name, aggregate['availability_zone'])
diff --git a/tempest/api/compute/admin/test_availability_zone.py b/tempest/api/compute/admin/test_availability_zone.py
index 3c06624..9555367 100644
--- a/tempest/api/compute/admin/test_availability_zone.py
+++ b/tempest/api/compute/admin/test_availability_zone.py
@@ -17,15 +17,15 @@
from tempest import test
-class AZAdminTestJSON(base.BaseV2ComputeAdminTest):
-
+class AZAdminV3Test(base.BaseComputeAdminTest):
"""
Tests Availability Zone API List
"""
+ _api_version = 3
@classmethod
def setUpClass(cls):
- super(AZAdminTestJSON, cls).setUpClass()
+ super(AZAdminV3Test, cls).setUpClass()
cls.client = cls.os_adm.availability_zone_client
@test.attr(type='gate')
@@ -44,5 +44,9 @@
self.assertTrue(len(availability_zone) > 0)
-class AZAdminTestXML(AZAdminTestJSON):
+class AZAdminV2TestJSON(AZAdminV3Test):
+ _api_version = 2
+
+
+class AZAdminV2TestXML(AZAdminV2TestJSON):
_interface = 'xml'
diff --git a/tempest/api/compute/admin/test_flavors.py b/tempest/api/compute/admin/test_flavors.py
index 05b763a..111ac9c 100644
--- a/tempest/api/compute/admin/test_flavors.py
+++ b/tempest/api/compute/admin/test_flavors.py
@@ -172,8 +172,9 @@
@test.attr(type='gate')
def test_list_non_public_flavor(self):
- # Create a flavor with os-flavor-access:is_public false should
- # be present in list_details.
+ # Create a flavor with os-flavor-access:is_public false.
+ # The flavor should not be present in list_details as the
+ # tenant is not automatically added to the access list.
# This operation requires the user to have 'admin' role
flavor_name = data_utils.rand_name(self.flavor_name_prefix)
new_flavor_id = data_utils.rand_int_id(start=1000)
@@ -192,7 +193,7 @@
for flavor in flavors:
if flavor['name'] == flavor_name:
flag = True
- self.assertTrue(flag)
+ self.assertFalse(flag)
# Verify flavor is not retrieved with other user
flag = False
diff --git a/tempest/api/compute/admin/test_flavors_access.py b/tempest/api/compute/admin/test_flavors_access.py
index 4804ce4..3ba7314 100644
--- a/tempest/api/compute/admin/test_flavors_access.py
+++ b/tempest/api/compute/admin/test_flavors_access.py
@@ -32,15 +32,12 @@
msg = "FlavorExtraData extension not enabled."
raise cls.skipException(msg)
+ # Compute admin flavor client
cls.client = cls.os_adm.flavors_client
- admin_client = cls._get_identity_admin_client()
- cls.tenant = admin_client.get_tenant_by_name(cls.flavors_client.
- tenant_name)
- cls.tenant_id = cls.tenant['id']
- cls.adm_tenant = admin_client.get_tenant_by_name(cls.os_adm.
- flavors_client.
- tenant_name)
- cls.adm_tenant_id = cls.adm_tenant['id']
+ # Non admin tenant ID
+ cls.tenant_id = cls.flavors_client.tenant_id
+ # Compute admin tenant ID
+ cls.adm_tenant_id = cls.client.tenant_id
cls.flavor_name_prefix = 'test_flavor_access_'
cls.ram = 512
cls.vcpus = 1
@@ -48,7 +45,8 @@
@test.attr(type='gate')
def test_flavor_access_list_with_private_flavor(self):
- # Test to list flavor access successfully by querying private flavor
+ # Test to make sure that listing flavor access on a newly created
+ # private flavor returns an empty access list
flavor_name = data_utils.rand_name(self.flavor_name_prefix)
new_flavor_id = data_utils.rand_int_id(start=1000)
resp, new_flavor = self.client.create_flavor(flavor_name,
@@ -60,10 +58,7 @@
self.assertEqual(resp.status, 200)
resp, flavor_access = self.client.list_flavor_access(new_flavor_id)
self.assertEqual(resp.status, 200)
- self.assertEqual(len(flavor_access), 1, str(flavor_access))
- first_flavor = flavor_access[0]
- self.assertEqual(str(new_flavor_id), str(first_flavor['flavor_id']))
- self.assertEqual(self.adm_tenant_id, first_flavor['tenant_id'])
+ self.assertEqual(len(flavor_access), 0, str(flavor_access))
@test.attr(type='gate')
def test_flavor_access_add_remove(self):
diff --git a/tempest/api/compute/admin/test_flavors_access_negative.py b/tempest/api/compute/admin/test_flavors_access_negative.py
index 8fe3331..73834e9 100644
--- a/tempest/api/compute/admin/test_flavors_access_negative.py
+++ b/tempest/api/compute/admin/test_flavors_access_negative.py
@@ -36,14 +36,7 @@
raise cls.skipException(msg)
cls.client = cls.os_adm.flavors_client
- admin_client = cls._get_identity_admin_client()
- cls.tenant = admin_client.get_tenant_by_name(cls.flavors_client.
- tenant_name)
- cls.tenant_id = cls.tenant['id']
- cls.adm_tenant = admin_client.get_tenant_by_name(cls.os_adm.
- flavors_client.
- tenant_name)
- cls.adm_tenant_id = cls.adm_tenant['id']
+ cls.tenant_id = cls.flavors_client.tenant_id
cls.flavor_name_prefix = 'test_flavor_access_'
cls.ram = 512
cls.vcpus = 1
diff --git a/tempest/api/compute/admin/test_flavors_negative.py b/tempest/api/compute/admin/test_flavors_negative.py
index b882ff4..b37d32c 100644
--- a/tempest/api/compute/admin/test_flavors_negative.py
+++ b/tempest/api/compute/admin/test_flavors_negative.py
@@ -13,7 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import testscenarios
import uuid
from tempest.api.compute import base
@@ -21,7 +20,7 @@
from tempest import exceptions
from tempest import test
-load_tests = testscenarios.load_tests_apply_scenarios
+load_tests = test.NegativeAutoTest.load_tests
class FlavorsAdminNegativeTestJSON(base.BaseV2ComputeAdminTest):
@@ -102,15 +101,9 @@
self.flavor_ref_alt)
+@test.SimpleNegativeAutoTest
class FlavorCreateNegativeTestJSON(base.BaseV2ComputeAdminTest,
test.NegativeAutoTest):
_interface = 'json'
_service = 'compute'
_schema_file = 'compute/admin/flavor_create.json'
-
- scenarios = test.NegativeAutoTest.generate_scenario(_schema_file)
-
- @test.attr(type=['negative', 'gate'])
- def test_create_flavor(self):
- # flavor details are not returned for non-existent flavors
- self.execute(self._schema_file)
diff --git a/tempest/api/compute/admin/test_instance_usage_audit_log.py b/tempest/api/compute/admin/test_instance_usage_audit_log.py
index 32c8656..055a177 100644
--- a/tempest/api/compute/admin/test_instance_usage_audit_log.py
+++ b/tempest/api/compute/admin/test_instance_usage_audit_log.py
@@ -14,10 +14,10 @@
# under the License.
import datetime
+import urllib
from tempest.api.compute import base
from tempest import test
-import urllib
class InstanceUsageAuditLogTestJSON(base.BaseV2ComputeAdminTest):
diff --git a/tempest/api/compute/admin/test_instance_usage_audit_log_negative.py b/tempest/api/compute/admin/test_instance_usage_audit_log_negative.py
index fe4a184..6a5fc96 100644
--- a/tempest/api/compute/admin/test_instance_usage_audit_log_negative.py
+++ b/tempest/api/compute/admin/test_instance_usage_audit_log_negative.py
@@ -14,11 +14,11 @@
# under the License.
import datetime
+import urllib
from tempest.api.compute import base
from tempest import exceptions
from tempest import test
-import urllib
class InstanceUsageAuditLogNegativeTestJSON(base.BaseV2ComputeAdminTest):
diff --git a/tempest/api/compute/admin/test_migrations.py b/tempest/api/compute/admin/test_migrations.py
new file mode 100644
index 0000000..514f1fa
--- /dev/null
+++ b/tempest/api/compute/admin/test_migrations.py
@@ -0,0 +1,55 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import testtools
+
+from tempest.api.compute import base
+from tempest import config
+from tempest import test
+
+CONF = config.CONF
+
+
+class MigrationsAdminTest(base.BaseV2ComputeAdminTest):
+
+ @classmethod
+ def setUpClass(cls):
+ super(MigrationsAdminTest, cls).setUpClass()
+ cls.client = cls.os_adm.migrations_client
+
+ @test.attr(type='gate')
+ def test_list_migrations(self):
+ # Admin can get the migrations list
+ resp, _ = self.client.list_migrations()
+ self.assertEqual(200, resp.status)
+
+ @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+ 'Resize not available.')
+ @test.attr(type='gate')
+ def test_list_migrations_in_flavor_resize_situation(self):
+ # Admin can get the migrations list which contains the resized server
+ resp, server = self.create_test_server(wait_until="ACTIVE")
+ server_id = server['id']
+
+ resp, _ = self.servers_client.resize(server_id, self.flavor_ref_alt)
+ self.assertEqual(202, resp.status)
+ self.servers_client.wait_for_server_status(server_id, 'VERIFY_RESIZE')
+ self.servers_client.confirm_resize(server_id)
+ self.servers_client.wait_for_server_status(server_id, 'ACTIVE')
+
+ resp, body = self.client.list_migrations()
+ self.assertEqual(200, resp.status)
+
+ instance_uuids = [x['instance_uuid'] for x in body]
+ self.assertIn(server_id, instance_uuids)
diff --git a/tempest/api/compute/admin/test_quotas.py b/tempest/api/compute/admin/test_quotas.py
index 5af091e..348666d 100644
--- a/tempest/api/compute/admin/test_quotas.py
+++ b/tempest/api/compute/admin/test_quotas.py
@@ -28,8 +28,7 @@
# NOTE(afazekas): these test cases should always create and use a new
# tenant most of them should be skipped if we can't do that
- cls.demo_tenant_id = cls.isolated_creds.get_primary_user().get(
- 'tenantId')
+ cls.demo_tenant_id = cls.quotas_client.tenant_id
cls.default_quota_set = set(('injected_file_content_bytes',
'metadata_items', 'injected_files',
@@ -76,21 +75,61 @@
# TODO(afazekas): merge these test cases
@test.attr(type='gate')
def test_get_updated_quotas(self):
- # Verify that GET shows the updated quota set
+ # Verify that GET shows the updated quota set of the tenant
tenant_name = data_utils.rand_name('cpu_quota_tenant_')
tenant_desc = tenant_name + '-desc'
identity_client = self.os_adm.identity_client
_, tenant = identity_client.create_tenant(name=tenant_name,
description=tenant_desc)
tenant_id = tenant['id']
- self.addCleanup(identity_client.delete_tenant,
- tenant_id)
+ self.addCleanup(identity_client.delete_tenant, tenant_id)
- self.adm_client.update_quota_set(tenant_id,
- ram='5120')
+ self.adm_client.update_quota_set(tenant_id, ram='5120')
resp, quota_set = self.adm_client.get_quota_set(tenant_id)
self.assertEqual(200, resp.status)
- self.assertEqual(quota_set['ram'], 5120)
+ self.assertEqual(5120, quota_set['ram'])
+
+ # Verify that GET shows the updated quota set of the user
+ user_name = data_utils.rand_name('cpu_quota_user_')
+ password = data_utils.rand_name('password-')
+ email = user_name + '@testmail.tm'
+ _, user = identity_client.create_user(name=user_name,
+ password=password,
+ tenant_id=tenant_id,
+ email=email)
+ user_id = user['id']
+ self.addCleanup(identity_client.delete_user, user_id)
+
+ self.adm_client.update_quota_set(tenant_id,
+ user_id=user_id,
+ ram='2048')
+ resp, quota_set = self.adm_client.get_quota_set(tenant_id,
+ user_id=user_id)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(2048, quota_set['ram'])
+
+ @test.attr(type='gate')
+ def test_delete_quota(self):
+ # Admin can delete the resource quota set for a tenant
+ tenant_name = data_utils.rand_name('ram_quota_tenant_')
+ tenant_desc = tenant_name + '-desc'
+ identity_client = self.os_adm.identity_client
+ _, tenant = identity_client.create_tenant(name=tenant_name,
+ description=tenant_desc)
+ tenant_id = tenant['id']
+ self.addCleanup(identity_client.delete_tenant, tenant_id)
+ resp, quota_set_default = self.adm_client.get_quota_set(tenant_id)
+ ram_default = quota_set_default['ram']
+
+ resp, body = self.adm_client.update_quota_set(tenant_id, ram='5120')
+ self.assertEqual(200, resp.status)
+
+ resp, body = self.adm_client.delete_quota_set(tenant_id)
+ self.assertEqual(202, resp.status)
+
+ resp, quota_set_new = self.adm_client.get_quota_set(tenant_id)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(ram_default, quota_set_new['ram'])
class QuotasAdminTestXML(QuotasAdminTestJSON):
diff --git a/tempest/api/compute/admin/test_quotas_negative.py b/tempest/api/compute/admin/test_quotas_negative.py
index 5b2b5fd..e1dc685 100644
--- a/tempest/api/compute/admin/test_quotas_negative.py
+++ b/tempest/api/compute/admin/test_quotas_negative.py
@@ -33,8 +33,7 @@
# NOTE(afazekas): these test cases should always create and use a new
# tenant most of them should be skipped if we can't do that
- cls.demo_tenant_id = cls.isolated_creds.get_primary_user().get(
- 'tenantId')
+ cls.demo_tenant_id = cls.client.tenant_id
@test.attr(type=['negative', 'gate'])
def test_update_quota_normal_user(self):
diff --git a/tempest/api/compute/admin/test_servers.py b/tempest/api/compute/admin/test_servers.py
index 40a4df7..49af645 100644
--- a/tempest/api/compute/admin/test_servers.py
+++ b/tempest/api/compute/admin/test_servers.py
@@ -14,7 +14,6 @@
from tempest.api.compute import base
from tempest.common.utils import data_utils
-from tempest import exceptions
from tempest import test
@@ -27,6 +26,7 @@
_host_key = 'OS-EXT-SRV-ATTR:host'
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(ServersAdminTestJSON, cls).setUpClass()
cls.client = cls.os_adm.servers_client
@@ -43,16 +43,6 @@
wait_until='ACTIVE')
cls.s2_id = server['id']
- def _get_unused_flavor_id(self):
- flavor_id = data_utils.rand_int_id(start=1000)
- while True:
- try:
- resp, body = self.flavors_client.get_flavor_details(flavor_id)
- except exceptions.NotFound:
- break
- flavor_id = data_utils.rand_int_id(start=1000)
- return flavor_id
-
@test.attr(type='gate')
def test_list_servers_by_admin(self):
# Listing servers by admin user returns empty list by default
@@ -184,6 +174,16 @@
resp, server_body = self.client.inject_network_info(server['id'])
self.assertEqual(202, resp.status)
+ @test.attr(type='gate')
+ def test_create_server_with_scheduling_hint(self):
+ # Create a server with scheduler hints.
+ hints = {
+ 'same_host': self.s1_id
+ }
+ resp, server = self.create_test_server(sched_hints=hints,
+ wait_until='ACTIVE')
+ self.assertEqual('202', resp['status'])
+
class ServersAdminTestXML(ServersAdminTestJSON):
_host_key = (
diff --git a/tempest/api/compute/admin/test_servers_negative.py b/tempest/api/compute/admin/test_servers_negative.py
index 797b780..8b3a0b5 100644
--- a/tempest/api/compute/admin/test_servers_negative.py
+++ b/tempest/api/compute/admin/test_servers_negative.py
@@ -14,11 +14,16 @@
import uuid
+import testtools
+
from tempest.api.compute import base
from tempest.common.utils import data_utils
+from tempest import config
from tempest import exceptions
from tempest import test
+CONF = config.CONF
+
class ServersAdminNegativeTestJSON(base.BaseV2ComputeAdminTest):
@@ -32,10 +37,7 @@
cls.client = cls.os_adm.servers_client
cls.non_adm_client = cls.servers_client
cls.flavors_client = cls.os_adm.flavors_client
- cls.identity_client = cls._get_identity_admin_client()
- tenant = cls.identity_client.get_tenant_by_name(
- cls.client.tenant_name)
- cls.tenant_id = tenant['id']
+ cls.tenant_id = cls.client.tenant_id
cls.s1_name = data_utils.rand_name('server')
resp, server = cls.create_test_server(name=cls.s1_name,
@@ -119,6 +121,8 @@
self.client.migrate_server,
str(uuid.uuid4()))
+ @testtools.skipUnless(CONF.compute_feature_enabled.suspend,
+ 'Suspend is not available.')
@test.attr(type=['negative', 'gate'])
def test_migrate_server_invalid_state(self):
# create server.
diff --git a/tempest/api/compute/admin/test_services.py b/tempest/api/compute/admin/test_services.py
index 9dd429b..2feb825 100644
--- a/tempest/api/compute/admin/test_services.py
+++ b/tempest/api/compute/admin/test_services.py
@@ -14,7 +14,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest.api.compute.api_schema import services as schema
from tempest.api.compute import base
from tempest import test
@@ -33,7 +32,7 @@
@test.attr(type='gate')
def test_list_services(self):
resp, services = self.client.list_services()
- self.validate_response(schema.list_services, resp, services)
+ self.assertEqual(200, resp.status)
self.assertNotEqual(0, len(services))
@test.attr(type='gate')
@@ -41,7 +40,7 @@
binary_name = 'nova-compute'
params = {'binary': binary_name}
resp, services = self.client.list_services(params)
- self.validate_response(schema.list_services, resp, services)
+ self.assertEqual(200, resp.status)
self.assertNotEqual(0, len(services))
for service in services:
self.assertEqual(binary_name, service['binary'])
@@ -49,14 +48,12 @@
@test.attr(type='gate')
def test_get_service_by_host_name(self):
resp, services = self.client.list_services()
- self.validate_response(schema.list_services, resp, services)
host_name = services[0]['host']
services_on_host = [service for service in services if
service['host'] == host_name]
params = {'host': host_name}
resp, services = self.client.list_services(params)
- self.validate_response(schema.list_services, resp, services)
# we could have a periodic job checkin between the 2 service
# lookups, so only compare binary lists.
@@ -70,13 +67,12 @@
@test.attr(type='gate')
def test_get_service_by_service_and_host_name(self):
resp, services = self.client.list_services()
- self.validate_response(schema.list_services, resp, services)
host_name = services[0]['host']
binary_name = services[0]['binary']
params = {'host': host_name, 'binary': binary_name}
resp, services = self.client.list_services(params)
- self.validate_response(schema.list_services, resp, services)
+ self.assertEqual(200, resp.status)
self.assertEqual(1, len(services))
self.assertEqual(host_name, services[0]['host'])
self.assertEqual(binary_name, services[0]['binary'])
diff --git a/tempest/api/compute/admin/test_simple_tenant_usage.py b/tempest/api/compute/admin/test_simple_tenant_usage.py
index cc8641f..f3a81d1 100644
--- a/tempest/api/compute/admin/test_simple_tenant_usage.py
+++ b/tempest/api/compute/admin/test_simple_tenant_usage.py
@@ -14,10 +14,10 @@
# under the License.
import datetime
+import time
from tempest.api.compute import base
from tempest import test
-import time
class TenantUsagesTestJSON(base.BaseV2ComputeAdminTest):
@@ -27,11 +27,7 @@
super(TenantUsagesTestJSON, cls).setUpClass()
cls.adm_client = cls.os_adm.tenant_usages_client
cls.client = cls.os.tenant_usages_client
- cls.identity_client = cls._get_identity_admin_client()
-
- resp, tenants = cls.identity_client.list_tenants()
- cls.tenant_id = [tnt['id'] for tnt in tenants if tnt['name'] ==
- cls.client.tenant_name][0]
+ cls.tenant_id = cls.client.tenant_id
# Create a server in the demo tenant
resp, server = cls.create_test_server(wait_until='ACTIVE')
diff --git a/tempest/api/compute/admin/test_simple_tenant_usage_negative.py b/tempest/api/compute/admin/test_simple_tenant_usage_negative.py
index a080f2e..d69c43c 100644
--- a/tempest/api/compute/admin/test_simple_tenant_usage_negative.py
+++ b/tempest/api/compute/admin/test_simple_tenant_usage_negative.py
@@ -52,11 +52,9 @@
params = {'start': self.end,
'end': self.start}
resp, tenants = self.identity_client.list_tenants()
- tenant_id = [tnt['id'] for tnt in tenants if tnt['name'] ==
- self.client.tenant_name][0]
self.assertRaises(exceptions.BadRequest,
self.adm_client.get_tenant_usage,
- tenant_id, params)
+ self.client.tenant_id, params)
@test.attr(type=['negative', 'gate'])
def test_list_usage_all_tenants_with_non_admin_user(self):
diff --git a/tempest/api/compute/api_schema/services.py b/tempest/api/compute/api_schema/services.py
deleted file mode 100644
index ef5868c..0000000
--- a/tempest/api/compute/api_schema/services.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright 2014 NEC Corporation. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-list_services = {
- 'status_code': [200],
- 'response_body': {
- 'type': 'array',
- 'items': {
- 'type': 'object',
- 'properties': {
- # NOTE: Now the type of 'id' is integer, but here allows
- # 'string' also because we will be able to change it to
- # 'uuid' in the future.
- 'id': {'type': ['integer', 'string']},
- 'zone': {'type': 'string'},
- 'host': {'type': 'string'},
- 'state': {'type': 'string'},
- 'binary': {'type': 'string'},
- 'status': {'type': 'string'},
- 'updated_at': {'type': 'string'},
- 'disabled_reason': {'type': ['string', 'null']},
- },
- 'required': ['id', 'zone', 'host', 'state', 'binary', 'status',
- 'updated_at', 'disabled_reason'],
- },
- }
-}
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index b2f3117..44340c3 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -15,8 +15,6 @@
import time
-import jsonschema
-
from tempest import clients
from tempest.common.utils import data_utils
from tempest import config
@@ -32,12 +30,16 @@
class BaseComputeTest(tempest.test.BaseTestCase):
"""Base test case class for all Compute API tests."""
+ _api_version = 3
force_tenant_isolation = False
@classmethod
def setUpClass(cls):
+ cls.set_network_resources()
super(BaseComputeTest, cls).setUpClass()
+ # TODO(andreaf) We should also take care of the alt_manager here,
+ # but only once client lazy loading in the manager is done
os = cls.get_client_manager()
cls.os = os
@@ -55,6 +57,57 @@
cls.multi_user = cls.get_multi_user()
cls.security_groups = []
+ if cls._api_version == 2:
+ cls.servers_client = cls.os.servers_client
+ cls.flavors_client = cls.os.flavors_client
+ cls.images_client = cls.os.images_client
+ cls.extensions_client = cls.os.extensions_client
+ cls.floating_ips_client = cls.os.floating_ips_client
+ cls.keypairs_client = cls.os.keypairs_client
+ cls.security_groups_client = cls.os.security_groups_client
+ cls.quotas_client = cls.os.quotas_client
+ cls.limits_client = cls.os.limits_client
+ cls.volumes_extensions_client = cls.os.volumes_extensions_client
+ cls.volumes_client = cls.os.volumes_client
+ cls.interfaces_client = cls.os.interfaces_client
+ cls.fixed_ips_client = cls.os.fixed_ips_client
+ cls.availability_zone_client = cls.os.availability_zone_client
+ cls.agents_client = cls.os.agents_client
+ cls.aggregates_client = cls.os.aggregates_client
+ cls.services_client = cls.os.services_client
+ cls.instance_usages_audit_log_client = \
+ cls.os.instance_usages_audit_log_client
+ cls.hypervisor_client = cls.os.hypervisor_client
+ cls.certificates_client = cls.os.certificates_client
+ cls.migrations_client = cls.os.migrations_client
+
+ elif cls._api_version == 3:
+ if not CONF.compute_feature_enabled.api_v3:
+ skip_msg = ("%s skipped as nova v3 api is not available" %
+ cls.__name__)
+ raise cls.skipException(skip_msg)
+ cls.servers_client = cls.os.servers_v3_client
+ cls.images_client = cls.os.image_client
+ cls.flavors_client = cls.os.flavors_v3_client
+ cls.services_client = cls.os.services_v3_client
+ cls.extensions_client = cls.os.extensions_v3_client
+ cls.availability_zone_client = cls.os.availability_zone_v3_client
+ cls.interfaces_client = cls.os.interfaces_v3_client
+ cls.hypervisor_client = cls.os.hypervisor_v3_client
+ cls.keypairs_client = cls.os.keypairs_v3_client
+ cls.volumes_client = cls.os.volumes_client
+ cls.certificates_client = cls.os.certificates_v3_client
+ cls.keypairs_client = cls.os.keypairs_v3_client
+ cls.aggregates_client = cls.os.aggregates_v3_client
+ cls.hosts_client = cls.os.hosts_v3_client
+ cls.quotas_client = cls.os.quotas_v3_client
+ cls.version_client = cls.os.version_v3_client
+ cls.migrations_client = cls.os.migrations_v3_client
+ else:
+ msg = ("Unexpected API version is specified (%s)" %
+ cls._api_version)
+ raise exceptions.InvalidConfiguration(message=msg)
+
@classmethod
def get_multi_user(cls):
multi_user = True
@@ -93,6 +146,26 @@
pass
@classmethod
+ def server_check_teardown(cls):
+ """Checks is the shared server clean enough for subsequent test.
+ Method will delete the server when it's dirty.
+ The setUp method is responsible for creating a new server.
+ Exceptions raised in tearDown class are fails the test case,
+ This method supposed to use only by tierDown methods, when
+ the shared server_id is stored in the server_id of the class.
+ """
+ if getattr(cls, 'server_id', None) is not None:
+ try:
+ cls.servers_client.wait_for_server_status(cls.server_id,
+ 'ACTIVE')
+ except Exception as exc:
+ LOG.exception(exc)
+ cls.servers_client.delete_server(cls.server_id)
+ cls.servers_client.wait_for_server_termination(cls.server_id)
+ cls.server_id = None
+ raise
+
+ @classmethod
def clear_images(cls):
for image_id in cls.images:
try:
@@ -102,7 +175,6 @@
pass
except Exception:
LOG.exception('Exception raised deleting image %s' % image_id)
- pass
@classmethod
def clear_security_groups(cls):
@@ -117,7 +189,6 @@
LOG.info('Exception raised deleting security group %s',
sg['id'])
LOG.exception(exc)
- pass
@classmethod
def tearDownClass(cls):
@@ -178,32 +249,6 @@
return resp, body
- @classmethod
- def validate_response(cls, schema, resp, body):
- response_code = schema['status_code']
- if resp.status not in response_code:
- msg = ("The status code(%s) is different than the expected "
- "one(%s)") % (resp.status, response_code)
- raise exceptions.InvalidHttpSuccessCode(msg)
- response_schema = schema.get('response_body')
- if response_schema:
- if cls._interface == 'xml':
- # NOTE: xml client of Tempest is broken and cannot get some
- # keys. The best way is to fix it, but now xml format has been
- # marked as "deprecated" in Nova API and xml client will be
- # removed from Tempest.
- # So now this test does not check attributes if xml.
- return
- try:
- jsonschema.validate(body, response_schema)
- except jsonschema.ValidationError as ex:
- msg = ("HTTP response body is invalid (%s)") % ex
- raise exceptions.InvalidHTTPResponseBody(msg)
- else:
- if body:
- msg = ("HTTP response body should not exist (%s)") % body
- raise exceptions.InvalidHTTPResponseBody(msg)
-
def wait_for(self, condition):
"""Repeatedly calls condition() until a timeout."""
start_time = int(time.time())
@@ -231,36 +276,12 @@
LOG.warn("Unable to delete volume '%s' since it was not found. "
"Maybe it was already deleted?" % volume_id)
-
-class BaseV2ComputeTest(BaseComputeTest):
-
- _interface = "json"
-
@classmethod
- def setUpClass(cls):
- # By default compute tests do not create network resources
- cls.set_network_resources()
- super(BaseV2ComputeTest, cls).setUpClass()
- cls.servers_client = cls.os.servers_client
- cls.flavors_client = cls.os.flavors_client
- cls.images_client = cls.os.images_client
- cls.extensions_client = cls.os.extensions_client
- cls.floating_ips_client = cls.os.floating_ips_client
- cls.keypairs_client = cls.os.keypairs_client
- cls.security_groups_client = cls.os.security_groups_client
- cls.quotas_client = cls.os.quotas_client
- cls.limits_client = cls.os.limits_client
- cls.volumes_extensions_client = cls.os.volumes_extensions_client
- cls.volumes_client = cls.os.volumes_client
- cls.interfaces_client = cls.os.interfaces_client
- cls.fixed_ips_client = cls.os.fixed_ips_client
- cls.availability_zone_client = cls.os.availability_zone_client
- cls.aggregates_client = cls.os.aggregates_client
- cls.services_client = cls.os.services_client
- cls.instance_usages_audit_log_client = \
- cls.os.instance_usages_audit_log_client
- cls.hypervisor_client = cls.os.hypervisor_client
- cls.certificates_client = cls.os.certificates_client
+ def prepare_instance_network(cls):
+ if (CONF.compute.ssh_auth_method != 'disabled' and
+ CONF.compute.ssh_connect_method == 'floating'):
+ cls.set_network_resources(network=True, subnet=True, router=True,
+ dhcp=True)
@classmethod
def create_image_from_server(cls, server_id, **kwargs):
@@ -269,21 +290,25 @@
if 'name' in kwargs:
name = kwargs.pop('name')
- resp, image = cls.images_client.create_image(
- server_id, name)
+ if cls._api_version == 2:
+ resp, image = cls.images_client.create_image(server_id, name)
+ elif cls._api_version == 3:
+ resp, image = cls.servers_client.create_image(server_id, name)
image_id = data_utils.parse_image_id(resp['location'])
cls.images.append(image_id)
if 'wait_until' in kwargs:
cls.images_client.wait_for_image_status(image_id,
kwargs['wait_until'])
- resp, image = cls.images_client.get_image(image_id)
+ if cls._api_version == 2:
+ resp, image = cls.images_client.get_image(image_id)
+ elif cls._api_version == 3:
+ resp, image = cls.images_client.get_image_meta(image_id)
if kwargs['wait_until'] == 'ACTIVE':
if kwargs.get('wait_for_server', True):
cls.servers_client.wait_for_server_status(server_id,
'ACTIVE')
-
return resp, image
@classmethod
@@ -295,148 +320,72 @@
cls.servers_client.wait_for_server_termination(server_id)
except Exception:
LOG.exception('Failed to delete server %s' % server_id)
- pass
resp, server = cls.create_test_server(wait_until='ACTIVE', **kwargs)
- cls.password = server['adminPass']
+ if cls._api_version == 2:
+ cls.password = server['adminPass']
+ elif cls._api_version == 3:
+ cls.password = server['admin_password']
return server['id']
@classmethod
def delete_volume(cls, volume_id):
"""Deletes the given volume and waits for it to be gone."""
- cls._delete_volume(cls.volumes_extensions_client, volume_id)
+ if cls._api_version == 2:
+ cls._delete_volume(cls.volumes_extensions_client, volume_id)
+ elif cls._api_version == 3:
+ cls._delete_volume(cls.volumes_client, volume_id)
-class BaseV2ComputeAdminTest(BaseV2ComputeTest):
- """Base test case class for Compute Admin V2 API tests."""
-
- @classmethod
- def setUpClass(cls):
- super(BaseV2ComputeAdminTest, cls).setUpClass()
- admin_username = CONF.compute_admin.username
- admin_password = CONF.compute_admin.password
- admin_tenant = CONF.compute_admin.tenant_name
- if not (admin_username and admin_password and admin_tenant):
- msg = ("Missing Compute Admin API credentials "
- "in configuration.")
- raise cls.skipException(msg)
- if (CONF.compute.allow_tenant_isolation or
- cls.force_tenant_isolation is True):
- creds = cls.isolated_creds.get_admin_creds()
- admin_username, admin_tenant_name, admin_password = creds
- cls.os_adm = clients.Manager(username=admin_username,
- password=admin_password,
- tenant_name=admin_tenant_name,
- interface=cls._interface)
- else:
- cls.os_adm = clients.ComputeAdminManager(interface=cls._interface)
+class BaseV2ComputeTest(BaseComputeTest):
+ _api_version = 2
+ _interface = "json"
class BaseV3ComputeTest(BaseComputeTest):
+ _api_version = 3
+ _interface = "json"
+
+class BaseComputeAdminTest(BaseComputeTest):
+ """Base test case class for Compute Admin API tests."""
_interface = "json"
@classmethod
def setUpClass(cls):
- # By default compute tests do not create network resources
- if cls._interface == "xml":
- skip_msg = ("XML interface is being removed from Nova v3. "
- "%s will be removed shortly" % cls.__name__)
- raise cls.skipException(skip_msg)
-
- if not CONF.compute_feature_enabled.api_v3:
- skip_msg = ("%s skipped as nova v3 api is not available" %
- cls.__name__)
- raise cls.skipException(skip_msg)
-
- cls.set_network_resources()
- super(BaseV3ComputeTest, cls).setUpClass()
-
- cls.servers_client = cls.os.servers_v3_client
- cls.images_client = cls.os.image_client
- cls.flavors_client = cls.os.flavors_v3_client
- cls.services_client = cls.os.services_v3_client
- cls.extensions_client = cls.os.extensions_v3_client
- cls.availability_zone_client = cls.os.availability_zone_v3_client
- cls.interfaces_client = cls.os.interfaces_v3_client
- cls.hypervisor_client = cls.os.hypervisor_v3_client
- cls.keypairs_client = cls.os.keypairs_v3_client
- cls.volumes_client = cls.os.volumes_client
- cls.certificates_client = cls.os.certificates_v3_client
- cls.keypairs_client = cls.os.keypairs_v3_client
- cls.aggregates_client = cls.os.aggregates_v3_client
- cls.hosts_client = cls.os.hosts_v3_client
- cls.quotas_client = cls.os.quotas_v3_client
- cls.version_client = cls.os.version_v3_client
-
- @classmethod
- def create_image_from_server(cls, server_id, **kwargs):
- """Wrapper utility that returns an image created from the server."""
- name = data_utils.rand_name(cls.__name__ + "-image")
- if 'name' in kwargs:
- name = kwargs.pop('name')
-
- resp, image = cls.servers_client.create_image(
- server_id, name)
- image_id = data_utils.parse_image_id(resp['location'])
- cls.images.append(image_id)
-
- if 'wait_until' in kwargs:
- cls.images_client.wait_for_image_status(image_id,
- kwargs['wait_until'])
- resp, image = cls.images_client.get_image_meta(image_id)
-
- return resp, image
-
- @classmethod
- def rebuild_server(cls, server_id, **kwargs):
- # Destroy an existing server and creates a new one
- try:
- cls.servers_client.delete_server(server_id)
- cls.servers_client.wait_for_server_termination(server_id)
- except Exception:
- LOG.exception('Failed to delete server %s' % server_id)
- pass
- resp, server = cls.create_test_server(wait_until='ACTIVE', **kwargs)
- cls.password = server['admin_password']
- return server['id']
-
- @classmethod
- def delete_volume(cls, volume_id):
- """Deletes the given volume and waits for it to be gone."""
- cls._delete_volume(cls.volumes_client, volume_id)
-
-
-class BaseV3ComputeAdminTest(BaseV3ComputeTest):
- """Base test case class for all Compute Admin API V3 tests."""
-
- @classmethod
- def setUpClass(cls):
- super(BaseV3ComputeAdminTest, cls).setUpClass()
- admin_username = CONF.compute_admin.username
- admin_password = CONF.compute_admin.password
- admin_tenant = CONF.compute_admin.tenant_name
- if not (admin_username and admin_password and admin_tenant):
- msg = ("Missing Compute Admin API credentials "
- "in configuration.")
- raise cls.skipException(msg)
- if CONF.compute.allow_tenant_isolation:
+ super(BaseComputeAdminTest, cls).setUpClass()
+ if (CONF.compute.allow_tenant_isolation or
+ cls.force_tenant_isolation is True):
creds = cls.isolated_creds.get_admin_creds()
- admin_username, admin_tenant_name, admin_password = creds
- os_adm = clients.Manager(username=admin_username,
- password=admin_password,
- tenant_name=admin_tenant_name,
- interface=cls._interface)
+ cls.os_adm = clients.Manager(credentials=creds,
+ interface=cls._interface)
else:
- os_adm = clients.ComputeAdminManager(interface=cls._interface)
+ try:
+ cls.os_adm = clients.ComputeAdminManager(
+ interface=cls._interface)
+ except exceptions.InvalidCredentials:
+ msg = ("Missing Compute Admin API credentials "
+ "in configuration.")
+ raise cls.skipException(msg)
- cls.os_adm = os_adm
- cls.servers_admin_client = cls.os_adm.servers_v3_client
- cls.services_admin_client = cls.os_adm.services_v3_client
- cls.availability_zone_admin_client = \
- cls.os_adm.availability_zone_v3_client
- cls.hypervisor_admin_client = cls.os_adm.hypervisor_v3_client
- cls.flavors_admin_client = cls.os_adm.flavors_v3_client
- cls.aggregates_admin_client = cls.os_adm.aggregates_v3_client
- cls.hosts_admin_client = cls.os_adm.hosts_v3_client
- cls.quotas_admin_client = cls.os_adm.quotas_v3_client
- cls.agents_admin_client = cls.os_adm.agents_v3_client
+ if cls._api_version == 3:
+ cls.servers_admin_client = cls.os_adm.servers_v3_client
+ cls.services_admin_client = cls.os_adm.services_v3_client
+ cls.availability_zone_admin_client = \
+ cls.os_adm.availability_zone_v3_client
+ cls.hypervisor_admin_client = cls.os_adm.hypervisor_v3_client
+ cls.flavors_admin_client = cls.os_adm.flavors_v3_client
+ cls.aggregates_admin_client = cls.os_adm.aggregates_v3_client
+ cls.hosts_admin_client = cls.os_adm.hosts_v3_client
+ cls.quotas_admin_client = cls.os_adm.quotas_v3_client
+ cls.agents_admin_client = cls.os_adm.agents_v3_client
+ cls.migrations_admin_client = cls.os_adm.migrations_v3_client
+
+
+class BaseV2ComputeAdminTest(BaseComputeAdminTest):
+ """Base test case class for Compute Admin V2 API tests."""
+ _api_version = 2
+
+
+class BaseV3ComputeAdminTest(BaseComputeAdminTest):
+ """Base test case class for Compute Admin V3 API tests."""
+ _api_version = 3
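Note on the base-class consolidation above: API-version selection now happens through the _api_version class attribute rather than through separate v2/v3 setUpClass bodies, so version-specific test classes only override that attribute. A minimal sketch of the expected usage under this refactor (the class names are illustrative and not part of the change; it mirrors how CertificatesV3Test and FlavorsV3Test are reworked later in this diff):

    # Illustrative sketch only, assuming the refactored BaseComputeTest above:
    # setUpClass inspects _api_version and wires the matching service clients.
    from tempest.api.compute import base
    from tempest import test


    class ExampleFlavorsV3Test(base.BaseComputeTest):
        _api_version = 3

        @test.attr(type='gate')
        def test_list_flavors(self):
            # flavors_client points at the v3 client because _api_version is 3
            resp, flavors = self.flavors_client.list_flavors()
            self.assertEqual(200, resp.status)
            self.assertNotEqual(0, len(flavors))


    class ExampleFlavorsV2TestJSON(ExampleFlavorsV3Test):
        # Re-run the same test body against the v2 API.
        _api_version = 2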
diff --git a/tempest/api/compute/certificates/test_certificates.py b/tempest/api/compute/certificates/test_certificates.py
index 5299d13..0f921c5 100644
--- a/tempest/api/compute/certificates/test_certificates.py
+++ b/tempest/api/compute/certificates/test_certificates.py
@@ -17,15 +17,19 @@
from tempest import test
-class CertificatesTestJSON(base.BaseV2ComputeTest):
+class CertificatesV3Test(base.BaseComputeTest):
+
+ _api_version = 3
@test.attr(type='gate')
- def test_create_and_get_root_certificate(self):
+ def test_create_root_certificate(self):
# create certificates
- resp, create_body = self.certificates_client.create_certificate()
- self.assertEqual(200, resp.status)
- self.assertIn('data', create_body)
- self.assertIn('private_key', create_body)
+ resp, body = self.certificates_client.create_certificate()
+ self.assertIn('data', body)
+ self.assertIn('private_key', body)
+
+ @test.attr(type='gate')
+ def test_get_root_certificate(self):
# get the root certificate
resp, body = self.certificates_client.get_certificate('root')
self.assertEqual(200, resp.status)
@@ -33,5 +37,9 @@
self.assertIn('private_key', body)
-class CertificatesTestXML(CertificatesTestJSON):
+class CertificatesV2TestJSON(CertificatesV3Test):
+ _api_version = 2
+
+
+class CertificatesV2TestXML(CertificatesV2TestJSON):
_interface = 'xml'
diff --git a/tempest/api/compute/flavors/test_flavors.py b/tempest/api/compute/flavors/test_flavors.py
index 6e202f6..bfebb5e 100644
--- a/tempest/api/compute/flavors/test_flavors.py
+++ b/tempest/api/compute/flavors/test_flavors.py
@@ -17,11 +17,15 @@
from tempest import test
-class FlavorsTestJSON(base.BaseV2ComputeTest):
+class FlavorsV3Test(base.BaseComputeTest):
+
+ _api_version = 3
+ _min_disk = 'min_disk'
+ _min_ram = 'min_ram'
@classmethod
def setUpClass(cls):
- super(FlavorsTestJSON, cls).setUpClass()
+ super(FlavorsV3Test, cls).setUpClass()
cls.client = cls.flavors_client
@test.attr(type='smoke')
@@ -89,7 +93,7 @@
flavors = sorted(flavors, key=lambda k: k['disk'])
flavor_id = flavors[0]['id']
- params = {'minDisk': flavors[0]['disk'] + 1}
+ params = {self._min_disk: flavors[0]['disk'] + 1}
resp, flavors = self.client.list_flavors_with_detail(params)
self.assertFalse(any([i for i in flavors if i['id'] == flavor_id]))
@@ -100,7 +104,7 @@
flavors = sorted(flavors, key=lambda k: k['ram'])
flavor_id = flavors[0]['id']
- params = {'minRam': flavors[0]['ram'] + 1}
+ params = {self._min_ram: flavors[0]['ram'] + 1}
resp, flavors = self.client.list_flavors_with_detail(params)
self.assertFalse(any([i for i in flavors if i['id'] == flavor_id]))
@@ -111,7 +115,7 @@
flavors = sorted(flavors, key=lambda k: k['disk'])
flavor_id = flavors[0]['id']
- params = {'minDisk': flavors[0]['disk'] + 1}
+ params = {self._min_disk: flavors[0]['disk'] + 1}
resp, flavors = self.client.list_flavors(params)
self.assertFalse(any([i for i in flavors if i['id'] == flavor_id]))
@@ -122,10 +126,17 @@
flavors = sorted(flavors, key=lambda k: k['ram'])
flavor_id = flavors[0]['id']
- params = {'minRam': flavors[0]['ram'] + 1}
+ params = {self._min_ram: flavors[0]['ram'] + 1}
resp, flavors = self.client.list_flavors(params)
self.assertFalse(any([i for i in flavors if i['id'] == flavor_id]))
-class FlavorsTestXML(FlavorsTestJSON):
+class FlavorsV2TestJSON(FlavorsV3Test):
+
+ _api_version = 2
+ _min_disk = 'minDisk'
+ _min_ram = 'minRam'
+
+
+class FlavorsV2TestXML(FlavorsV2TestJSON):
_interface = 'xml'
diff --git a/tempest/api/compute/flavors/test_flavors_negative.py b/tempest/api/compute/flavors/test_flavors_negative.py
index 4ba5023..1638f2d 100644
--- a/tempest/api/compute/flavors/test_flavors_negative.py
+++ b/tempest/api/compute/flavors/test_flavors_negative.py
@@ -13,40 +13,28 @@
# License for the specific language governing permissions and limitations
# under the License.
-import testscenarios
from tempest.api.compute import base
from tempest import test
-load_tests = testscenarios.load_tests_apply_scenarios
+load_tests = test.NegativeAutoTest.load_tests
-class FlavorsListNegativeTestJSON(base.BaseV2ComputeTest,
- test.NegativeAutoTest):
+@test.SimpleNegativeAutoTest
+class FlavorsListWithDetailsNegativeTestJSON(base.BaseV2ComputeTest,
+ test.NegativeAutoTest):
_service = 'compute'
_schema_file = 'compute/flavors/flavors_list.json'
- scenarios = test.NegativeAutoTest.generate_scenario(_schema_file)
- @test.attr(type=['negative', 'gate'])
- def test_list_flavors_with_detail(self):
- self.execute(self._schema_file)
-
-
+@test.SimpleNegativeAutoTest
class FlavorDetailsNegativeTestJSON(base.BaseV2ComputeTest,
test.NegativeAutoTest):
_service = 'compute'
_schema_file = 'compute/flavors/flavor_details.json'
- scenarios = test.NegativeAutoTest.generate_scenario(_schema_file)
-
@classmethod
def setUpClass(cls):
super(FlavorDetailsNegativeTestJSON, cls).setUpClass()
cls.set_resource("flavor", cls.flavor_ref)
-
- @test.attr(type=['negative', 'gate'])
- def test_get_flavor_details(self):
- # flavor details are not returned for non-existent flavors
- self.execute(self._schema_file)
diff --git a/tempest/api/compute/floating_ips/test_floating_ips_actions.py b/tempest/api/compute/floating_ips/test_floating_ips_actions.py
index c0f7af0..b3789f8 100644
--- a/tempest/api/compute/floating_ips/test_floating_ips_actions.py
+++ b/tempest/api/compute/floating_ips/test_floating_ips_actions.py
@@ -24,10 +24,11 @@
floating_ip = None
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(FloatingIPsTestJSON, cls).setUpClass()
cls.client = cls.floating_ips_client
- #cls.servers_client = cls.servers_client
+ cls.floating_ip_id = None
# Server creation
resp, server = cls.create_test_server(wait_until='ACTIVE')
@@ -40,9 +41,18 @@
@classmethod
def tearDownClass(cls):
# Deleting the floating IP which is created in this method
- resp, body = cls.client.delete_floating_ip(cls.floating_ip_id)
+ if cls.floating_ip_id:
+ resp, body = cls.client.delete_floating_ip(cls.floating_ip_id)
super(FloatingIPsTestJSON, cls).tearDownClass()
+ def _try_delete_floating_ip(self, floating_ip_id):
+ # delete floating ip, if it exists
+ try:
+ self.client.delete_floating_ip(floating_ip_id)
+ # if not found, it was already deleted in the test body
+ except exceptions.NotFound:
+ pass
+
@test.attr(type='gate')
def test_allocate_floating_ip(self):
# Positive test:Allocation of a new floating IP to a project
@@ -64,6 +74,7 @@
# should be successful
# Creating the floating IP that is to be deleted in this method
resp, floating_ip_body = self.client.create_floating_ip()
+ self.addCleanup(self._try_delete_floating_ip, floating_ip_body['id'])
# Storing the details of floating IP before deleting it
cli_resp = self.client.get_floating_ip_details(floating_ip_body['id'])
resp, floating_ip_details = cli_resp
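The floating-IP changes above combine three things that recur in this change set: decorating setUpClass with @test.safe_setup, initializing the class-level resource id to None before anything can fail, and guarding tearDownClass so it only deletes what was actually created. A minimal sketch of the resulting class shape (the class name is illustrative, and reading safe_setup as triggering tearDownClass when setUpClass fails is an assumption, not something stated in this diff):

    # Illustrative sketch of the safe_setup / guarded-teardown pattern above.
    from tempest.api.compute import base
    from tempest import test


    class ExampleFloatingIPTest(base.BaseV2ComputeTest):

        @classmethod
        @test.safe_setup
        def setUpClass(cls):
            super(ExampleFloatingIPTest, cls).setUpClass()
            cls.client = cls.floating_ips_client
            # Set to None first so tearDownClass can tell whether the
            # resource below was ever created.
            cls.floating_ip_id = None
            resp, body = cls.client.create_floating_ip()
            cls.floating_ip_id = body['id']

        @classmethod
        def tearDownClass(cls):
            if cls.floating_ip_id:
                cls.client.delete_floating_ip(cls.floating_ip_id)
            super(ExampleFloatingIPTest, cls).tearDownClass()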
diff --git a/tempest/api/compute/images/test_image_metadata.py b/tempest/api/compute/images/test_image_metadata.py
index 195a018..91eb4c5 100644
--- a/tempest/api/compute/images/test_image_metadata.py
+++ b/tempest/api/compute/images/test_image_metadata.py
@@ -24,14 +24,15 @@
class ImagesMetadataTestJSON(base.BaseV2ComputeTest):
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(ImagesMetadataTestJSON, cls).setUpClass()
if not CONF.service_available.glance:
skip_msg = ("%s skipped as glance is not available" % cls.__name__)
raise cls.skipException(skip_msg)
- cls.servers_client = cls.servers_client
cls.client = cls.images_client
+ cls.image_id = None
resp, server = cls.create_test_server(wait_until='ACTIVE')
cls.server_id = server['id']
@@ -45,7 +46,8 @@
@classmethod
def tearDownClass(cls):
- cls.client.delete_image(cls.image_id)
+ if cls.image_id:
+ cls.client.delete_image(cls.image_id)
super(ImagesMetadataTestJSON, cls).tearDownClass()
def setUp(self):
diff --git a/tempest/api/compute/images/test_images.py b/tempest/api/compute/images/test_images.py
index 5de2436..29df2b0 100644
--- a/tempest/api/compute/images/test_images.py
+++ b/tempest/api/compute/images/test_images.py
@@ -15,7 +15,6 @@
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest import config
-from tempest import exceptions
from tempest import test
CONF = config.CONF
@@ -32,48 +31,6 @@
cls.client = cls.images_client
cls.servers_client = cls.servers_client
- @test.attr(type=['negative', 'gate'])
- def test_create_image_from_deleted_server(self):
- # An image should not be created if the server instance is removed
- resp, server = self.create_test_server(wait_until='ACTIVE')
-
- # Delete server before trying to create server
- self.servers_client.delete_server(server['id'])
- self.servers_client.wait_for_server_termination(server['id'])
- # Create a new image after server is deleted
- name = data_utils.rand_name('image')
- meta = {'image_type': 'test'}
- self.assertRaises(exceptions.NotFound,
- self.create_image_from_server,
- server['id'], name=name, meta=meta)
-
- @test.attr(type=['negative', 'gate'])
- def test_create_image_from_invalid_server(self):
- # An image should not be created with invalid server id
- # Create a new image with invalid server id
- name = data_utils.rand_name('image')
- meta = {'image_type': 'test'}
- resp = {}
- resp['status'] = None
- self.assertRaises(exceptions.NotFound,
- self.create_image_from_server,
- '!@#$%^&*()', name=name, meta=meta)
-
- @test.attr(type=['negative', 'gate'])
- def test_create_image_from_stopped_server(self):
- resp, server = self.create_test_server(wait_until='ACTIVE')
- self.servers_client.stop(server['id'])
- self.servers_client.wait_for_server_status(server['id'],
- 'SHUTOFF')
- self.addCleanup(self.servers_client.delete_server, server['id'])
- snapshot_name = data_utils.rand_name('test-snap-')
- resp, image = self.create_image_from_server(server['id'],
- name=snapshot_name,
- wait_until='ACTIVE',
- wait_for_server=False)
- self.addCleanup(self.client.delete_image, image['id'])
- self.assertEqual(snapshot_name, image['name'])
-
@test.attr(type='gate')
def test_delete_saving_image(self):
snapshot_name = data_utils.rand_name('test-snap-')
@@ -85,59 +42,6 @@
resp, body = self.client.delete_image(image['id'])
self.assertEqual('204', resp['status'])
- @test.attr(type=['negative', 'gate'])
- def test_create_image_specify_uuid_35_characters_or_less(self):
- # Return an error if Image ID passed is 35 characters or less
- snapshot_name = data_utils.rand_name('test-snap-')
- test_uuid = ('a' * 35)
- self.assertRaises(exceptions.NotFound, self.client.create_image,
- test_uuid, snapshot_name)
-
- @test.attr(type=['negative', 'gate'])
- def test_create_image_specify_uuid_37_characters_or_more(self):
- # Return an error if Image ID passed is 37 characters or more
- snapshot_name = data_utils.rand_name('test-snap-')
- test_uuid = ('a' * 37)
- self.assertRaises(exceptions.NotFound, self.client.create_image,
- test_uuid, snapshot_name)
-
- @test.attr(type=['negative', 'gate'])
- def test_delete_image_with_invalid_image_id(self):
- # An image should not be deleted with invalid image id
- self.assertRaises(exceptions.NotFound, self.client.delete_image,
- '!@$%^&*()')
-
- @test.attr(type=['negative', 'gate'])
- def test_delete_non_existent_image(self):
- # Return an error while trying to delete a non-existent image
-
- non_existent_image_id = '11a22b9-12a9-5555-cc11-00ab112223fa'
- self.assertRaises(exceptions.NotFound, self.client.delete_image,
- non_existent_image_id)
-
- @test.attr(type=['negative', 'gate'])
- def test_delete_image_blank_id(self):
- # Return an error while trying to delete an image with blank Id
- self.assertRaises(exceptions.NotFound, self.client.delete_image, '')
-
- @test.attr(type=['negative', 'gate'])
- def test_delete_image_non_hex_string_id(self):
- # Return an error while trying to delete an image with non hex id
- image_id = '11a22b9-120q-5555-cc11-00ab112223gj'
- self.assertRaises(exceptions.NotFound, self.client.delete_image,
- image_id)
-
- @test.attr(type=['negative', 'gate'])
- def test_delete_image_negative_image_id(self):
- # Return an error while trying to delete an image with negative id
- self.assertRaises(exceptions.NotFound, self.client.delete_image, -1)
-
- @test.attr(type=['negative', 'gate'])
- def test_delete_image_id_is_over_35_character_limit(self):
- # Return an error while trying to delete image with id over limit
- self.assertRaises(exceptions.NotFound, self.client.delete_image,
- '11a22b9-12a9-5555-cc11-00ab112223fa-3fac')
-
class ImagesTestXML(ImagesTestJSON):
_interface = 'xml'
diff --git a/tempest/api/compute/images/test_images_negative.py b/tempest/api/compute/images/test_images_negative.py
new file mode 100644
index 0000000..ae00ae2
--- /dev/null
+++ b/tempest/api/compute/images/test_images_negative.py
@@ -0,0 +1,131 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.compute import base
+from tempest.common.utils import data_utils
+from tempest import config
+from tempest import exceptions
+from tempest import test
+
+CONF = config.CONF
+
+
+class ImagesNegativeTestJSON(base.BaseV2ComputeTest):
+
+ @classmethod
+ def setUpClass(cls):
+ super(ImagesNegativeTestJSON, cls).setUpClass()
+ if not CONF.service_available.glance:
+ skip_msg = ("%s skipped as glance is not available" % cls.__name__)
+ raise cls.skipException(skip_msg)
+ cls.client = cls.images_client
+ cls.servers_client = cls.servers_client
+
+ @test.attr(type=['negative', 'gate'])
+ def test_create_image_from_deleted_server(self):
+ # An image should not be created if the server instance is removed
+ resp, server = self.create_test_server(wait_until='ACTIVE')
+
+ # Delete the server before trying to create an image from it
+ self.servers_client.delete_server(server['id'])
+ self.servers_client.wait_for_server_termination(server['id'])
+ # Create a new image after server is deleted
+ name = data_utils.rand_name('image')
+ meta = {'image_type': 'test'}
+ self.assertRaises(exceptions.NotFound,
+ self.create_image_from_server,
+ server['id'], name=name, meta=meta)
+
+ @test.attr(type=['negative', 'gate'])
+ def test_create_image_from_invalid_server(self):
+ # An image should not be created with invalid server id
+ # Create a new image with invalid server id
+ name = data_utils.rand_name('image')
+ meta = {'image_type': 'test'}
+ resp = {}
+ resp['status'] = None
+ self.assertRaises(exceptions.NotFound, self.create_image_from_server,
+ '!@#$%^&*()', name=name, meta=meta)
+
+ @test.attr(type=['negative', 'gate'])
+ def test_create_image_from_stopped_server(self):
+ resp, server = self.create_test_server(wait_until='ACTIVE')
+ self.servers_client.stop(server['id'])
+ self.servers_client.wait_for_server_status(server['id'],
+ 'SHUTOFF')
+ self.addCleanup(self.servers_client.delete_server, server['id'])
+ snapshot_name = data_utils.rand_name('test-snap-')
+ resp, image = self.create_image_from_server(server['id'],
+ name=snapshot_name,
+ wait_until='ACTIVE',
+ wait_for_server=False)
+ self.addCleanup(self.client.delete_image, image['id'])
+ self.assertEqual(snapshot_name, image['name'])
+
+ @test.attr(type=['negative', 'gate'])
+ def test_create_image_specify_uuid_35_characters_or_less(self):
+ # Return an error if Image ID passed is 35 characters or less
+ snapshot_name = data_utils.rand_name('test-snap-')
+ test_uuid = ('a' * 35)
+ self.assertRaises(exceptions.NotFound, self.client.create_image,
+ test_uuid, snapshot_name)
+
+ @test.attr(type=['negative', 'gate'])
+ def test_create_image_specify_uuid_37_characters_or_more(self):
+ # Return an error if Image ID passed is 37 characters or more
+ snapshot_name = data_utils.rand_name('test-snap-')
+ test_uuid = ('a' * 37)
+ self.assertRaises(exceptions.NotFound, self.client.create_image,
+ test_uuid, snapshot_name)
+
+ @test.attr(type=['negative', 'gate'])
+ def test_delete_image_with_invalid_image_id(self):
+ # An image should not be deleted with invalid image id
+ self.assertRaises(exceptions.NotFound, self.client.delete_image,
+ '!@$%^&*()')
+
+ @test.attr(type=['negative', 'gate'])
+ def test_delete_non_existent_image(self):
+ # Return an error while trying to delete a non-existent image
+
+ non_existent_image_id = '11a22b9-12a9-5555-cc11-00ab112223fa'
+ self.assertRaises(exceptions.NotFound, self.client.delete_image,
+ non_existent_image_id)
+
+ @test.attr(type=['negative', 'gate'])
+ def test_delete_image_blank_id(self):
+ # Return an error while trying to delete an image with blank Id
+ self.assertRaises(exceptions.NotFound, self.client.delete_image, '')
+
+ @test.attr(type=['negative', 'gate'])
+ def test_delete_image_non_hex_string_id(self):
+ # Return an error while trying to delete an image with non hex id
+ image_id = '11a22b9-120q-5555-cc11-00ab112223gj'
+ self.assertRaises(exceptions.NotFound, self.client.delete_image,
+ image_id)
+
+ @test.attr(type=['negative', 'gate'])
+ def test_delete_image_negative_image_id(self):
+ # Return an error while trying to delete an image with negative id
+ self.assertRaises(exceptions.NotFound, self.client.delete_image, -1)
+
+ @test.attr(type=['negative', 'gate'])
+ def test_delete_image_id_is_over_35_character_limit(self):
+ # Return an error while trying to delete image with id over limit
+ self.assertRaises(exceptions.NotFound, self.client.delete_image,
+ '11a22b9-12a9-5555-cc11-00ab112223fa-3fac')
+
+
+class ImagesNegativeTestXML(ImagesNegativeTestJSON):
+ _interface = 'xml'
diff --git a/tempest/api/compute/images/test_images_oneserver.py b/tempest/api/compute/images/test_images_oneserver.py
index b152c3c..c81cec5 100644
--- a/tempest/api/compute/images/test_images_oneserver.py
+++ b/tempest/api/compute/images/test_images_oneserver.py
@@ -13,7 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import testtools
from tempest.api.compute import base
from tempest.common.utils import data_utils
@@ -27,6 +26,11 @@
class ImagesOneServerTestJSON(base.BaseV2ComputeTest):
+ def tearDown(self):
+ """Terminate test instances created after a test is executed."""
+ self.server_check_teardown()
+ super(ImagesOneServerTestJSON, self).tearDown()
+
def setUp(self):
# NOTE(afazekas): Normally we use the same server with all test cases,
# but if it has an issue, we build a new one
@@ -61,8 +65,6 @@
resp, flavor = self.flavors_client.get_flavor_details(flavor_id)
return flavor['disk']
- @testtools.skipUnless(CONF.compute_feature_enabled.create_image,
- 'Environment unable to create images.')
@test.attr(type='smoke')
def test_create_delete_image(self):
diff --git a/tempest/api/compute/images/test_images_oneserver_negative.py b/tempest/api/compute/images/test_images_oneserver_negative.py
index 41a0590..9c4ab00 100644
--- a/tempest/api/compute/images/test_images_oneserver_negative.py
+++ b/tempest/api/compute/images/test_images_oneserver_negative.py
@@ -33,6 +33,7 @@
for image_id in self.image_ids:
self.client.delete_image(image_id)
self.image_ids.remove(image_id)
+ self.server_check_teardown()
super(ImagesOneServerNegativeTestJSON, self).tearDown()
def setUp(self):
diff --git a/tempest/api/compute/keypairs/test_keypairs.py b/tempest/api/compute/keypairs/test_keypairs.py
index 67fafed..01979c0 100644
--- a/tempest/api/compute/keypairs/test_keypairs.py
+++ b/tempest/api/compute/keypairs/test_keypairs.py
@@ -18,16 +18,17 @@
from tempest import test
-class KeyPairsTestJSON(base.BaseV2ComputeTest):
+class KeyPairsV3Test(base.BaseComputeTest):
+
+ _api_version = 3
@classmethod
def setUpClass(cls):
- super(KeyPairsTestJSON, cls).setUpClass()
+ super(KeyPairsV3Test, cls).setUpClass()
cls.client = cls.keypairs_client
def _delete_keypair(self, keypair_name):
resp, _ = self.client.delete_keypair(keypair_name)
- self.assertEqual(202, resp.status)
def _create_keypair(self, keypair_name, pub_key=None):
resp, body = self.client.create_keypair(keypair_name, pub_key)
@@ -46,7 +47,6 @@
# as the keypair dicts from list API doesn't have them.
keypair.pop('private_key')
keypair.pop('user_id')
- self.assertEqual(200, resp.status)
key_list.append(keypair)
# Fetch all keypairs and verify the list
# has all created keypairs
@@ -69,7 +69,6 @@
# Keypair should be created, verified and deleted
k_name = data_utils.rand_name('keypair-')
resp, keypair = self._create_keypair(k_name)
- self.assertEqual(200, resp.status)
private_key = keypair['private_key']
key_name = keypair['name']
self.assertEqual(key_name, k_name,
@@ -108,7 +107,6 @@
"XcPojYN56tI0OlrGqojbediJYD0rUsJu4weZpbn8vilb3JuDY+jws"
"snSA8wzBx3A/8y9Pp1B nova@ubuntu")
resp, keypair = self._create_keypair(k_name, pub_key)
- self.assertEqual(200, resp.status)
self.assertFalse('private_key' in keypair,
"Field private_key is not empty!")
key_name = keypair['name']
@@ -117,5 +115,9 @@
"to the requested name!")
-class KeyPairsTestXML(KeyPairsTestJSON):
+class KeyPairsV2TestJSON(KeyPairsV3Test):
+ _api_version = 2
+
+
+class KeyPairsV2TestXML(KeyPairsV2TestJSON):
_interface = 'xml'
diff --git a/tempest/api/compute/security_groups/test_security_groups.py b/tempest/api/compute/security_groups/test_security_groups.py
index 538ebc6..3736f28 100644
--- a/tempest/api/compute/security_groups/test_security_groups.py
+++ b/tempest/api/compute/security_groups/test_security_groups.py
@@ -61,8 +61,11 @@
@test.attr(type='smoke')
def test_security_group_create_get_delete(self):
# Security Group should be created, fetched and deleted
- s_name = data_utils.rand_name('securitygroup-')
+ # with a space in the middle of the name as well as
+ # leading and trailing spaces
+ s_name = ' %s ' % data_utils.rand_name('securitygroup ')
resp, securitygroup = self.create_security_group(name=s_name)
+ self.assertEqual(200, resp.status)
self.assertIn('name', securitygroup)
securitygroup_name = securitygroup['name']
self.assertEqual(securitygroup_name, s_name,
diff --git a/tempest/api/compute/servers/test_attach_interfaces.py b/tempest/api/compute/servers/test_attach_interfaces.py
index f6eed00..297b300 100644
--- a/tempest/api/compute/servers/test_attach_interfaces.py
+++ b/tempest/api/compute/servers/test_attach_interfaces.py
@@ -127,7 +127,7 @@
_ifs = self._test_delete_interface(server, ifs)
self.assertEqual(len(ifs) - 1, len(_ifs))
- @test.attr(type='gate')
+ @test.attr(type='smoke')
def test_add_remove_fixed_ip(self):
# Add and Remove the fixed IP to server.
server, ifs = self._create_server_get_interfaces()
diff --git a/tempest/api/compute/servers/test_availability_zone.py b/tempest/api/compute/servers/test_availability_zone.py
index 7b12555..cf9837f 100644
--- a/tempest/api/compute/servers/test_availability_zone.py
+++ b/tempest/api/compute/servers/test_availability_zone.py
@@ -17,15 +17,15 @@
from tempest import test
-class AZTestJSON(base.BaseV2ComputeTest):
-
+class AZV3Test(base.BaseComputeTest):
"""
Tests Availability Zone API List
"""
+ _api_version = 3
@classmethod
def setUpClass(cls):
- super(AZTestJSON, cls).setUpClass()
+ super(AZV3Test, cls).setUpClass()
cls.client = cls.availability_zone_client
@test.attr(type='gate')
@@ -36,5 +36,9 @@
self.assertTrue(len(availability_zone) > 0)
-class AZTestXML(AZTestJSON):
+class AZV2TestJSON(AZV3Test):
+ _api_version = 2
+
+
+class AZV2TestXML(AZV2TestJSON):
_interface = 'xml'
diff --git a/tempest/api/compute/servers/test_create_server.py b/tempest/api/compute/servers/test_create_server.py
index ddf37ce..e135eca 100644
--- a/tempest/api/compute/servers/test_create_server.py
+++ b/tempest/api/compute/servers/test_create_server.py
@@ -28,11 +28,11 @@
class ServersTestJSON(base.BaseV2ComputeTest):
- run_ssh = CONF.compute.run_ssh
disk_config = 'AUTO'
@classmethod
def setUpClass(cls):
+ cls.prepare_instance_network()
super(ServersTestJSON, cls).setUpClass()
cls.meta = {'hello': 'world'}
cls.accessIPv4 = '1.1.1.1'
@@ -54,13 +54,6 @@
resp, cls.server = cls.client.get_server(cls.server_initial['id'])
@test.attr(type='smoke')
- def test_create_server_response(self):
- # Check that the required fields are returned with values
- self.assertEqual(202, self.resp.status)
- self.assertTrue(self.server_initial['id'] is not None)
- self.assertTrue(self.server_initial['adminPass'] is not None)
-
- @test.attr(type='smoke')
def test_verify_server_details(self):
# Verify the specified server attributes are set correctly
self.assertEqual(self.accessIPv4, self.server['accessIPv4'])
@@ -89,7 +82,8 @@
found = any([i for i in servers if i['id'] == self.server['id']])
self.assertTrue(found)
- @testtools.skipIf(not run_ssh, 'Instance validation tests are disabled.')
+ @testtools.skipUnless(CONF.compute.run_ssh,
+ 'Instance validation tests are disabled.')
@test.attr(type='gate')
def test_verify_created_server_vcpus(self):
# Verify that the number of vcpus reported by the instance matches
@@ -99,7 +93,8 @@
self.password)
self.assertEqual(flavor['vcpus'], linux_client.get_number_of_vcpus())
- @testtools.skipIf(not run_ssh, 'Instance validation tests are disabled.')
+ @testtools.skipUnless(CONF.compute.run_ssh,
+ 'Instance validation tests are disabled.')
@test.attr(type='gate')
def test_host_name_is_same_as_server_name(self):
# Verify the instance host name is the same as the server name
@@ -109,38 +104,22 @@
class ServersWithSpecificFlavorTestJSON(base.BaseV2ComputeAdminTest):
- run_ssh = CONF.compute.run_ssh
disk_config = 'AUTO'
@classmethod
def setUpClass(cls):
+ cls.prepare_instance_network()
super(ServersWithSpecificFlavorTestJSON, cls).setUpClass()
- cls.meta = {'hello': 'world'}
- cls.accessIPv4 = '1.1.1.1'
- cls.accessIPv6 = '0000:0000:0000:0000:0000:babe:220.12.22.2'
- cls.name = data_utils.rand_name('server')
- file_contents = 'This is a test file.'
- personality = [{'path': '/test.txt',
- 'contents': base64.b64encode(file_contents)}]
- cls.client = cls.servers_client
cls.flavor_client = cls.os_adm.flavors_client
- cli_resp = cls.create_test_server(name=cls.name,
- meta=cls.meta,
- accessIPv4=cls.accessIPv4,
- accessIPv6=cls.accessIPv6,
- personality=personality,
- disk_config=cls.disk_config)
- cls.resp, cls.server_initial = cli_resp
- cls.password = cls.server_initial['adminPass']
- cls.client.wait_for_server_status(cls.server_initial['id'], 'ACTIVE')
- resp, cls.server = cls.client.get_server(cls.server_initial['id'])
+ cls.client = cls.servers_client
- @testtools.skipIf(not run_ssh, 'Instance validation tests are disabled.')
+ @testtools.skipUnless(CONF.compute.run_ssh,
+ 'Instance validation tests are disabled.')
@test.attr(type='gate')
def test_verify_created_server_ephemeral_disk(self):
# Verify that the ephemeral disk is created when creating server
- def create_flavor_with_extra_specs(self):
+ def create_flavor_with_extra_specs():
flavor_with_eph_disk_name = data_utils.rand_name('eph_flavor')
flavor_with_eph_disk_id = data_utils.rand_int_id(start=1000)
ram = 64
@@ -153,12 +132,12 @@
ram, vcpus, disk,
flavor_with_eph_disk_id,
ephemeral=1))
- self.addCleanup(self.flavor_clean_up, flavor['id'])
+ self.addCleanup(flavor_clean_up, flavor['id'])
self.assertEqual(200, resp.status)
return flavor['id']
- def create_flavor_without_extra_specs(self):
+ def create_flavor_without_extra_specs():
flavor_no_eph_disk_name = data_utils.rand_name('no_eph_flavor')
flavor_no_eph_disk_id = data_utils.rand_int_id(start=1000)
@@ -171,23 +150,22 @@
create_flavor(flavor_no_eph_disk_name,
ram, vcpus, disk,
flavor_no_eph_disk_id))
- self.addCleanup(self.flavor_clean_up, flavor['id'])
+ self.addCleanup(flavor_clean_up, flavor['id'])
self.assertEqual(200, resp.status)
return flavor['id']
- def flavor_clean_up(self, flavor_id):
+ def flavor_clean_up(flavor_id):
resp, body = self.flavor_client.delete_flavor(flavor_id)
self.assertEqual(resp.status, 202)
self.flavor_client.wait_for_resource_deletion(flavor_id)
- flavor_with_eph_disk_id = self.create_flavor_with_extra_specs()
- flavor_no_eph_disk_id = self.create_flavor_without_extra_specs()
+ flavor_with_eph_disk_id = create_flavor_with_extra_specs()
+ flavor_no_eph_disk_id = create_flavor_without_extra_specs()
admin_pass = self.image_ssh_password
- resp, server_no_eph_disk = (self.
- create_test_server(
+ resp, server_no_eph_disk = (self.create_test_server(
wait_until='ACTIVE',
adminPass=admin_pass,
flavor=flavor_no_eph_disk_id))
@@ -196,13 +174,18 @@
adminPass=admin_pass,
flavor=flavor_with_eph_disk_id))
# Get partition number of server without extra specs.
+ _, server_no_eph_disk = self.client.get_server(
+ server_no_eph_disk['id'])
linux_client = remote_client.RemoteClient(server_no_eph_disk,
- self.ssh_user, self.password)
- partition_num = len(linux_client.get_partitions())
+ self.ssh_user, admin_pass)
+ partition_num = len(linux_client.get_partitions().split('\n'))
+ _, server_with_eph_disk = self.client.get_server(
+ server_with_eph_disk['id'])
linux_client = remote_client.RemoteClient(server_with_eph_disk,
- self.ssh_user, self.password)
- self.assertEqual(partition_num + 1, linux_client.get_partitions())
+ self.ssh_user, admin_pass)
+            partition_num_eph = len(linux_client.get_partitions().split('\n'))
+            self.assertEqual(partition_num + 1, partition_num_eph)
class ServersTestManualDisk(ServersTestJSON):
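The hunks above drop the run_ssh class attribute and switch from skipIf(not run_ssh, ...) to skipUnless(CONF.compute.run_ssh, ...), reading the flag directly at decoration time. A minimal, self-contained sketch of that pattern with a stand-in config object (hypothetical, not tempest's CONF)::

    import unittest


    class _FakeComputeConf(object):
        # Stand-in for a config section exposing the run_ssh flag.
        run_ssh = False


    CONF_COMPUTE = _FakeComputeConf()


    class SshDependentTest(unittest.TestCase):

        @unittest.skipUnless(CONF_COMPUTE.run_ssh,
                             'Instance validation tests are disabled.')
        def test_needs_ssh(self):
            # Only reached when run_ssh is True.
            self.assertTrue(True)


    if __name__ == '__main__':
        unittest.main()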
diff --git a/tempest/api/compute/servers/test_delete_server.py b/tempest/api/compute/servers/test_delete_server.py
index 5e011dd..9e34922 100644
--- a/tempest/api/compute/servers/test_delete_server.py
+++ b/tempest/api/compute/servers/test_delete_server.py
@@ -23,6 +23,7 @@
class DeleteServersTestJSON(base.BaseV2ComputeTest):
+
# NOTE: Server creations of each test class should be under 10
# for preventing "Quota exceeded for instances"
@@ -57,6 +58,8 @@
self.assertEqual('204', resp['status'])
self.client.wait_for_server_termination(server['id'])
+ @testtools.skipUnless(CONF.compute_feature_enabled.pause,
+ 'Pause is not available.')
@test.attr(type='gate')
def test_delete_server_while_in_pause_state(self):
        # Delete a server while its VM state is Pause
@@ -99,6 +102,26 @@
self.assertEqual('204', resp['status'])
self.client.wait_for_server_termination(server['id'])
+ @test.attr(type='gate')
+ def test_delete_server_while_in_attached_volume(self):
+ # Delete a server while a volume is attached to it
+ volumes_client = self.volumes_extensions_client
+ device = '/dev/%s' % CONF.compute.volume_device_name
+ resp, server = self.create_test_server(wait_until='ACTIVE')
+
+ resp, volume = volumes_client.create_volume(1)
+ self.addCleanup(volumes_client.delete_volume, volume['id'])
+ volumes_client.wait_for_volume_status(volume['id'], 'available')
+ resp, body = self.client.attach_volume(server['id'],
+ volume['id'],
+ device=device)
+ volumes_client.wait_for_volume_status(volume['id'], 'in-use')
+
+ resp, _ = self.client.delete_server(server['id'])
+ self.assertEqual('204', resp['status'])
+ self.client.wait_for_server_termination(server['id'])
+ volumes_client.wait_for_volume_status(volume['id'], 'available')
+
class DeleteServersAdminTestJSON(base.BaseV2ComputeAdminTest):
# NOTE: Server creations of each test class should be under 10
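The new test_delete_server_while_in_attached_volume relies on wait_for_volume_status to watch the volume go available -> in-use -> available around the server's deletion. A rough sketch of that style of status poll, assuming a simple timeout/interval loop (not tempest's actual helper)::

    import time


    def wait_for_status(get_status, wanted, timeout=60, interval=2):
        """Poll get_status() until it returns `wanted` or the timeout expires."""
        deadline = time.time() + timeout
        while time.time() < deadline:
            if get_status() == wanted:
                return
            time.sleep(interval)
        raise RuntimeError('resource did not reach status %r within %s seconds'
                           % (wanted, timeout))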
diff --git a/tempest/api/compute/servers/test_list_server_filters.py b/tempest/api/compute/servers/test_list_server_filters.py
index 837114c..f66020c 100644
--- a/tempest/api/compute/servers/test_list_server_filters.py
+++ b/tempest/api/compute/servers/test_list_server_filters.py
@@ -26,7 +26,9 @@
class ListServerFiltersTestJSON(base.BaseV2ComputeTest):
@classmethod
+ @test.safe_setup
def setUpClass(cls):
+ cls.set_network_resources(network=True, subnet=True, dhcp=True)
super(ListServerFiltersTestJSON, cls).setUpClass()
cls.client = cls.servers_client
@@ -68,8 +70,12 @@
resp, cls.s3 = cls.create_test_server(name=cls.s3_name,
flavor=cls.flavor_ref_alt,
wait_until='ACTIVE')
-
- cls.fixed_network_name = CONF.compute.fixed_network_name
+ if (CONF.service_available.neutron and
+ CONF.compute.allow_tenant_isolation):
+ network = cls.isolated_creds.get_primary_network()
+ cls.fixed_network_name = network['name']
+ else:
+ cls.fixed_network_name = CONF.compute.fixed_network_name
@utils.skip_unless_attr('multiple_images', 'Only one image found')
@test.attr(type='gate')
@@ -225,7 +231,6 @@
self.assertNotIn(self.s2_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s3_name, map(lambda x: x['name'], servers))
- @test.skip_because(bug="1170718")
@test.attr(type='gate')
def test_list_servers_filtered_by_ip(self):
# Filter servers by ip
diff --git a/tempest/api/compute/servers/test_list_servers_negative.py b/tempest/api/compute/servers/test_list_servers_negative.py
index c825fb9..768cc11 100644
--- a/tempest/api/compute/servers/test_list_servers_negative.py
+++ b/tempest/api/compute/servers/test_list_servers_negative.py
@@ -26,6 +26,7 @@
force_tenant_isolation = True
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(ListServersNegativeTestJSON, cls).setUpClass()
cls.client = cls.servers_client
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index 71d6018..80e6008 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -14,9 +14,9 @@
# under the License.
import base64
-import time
import testtools
+import urlparse
from tempest.api.compute import base
from tempest.common.utils import data_utils
@@ -29,7 +29,6 @@
class ServerActionsTestJSON(base.BaseV2ComputeTest):
- resize_available = CONF.compute_feature_enabled.resize
run_ssh = CONF.compute.run_ssh
def setUp(self):
@@ -43,8 +42,15 @@
# Rebuild server if something happened to it during a test
self.__class__.server_id = self.rebuild_server(self.server_id)
+ def tearDown(self):
+ _, server = self.client.get_server(self.server_id)
+ self.assertEqual(self.image_ref, server['image']['id'])
+ self.server_check_teardown()
+ super(ServerActionsTestJSON, self).tearDown()
+
@classmethod
def setUpClass(cls):
+ cls.prepare_instance_network()
super(ServerActionsTestJSON, cls).setUpClass()
cls.client = cls.servers_client
cls.server_id = cls.rebuild_server(None)
@@ -126,7 +132,6 @@
metadata=meta,
personality=personality,
adminPass=password)
- self.addCleanup(self.client.rebuild, self.server_id, self.image_ref)
# Verify the properties in the initial response are correct
self.assertEqual(self.server_id, rebuilt_server['id'])
@@ -146,6 +151,8 @@
linux_client = remote_client.RemoteClient(server, self.ssh_user,
password)
linux_client.validate_authentication()
+ if self.image_ref_alt != self.image_ref:
+ self.client.rebuild(self.server_id, self.image_ref)
@test.attr(type='gate')
def test_rebuild_server_in_stop_state(self):
@@ -158,11 +165,7 @@
resp, server = self.client.stop(self.server_id)
self.assertEqual(202, resp.status)
self.client.wait_for_server_status(self.server_id, 'SHUTOFF')
- self.addCleanup(self.client.start, self.server_id)
resp, rebuilt_server = self.client.rebuild(self.server_id, new_image)
- self.addCleanup(self.client.wait_for_server_status, self.server_id,
- 'SHUTOFF')
- self.addCleanup(self.client.rebuild, self.server_id, old_image)
# Verify the properties in the initial response are correct
self.assertEqual(self.server_id, rebuilt_server['id'])
@@ -176,6 +179,12 @@
rebuilt_image_id = server['image']['id']
self.assertEqual(new_image, rebuilt_image_id)
+ # Restore to the original image (The tearDown will test it again)
+ if self.image_ref_alt != self.image_ref:
+ self.client.rebuild(self.server_id, old_image)
+ self.client.wait_for_server_status(self.server_id, 'SHUTOFF')
+ self.client.start(self.server_id)
+
def _detect_server_image_flavor(self, server_id):
# Detects the current server image flavor ref.
resp, server = self.client.get_server(server_id)
@@ -184,7 +193,8 @@
if current_flavor == self.flavor_ref else self.flavor_ref
return current_flavor, new_flavor_ref
- @testtools.skipIf(not resize_available, 'Resize not available.')
+ @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+ 'Resize not available.')
@test.attr(type='smoke')
def test_resize_server_confirm(self):
# The server's RAM and disk space should be modified to that of
@@ -203,7 +213,8 @@
resp, server = self.client.get_server(self.server_id)
self.assertEqual(new_flavor_ref, server['flavor']['id'])
- @testtools.skipIf(not resize_available, 'Resize not available.')
+ @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+ 'Resize not available.')
@test.attr(type='gate')
def test_resize_server_revert(self):
# The server's RAM and disk space should return to its original
@@ -219,18 +230,8 @@
self.client.revert_resize(self.server_id)
self.client.wait_for_server_status(self.server_id, 'ACTIVE')
- # Need to poll for the id change until lp#924371 is fixed
resp, server = self.client.get_server(self.server_id)
- start = int(time.time())
-
- while server['flavor']['id'] != previous_flavor_ref:
- time.sleep(self.build_interval)
- resp, server = self.client.get_server(self.server_id)
-
- if int(time.time()) - start >= self.build_timeout:
- message = 'Server %s failed to revert resize within the \
- required time (%s s).' % (self.server_id, self.build_timeout)
- raise exceptions.TimeoutException(message)
+ self.assertEqual(previous_flavor_ref, server['flavor']['id'])
@test.attr(type='gate')
def test_create_backup(self):
@@ -350,6 +351,8 @@
self.wait_for(self._get_output)
+ @testtools.skipUnless(CONF.compute_feature_enabled.pause,
+ 'Pause is not available.')
@test.attr(type='gate')
def test_pause_unpause_server(self):
resp, server = self.client.pause_server(self.server_id)
@@ -359,6 +362,8 @@
self.assertEqual(202, resp.status)
self.client.wait_for_server_status(self.server_id, 'ACTIVE')
+ @testtools.skipUnless(CONF.compute_feature_enabled.suspend,
+ 'Suspend is not available.')
@test.attr(type='gate')
def test_suspend_resume_server(self):
resp, server = self.client.suspend_server(self.server_id)
@@ -427,6 +432,29 @@
self.assertEqual(202, resp.status)
self.servers_client.wait_for_server_status(self.server_id, 'ACTIVE')
+ def _validate_url(self, url):
+ valid_scheme = ['http', 'https']
+ parsed_url = urlparse.urlparse(url)
+ self.assertNotEqual('None', parsed_url.port)
+ self.assertNotEqual('None', parsed_url.hostname)
+ self.assertIn(parsed_url.scheme, valid_scheme)
+
+ @testtools.skipUnless(CONF.compute_feature_enabled.vnc_console,
+ 'VNC Console feature is disabled.')
+ @test.attr(type='gate')
+ def test_get_vnc_console(self):
+ # Get the VNC console of type 'novnc' and 'xvpvnc'
+ console_types = ['novnc', 'xvpvnc']
+ for console_type in console_types:
+ resp, body = self.servers_client.get_vnc_console(self.server_id,
+ console_type)
+ self.assertEqual(
+ 200, resp.status,
+ "Failed to get Console Type: %s" % (console_types))
+ self.assertEqual(console_type, body['type'])
+ self.assertNotEqual('', body['url'])
+ self._validate_url(body['url'])
+
class ServerActionsTestXML(ServerActionsTestJSON):
_interface = 'xml'
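test_get_vnc_console checks each returned console URL with urlparse. For reference, what that parsing yields for a plausible console URL (sample value only, Python 2 urlparse as imported above)::

    import urlparse

    url = 'https://example.com:6080/vnc_auto.html?token=abc'  # illustrative
    parsed = urlparse.urlparse(url)
    assert parsed.scheme in ['http', 'https']
    assert parsed.hostname == 'example.com'
    assert parsed.port == 6080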
diff --git a/tempest/api/compute/servers/test_server_metadata_negative.py b/tempest/api/compute/servers/test_server_metadata_negative.py
index 8b69c78..b55833c 100644
--- a/tempest/api/compute/servers/test_server_metadata_negative.py
+++ b/tempest/api/compute/servers/test_server_metadata_negative.py
@@ -26,10 +26,7 @@
super(ServerMetadataNegativeTestJSON, cls).setUpClass()
cls.client = cls.servers_client
cls.quotas = cls.quotas_client
- cls.admin_client = cls._get_identity_admin_client()
- resp, tenants = cls.admin_client.list_tenants()
- cls.tenant_id = [tnt['id'] for tnt in tenants if tnt['name'] ==
- cls.client.tenant_name][0]
+ cls.tenant_id = cls.client.tenant_id
resp, server = cls.create_test_server(meta={}, wait_until='ACTIVE')
cls.server_id = server['id']
diff --git a/tempest/api/compute/servers/test_server_rescue.py b/tempest/api/compute/servers/test_server_rescue.py
index 48f2e14..093e9e2 100644
--- a/tempest/api/compute/servers/test_server_rescue.py
+++ b/tempest/api/compute/servers/test_server_rescue.py
@@ -21,6 +21,7 @@
class ServerRescueTestJSON(base.BaseV2ComputeTest):
@classmethod
+ @test.safe_setup
def setUpClass(cls):
cls.set_network_resources(network=True, subnet=True, router=True)
super(ServerRescueTestJSON, cls).setUpClass()
diff --git a/tempest/api/compute/servers/test_server_rescue_negative.py b/tempest/api/compute/servers/test_server_rescue_negative.py
index e027567..dae4709 100644
--- a/tempest/api/compute/servers/test_server_rescue_negative.py
+++ b/tempest/api/compute/servers/test_server_rescue_negative.py
@@ -12,16 +12,21 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+import testtools
from tempest.api.compute import base
from tempest.common.utils import data_utils
+from tempest import config
from tempest import exceptions
from tempest import test
+CONF = config.CONF
+
class ServerRescueNegativeTestJSON(base.BaseV2ComputeTest):
@classmethod
+ @test.safe_setup
def setUpClass(cls):
cls.set_network_resources(network=True, subnet=True, router=True)
super(ServerRescueNegativeTestJSON, cls).setUpClass()
@@ -44,6 +49,7 @@
cls.servers_client.rescue_server(
cls.rescue_id, adminPass=rescue_password)
cls.servers_client.wait_for_server_status(cls.rescue_id, 'RESCUE')
+ cls.servers_client.wait_for_server_status(cls.server_id, 'ACTIVE')
@classmethod
def tearDownClass(cls):
@@ -65,6 +71,8 @@
self.assertEqual(202, resp.status)
self.servers_client.wait_for_server_status(server_id, 'ACTIVE')
+ @testtools.skipUnless(CONF.compute_feature_enabled.pause,
+ 'Pause is not available.')
@test.attr(type=['negative', 'gate'])
def test_rescue_paused_instance(self):
# Rescue a paused server
diff --git a/tempest/api/compute/servers/test_servers_negative.py b/tempest/api/compute/servers/test_servers_negative.py
index 4cccbd6..5ac667e 100644
--- a/tempest/api/compute/servers/test_servers_negative.py
+++ b/tempest/api/compute/servers/test_servers_negative.py
@@ -16,6 +16,8 @@
import base64
import sys
+import testtools
+
from tempest.api.compute import base
from tempest import clients
from tempest.common.utils import data_utils
@@ -35,6 +37,10 @@
except Exception:
self.__class__.server_id = self.rebuild_server(self.server_id)
+ def tearDown(self):
+ self.server_check_teardown()
+ super(ServersNegativeTestJSON, self).tearDown()
+
@classmethod
def setUpClass(cls):
super(ServersNegativeTestJSON, cls).setUpClass()
@@ -125,16 +131,17 @@
self.assertRaises(exceptions.NotFound, self.client.reboot,
nonexistent_server, 'SOFT')
+ @testtools.skipUnless(CONF.compute_feature_enabled.pause,
+ 'Pause is not available.')
@test.attr(type=['negative', 'gate'])
def test_pause_paused_server(self):
# Pause a paused server.
self.client.pause_server(self.server_id)
- self.addCleanup(self.client.unpause_server,
- self.server_id)
self.client.wait_for_server_status(self.server_id, 'PAUSED')
self.assertRaises(exceptions.Conflict,
self.client.pause_server,
self.server_id)
+ self.client.unpause_server(self.server_id)
@test.attr(type=['negative', 'gate'])
def test_rebuild_reboot_deleted_server(self):
@@ -304,6 +311,8 @@
self.assertRaises(exceptions.NotFound, self.servers_client.stop,
nonexistent_server)
+ @testtools.skipUnless(CONF.compute_feature_enabled.pause,
+ 'Pause is not available.')
@test.attr(type=['negative', 'gate'])
def test_pause_non_existent_server(self):
# pause a non existent server
@@ -311,6 +320,8 @@
self.assertRaises(exceptions.NotFound, self.client.pause_server,
nonexistent_server)
+ @testtools.skipUnless(CONF.compute_feature_enabled.pause,
+ 'Pause is not available.')
@test.attr(type=['negative', 'gate'])
def test_unpause_non_existent_server(self):
# unpause a non existent server
@@ -318,6 +329,8 @@
self.assertRaises(exceptions.NotFound, self.client.unpause_server,
nonexistent_server)
+ @testtools.skipUnless(CONF.compute_feature_enabled.pause,
+ 'Pause is not available.')
@test.attr(type=['negative', 'gate'])
def test_unpause_server_invalid_state(self):
# unpause an active server.
@@ -325,6 +338,8 @@
self.client.unpause_server,
self.server_id)
+ @testtools.skipUnless(CONF.compute_feature_enabled.suspend,
+ 'Suspend is not available.')
@test.attr(type=['negative', 'gate'])
def test_suspend_non_existent_server(self):
# suspend a non existent server
@@ -332,18 +347,21 @@
self.assertRaises(exceptions.NotFound, self.client.suspend_server,
nonexistent_server)
+ @testtools.skipUnless(CONF.compute_feature_enabled.suspend,
+ 'Suspend is not available.')
@test.attr(type=['negative', 'gate'])
def test_suspend_server_invalid_state(self):
# suspend a suspended server.
resp, _ = self.client.suspend_server(self.server_id)
- self.addCleanup(self.client.resume_server,
- self.server_id)
self.assertEqual(202, resp.status)
self.client.wait_for_server_status(self.server_id, 'SUSPENDED')
self.assertRaises(exceptions.Conflict,
self.client.suspend_server,
self.server_id)
+ self.client.resume_server(self.server_id)
+ @testtools.skipUnless(CONF.compute_feature_enabled.suspend,
+ 'Suspend is not available.')
@test.attr(type=['negative', 'gate'])
def test_resume_non_existent_server(self):
# resume a non existent server
@@ -351,6 +369,8 @@
self.assertRaises(exceptions.NotFound, self.client.resume_server,
nonexistent_server)
+ @testtools.skipUnless(CONF.compute_feature_enabled.suspend,
+ 'Suspend is not available.')
@test.attr(type=['negative', 'gate'])
def test_resume_server_invalid_state(self):
# resume an active server.
@@ -408,7 +428,6 @@
# shelve a shelved server.
resp, server = self.client.shelve_server(self.server_id)
self.assertEqual(202, resp.status)
- self.addCleanup(self.client.unshelve_server, self.server_id)
offload_time = CONF.compute.shelved_offload_time
if offload_time >= 0:
@@ -430,6 +449,8 @@
self.client.shelve_server,
self.server_id)
+ self.client.unshelve_server(self.server_id)
+
@test.attr(type=['negative', 'gate'])
def test_unshelve_non_existent_server(self):
# unshelve a non existent server
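Several hunks in this file move unpause/resume/unshelve calls from addCleanup into the test body and add a tearDown that calls server_check_teardown. One reason the ordering can matter: unittest runs addCleanup callbacks only after tearDown has completed, so a cleanup-registered unpause would fire too late for a teardown-time check. A small, generic demonstration (plain unittest, not tempest code)::

    import unittest

    events = []


    class OrderingExample(unittest.TestCase):

        def tearDown(self):
            events.append('tearDown')

        def test_order(self):
            self.addCleanup(events.append, 'cleanup')
            events.append('test body')


    if __name__ == '__main__':
        unittest.main(exit=False)
        # Prints ['test body', 'tearDown', 'cleanup'] -- cleanups run last.
        print(events)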
diff --git a/tempest/api/compute/servers/test_servers_negative_new.py b/tempest/api/compute/servers/test_servers_negative_new.py
index 42ace76..43ddb3a 100644
--- a/tempest/api/compute/servers/test_servers_negative_new.py
+++ b/tempest/api/compute/servers/test_servers_negative_new.py
@@ -13,28 +13,22 @@
# License for the specific language governing permissions and limitations
# under the License.
-import testscenarios
from tempest.api.compute import base
from tempest import test
-load_tests = testscenarios.load_tests_apply_scenarios
+load_tests = test.NegativeAutoTest.load_tests
+@test.SimpleNegativeAutoTest
class GetConsoleOutputNegativeTestJSON(base.BaseV2ComputeTest,
test.NegativeAutoTest):
_service = 'compute'
_schema_file = 'compute/servers/get_console_output.json'
- scenarios = test.NegativeAutoTest.generate_scenario(_schema_file)
-
@classmethod
def setUpClass(cls):
super(GetConsoleOutputNegativeTestJSON, cls).setUpClass()
_resp, server = cls.create_test_server()
cls.set_resource("server", server['id'])
-
- @test.attr(type=['negative', 'gate'])
- def test_get_console_output(self):
- self.execute(self._schema_file)
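Scenario generation now goes through test.NegativeAutoTest.load_tests instead of testscenarios. For reference, a generic sketch of the module-level load_tests hook this relies on (plain unittest, not the tempest helper)::

    import unittest


    def load_tests(loader, standard_tests, pattern):
        # unittest's discovery calls this hook when a module defines it and
        # uses the returned suite in place of the default one.
        suite = unittest.TestSuite()
        suite.addTests(standard_tests)
        return suite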
diff --git a/tempest/api/compute/test_authorization.py b/tempest/api/compute/test_authorization.py
index 7f909d7..375ddf8 100644
--- a/tempest/api/compute/test_authorization.py
+++ b/tempest/api/compute/test_authorization.py
@@ -43,10 +43,7 @@
if CONF.compute.allow_tenant_isolation:
creds = cls.isolated_creds.get_alt_creds()
- username, tenant_name, password = creds
- cls.alt_manager = clients.Manager(username=username,
- password=password,
- tenant_name=tenant_name)
+ cls.alt_manager = clients.Manager(credentials=creds)
else:
# Use the alt_XXX credentials in the config file
cls.alt_manager = clients.AltManager()
@@ -60,7 +57,7 @@
resp, cls.server = cls.client.get_server(server['id'])
name = data_utils.rand_name('image')
- resp, body = cls.client.create_image(server['id'], name)
+ resp, body = cls.images_client.create_image(server['id'], name)
image_id = data_utils.parse_image_id(resp['location'])
cls.images_client.wait_for_image_status(image_id, 'ACTIVE')
resp, cls.image = cls.images_client.get_image(image_id)
diff --git a/tempest/api/compute/test_quotas.py b/tempest/api/compute/test_quotas.py
index 230d433..dc85e76 100644
--- a/tempest/api/compute/test_quotas.py
+++ b/tempest/api/compute/test_quotas.py
@@ -23,10 +23,8 @@
def setUpClass(cls):
super(QuotasTestJSON, cls).setUpClass()
cls.client = cls.quotas_client
- cls.admin_client = cls._get_identity_admin_client()
- resp, tenants = cls.admin_client.list_tenants()
- cls.tenant_id = [tnt['id'] for tnt in tenants if tnt['name'] ==
- cls.client.tenant_name][0]
+ cls.tenant_id = cls.client.tenant_id
+ cls.user_id = cls.client.user_id
cls.default_quota_set = set(('injected_file_content_bytes',
'metadata_items', 'injected_files',
'ram', 'floating_ips',
@@ -45,6 +43,14 @@
sorted(quota_set.keys()))
self.assertEqual(quota_set['id'], self.tenant_id)
+ # get the quota set using user id
+ resp, quota_set = self.client.get_quota_set(self.tenant_id,
+ self.user_id)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(sorted(expected_quota_set),
+ sorted(quota_set.keys()))
+ self.assertEqual(quota_set['id'], self.tenant_id)
+
@test.attr(type='smoke')
def test_get_default_quotas(self):
        # User can get the default quota set for its tenant
diff --git a/tempest/api/compute/api_schema/__init__.py b/tempest/api/compute/v2/__init__.py
similarity index 100%
copy from tempest/api/compute/api_schema/__init__.py
copy to tempest/api/compute/v2/__init__.py
diff --git a/tempest/api/compute/v3/admin/test_availability_zone.py b/tempest/api/compute/v3/admin/test_availability_zone.py
deleted file mode 100644
index 9ca8953..0000000
--- a/tempest/api/compute/v3/admin/test_availability_zone.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# Copyright 2013 NEC Corporation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.api.compute import base
-from tempest.test import attr
-
-
-class AZAdminV3Test(base.BaseV3ComputeAdminTest):
-
- """
- Tests Availability Zone API List
- """
-
- @classmethod
- def setUpClass(cls):
- super(AZAdminV3Test, cls).setUpClass()
- cls.client = cls.availability_zone_admin_client
-
- @attr(type='gate')
- def test_get_availability_zone_list(self):
- # List of availability zone
- resp, availability_zone = self.client.get_availability_zone_list()
- self.assertEqual(200, resp.status)
- self.assertTrue(len(availability_zone) > 0)
-
- @attr(type='gate')
- def test_get_availability_zone_list_detail(self):
- # List of availability zones and available services
- resp, availability_zone = \
- self.client.get_availability_zone_list_detail()
- self.assertEqual(200, resp.status)
- self.assertTrue(len(availability_zone) > 0)
diff --git a/tempest/api/compute/v3/admin/test_flavors.py b/tempest/api/compute/v3/admin/test_flavors.py
index 401eb85..2a4fc02 100644
--- a/tempest/api/compute/v3/admin/test_flavors.py
+++ b/tempest/api/compute/v3/admin/test_flavors.py
@@ -169,7 +169,6 @@
flag = True
self.assertTrue(flag)
- @test.skip_because(bug="1209101")
@test.attr(type='gate')
def test_list_non_public_flavor(self):
# Create a flavor with os-flavor-access:is_public false should
diff --git a/tempest/api/compute/v3/admin/test_flavors_access.py b/tempest/api/compute/v3/admin/test_flavors_access.py
index 03305ff..c641bf6 100644
--- a/tempest/api/compute/v3/admin/test_flavors_access.py
+++ b/tempest/api/compute/v3/admin/test_flavors_access.py
@@ -31,12 +31,8 @@
cls.client = cls.flavors_admin_client
admin_client = cls._get_identity_admin_client()
- cls.tenant = admin_client.get_tenant_by_name(cls.flavors_client.
- tenant_name)
- cls.tenant_id = cls.tenant['id']
- cls.adm_tenant = admin_client.get_tenant_by_name(
- cls.flavors_admin_client.tenant_name)
- cls.adm_tenant_id = cls.adm_tenant['id']
+ cls.tenant_id = cls.client.tenant_id
+ cls.adm_tenant_id = admin_client.tenant_id
cls.flavor_name_prefix = 'test_flavor_access_'
cls.ram = 512
cls.vcpus = 1
diff --git a/tempest/api/compute/v3/admin/test_flavors_access_negative.py b/tempest/api/compute/v3/admin/test_flavors_access_negative.py
index 334d124..02ecb24 100644
--- a/tempest/api/compute/v3/admin/test_flavors_access_negative.py
+++ b/tempest/api/compute/v3/admin/test_flavors_access_negative.py
@@ -33,13 +33,7 @@
super(FlavorsAccessNegativeV3Test, cls).setUpClass()
cls.client = cls.flavors_admin_client
- admin_client = cls._get_identity_admin_client()
- cls.tenant = admin_client.get_tenant_by_name(cls.flavors_client.
- tenant_name)
- cls.tenant_id = cls.tenant['id']
- cls.adm_tenant = admin_client.get_tenant_by_name(
- cls.flavors_admin_client.tenant_name)
- cls.adm_tenant_id = cls.adm_tenant['id']
+ cls.tenant_id = cls.client.tenant_id
cls.flavor_name_prefix = 'test_flavor_access_'
cls.ram = 512
cls.vcpus = 1
diff --git a/tempest/api/compute/v3/admin/test_migrations.py b/tempest/api/compute/v3/admin/test_migrations.py
new file mode 100644
index 0000000..e8bd473
--- /dev/null
+++ b/tempest/api/compute/v3/admin/test_migrations.py
@@ -0,0 +1,50 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import testtools
+
+from tempest.api.compute import base
+from tempest import config
+from tempest import test
+
+CONF = config.CONF
+
+
+class MigrationsAdminV3Test(base.BaseV3ComputeAdminTest):
+
+ @test.attr(type='gate')
+ def test_list_migrations(self):
+ # Admin can get the migrations list
+ resp, _ = self.migrations_admin_client.list_migrations()
+ self.assertEqual(200, resp.status)
+
+ @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+ 'Resize not available.')
+ @test.attr(type='gate')
+ def test_list_migrations_in_flavor_resize_situation(self):
+ # Admin can get the migrations list which contains the resized server
+ resp, server = self.create_test_server(wait_until="ACTIVE")
+ server_id = server['id']
+
+ resp, _ = self.servers_client.resize(server_id, self.flavor_ref_alt)
+ self.assertEqual(202, resp.status)
+ self.servers_client.wait_for_server_status(server_id, 'VERIFY_RESIZE')
+ self.servers_client.confirm_resize(server_id)
+ self.servers_client.wait_for_server_status(server_id, 'ACTIVE')
+
+ resp, body = self.migrations_admin_client.list_migrations()
+ self.assertEqual(200, resp.status)
+
+ instance_uuids = [x['instance_uuid'] for x in body]
+ self.assertIn(server_id, instance_uuids)
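test_list_migrations_in_flavor_resize_situation drives the server through a resize before checking the migrations listing. The status transitions it waits on, written out as a tiny lookup table (a sketch of the asserted flow only, not nova's full state machine)::

    # ACTIVE --resize--> VERIFY_RESIZE --confirm_resize--> ACTIVE
    TRANSITIONS = {
        ('ACTIVE', 'resize'): 'VERIFY_RESIZE',
        ('VERIFY_RESIZE', 'confirm_resize'): 'ACTIVE',
    }

    state = 'ACTIVE'
    for action in ('resize', 'confirm_resize'):
        state = TRANSITIONS[(state, action)]
    assert state == 'ACTIVE'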
diff --git a/tempest/api/compute/v3/admin/test_quotas.py b/tempest/api/compute/v3/admin/test_quotas.py
index 0c138bb..19c31fe 100644
--- a/tempest/api/compute/v3/admin/test_quotas.py
+++ b/tempest/api/compute/v3/admin/test_quotas.py
@@ -32,8 +32,7 @@
# NOTE(afazekas): these test cases should always create and use a new
    # tenant; most of them should be skipped if we can't do that
- cls.demo_tenant_id = cls.isolated_creds.get_primary_user().get(
- 'tenantId')
+ cls.demo_tenant_id = cls.client.tenant_id
cls.default_quota_set = set(('metadata_items',
'ram', 'floating_ips',
@@ -56,7 +55,7 @@
def test_get_quota_set_detail(self):
# Admin can get the detail of resource quota set for a tenant
expected_quota_set = self.default_quota_set | set(['id'])
- expected_detail = {'reserved', 'limit', 'in_use'}
+ expected_detail = ['reserved', 'limit', 'in_use']
resp, quota_set = self.adm_client.get_quota_set_detail(
self.demo_tenant_id)
self.assertEqual(200, resp.status)
@@ -94,18 +93,58 @@
# TODO(afazekas): merge these test cases
@test.attr(type='gate')
def test_get_updated_quotas(self):
- # Verify that GET shows the updated quota set
+        # Verify that GET shows the updated quota set of the tenant
tenant_name = data_utils.rand_name('cpu_quota_tenant_')
tenant_desc = tenant_name + '-desc'
identity_client = self.os_adm.identity_client
_, tenant = identity_client.create_tenant(name=tenant_name,
description=tenant_desc)
tenant_id = tenant['id']
- self.addCleanup(identity_client.delete_tenant,
- tenant_id)
+ self.addCleanup(identity_client.delete_tenant, tenant_id)
- self.adm_client.update_quota_set(tenant_id,
- ram='5120')
+ self.adm_client.update_quota_set(tenant_id, ram='5120')
resp, quota_set = self.adm_client.get_quota_set(tenant_id)
self.assertEqual(200, resp.status)
- self.assertEqual(quota_set['ram'], 5120)
+ self.assertEqual(5120, quota_set['ram'])
+
+        # Verify that GET shows the updated quota set of the user
+ user_name = data_utils.rand_name('cpu_quota_user_')
+ password = data_utils.rand_name('password-')
+ email = user_name + '@testmail.tm'
+ _, user = identity_client.create_user(name=user_name,
+ password=password,
+ tenant_id=tenant_id,
+ email=email)
+ user_id = user['id']
+ self.addCleanup(identity_client.delete_user, user_id)
+
+ self.adm_client.update_quota_set(tenant_id,
+ user_id=user_id,
+ ram='2048')
+ resp, quota_set = self.adm_client.get_quota_set(tenant_id,
+ user_id=user_id)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(2048, quota_set['ram'])
+
+ @test.attr(type='gate')
+ def test_delete_quota(self):
+ # Admin can delete the resource quota set for a tenant
+ tenant_name = data_utils.rand_name('cpu_quota_tenant_')
+ tenant_desc = tenant_name + '-desc'
+ identity_client = self.os_adm.identity_client
+ _, tenant = identity_client.create_tenant(name=tenant_name,
+ description=tenant_desc)
+ tenant_id = tenant['id']
+ self.addCleanup(identity_client.delete_tenant, tenant_id)
+ resp, quota_set_default = self.adm_client.get_quota_set(tenant_id)
+ self.assertEqual(200, resp.status)
+ ram_default = quota_set_default['ram']
+
+ self.adm_client.update_quota_set(tenant_id, ram='5120')
+ self.assertEqual(200, resp.status)
+ resp, _ = self.adm_client.delete_quota_set(tenant_id)
+ self.assertEqual(204, resp.status)
+
+ resp, quota_set_new = self.adm_client.get_quota_set(tenant_id)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(ram_default, quota_set_new['ram'])
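test_delete_quota asserts that deleting a tenant's quota set reverts a previously updated value to the default. A toy in-memory model of that behaviour (illustrative only; the 51200 default and the helper names are assumptions, not the nova API)::

    defaults = {'ram': 51200}
    overrides = {}


    def get_quota_set(tenant_id):
        quota = dict(defaults)
        quota.update(overrides.get(tenant_id, {}))
        return quota


    overrides['demo'] = {'ram': 5120}        # update_quota_set(..., ram='5120')
    assert get_quota_set('demo')['ram'] == 5120
    overrides.pop('demo')                    # delete_quota_set(...)
    assert get_quota_set('demo')['ram'] == defaults['ram']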
diff --git a/tempest/api/compute/v3/admin/test_quotas_negative.py b/tempest/api/compute/v3/admin/test_quotas_negative.py
index d138e80..307462f 100644
--- a/tempest/api/compute/v3/admin/test_quotas_negative.py
+++ b/tempest/api/compute/v3/admin/test_quotas_negative.py
@@ -30,8 +30,7 @@
# NOTE(afazekas): these test cases should always create and use a new
    # tenant; most of them should be skipped if we can't do that
- cls.demo_tenant_id = cls.isolated_creds.get_primary_user().get(
- 'tenantId')
+ cls.demo_tenant_id = cls.isolated_creds.get_primary_creds().tenant_id
        # TODO(afazekas): Add dedicated tenant to the skipped quota tests
# it can be moved into the setUpClass as well
diff --git a/tempest/api/compute/v3/admin/test_servers.py b/tempest/api/compute/v3/admin/test_servers.py
index fb8afe4..366cfc6 100644
--- a/tempest/api/compute/v3/admin/test_servers.py
+++ b/tempest/api/compute/v3/admin/test_servers.py
@@ -14,7 +14,6 @@
from tempest.api.compute import base
from tempest.common.utils import data_utils
-from tempest import exceptions
from tempest import test
@@ -27,6 +26,7 @@
_host_key = 'os-extended-server-attributes:host'
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(ServersAdminV3Test, cls).setUpClass()
cls.client = cls.servers_admin_client
@@ -43,16 +43,6 @@
wait_until='ACTIVE')
cls.s2_id = server['id']
- def _get_unused_flavor_id(self):
- flavor_id = data_utils.rand_int_id(start=1000)
- while True:
- try:
- resp, body = self.flavors_client.get_flavor_details(flavor_id)
- except exceptions.NotFound:
- break
- flavor_id = data_utils.rand_int_id(start=1000)
- return flavor_id
-
@test.attr(type='gate')
def test_list_servers_by_admin(self):
# Listing servers by admin user returns empty list by default
diff --git a/tempest/api/compute/v3/admin/test_servers_negative.py b/tempest/api/compute/v3/admin/test_servers_negative.py
index cc1be4e..a46da47 100644
--- a/tempest/api/compute/v3/admin/test_servers_negative.py
+++ b/tempest/api/compute/v3/admin/test_servers_negative.py
@@ -14,11 +14,16 @@
import uuid
+import testtools
+
from tempest.api.compute import base
from tempest.common.utils import data_utils
+from tempest import config
from tempest import exceptions
from tempest.test import attr
+CONF = config.CONF
+
class ServersAdminNegativeV3Test(base.BaseV3ComputeAdminTest):
@@ -32,10 +37,7 @@
cls.client = cls.servers_admin_client
cls.non_adm_client = cls.servers_client
cls.flavors_client = cls.flavors_admin_client
- cls.identity_client = cls._get_identity_admin_client()
- tenant = cls.identity_client.get_tenant_by_name(
- cls.client.tenant_name)
- cls.tenant_id = tenant['id']
+ cls.tenant_id = cls.client.tenant_id
cls.s1_name = data_utils.rand_name('server')
resp, server = cls.create_test_server(name=cls.s1_name,
@@ -119,6 +121,8 @@
self.client.migrate_server,
str(uuid.uuid4()))
+ @testtools.skipUnless(CONF.compute_feature_enabled.suspend,
+ 'Suspend is not available.')
@attr(type=['negative', 'gate'])
def test_migrate_server_invalid_state(self):
# create server.
diff --git a/tempest/api/compute/v3/admin/test_services.py b/tempest/api/compute/v3/admin/test_services.py
index 0a7c7f1..b367dad 100644
--- a/tempest/api/compute/v3/admin/test_services.py
+++ b/tempest/api/compute/v3/admin/test_services.py
@@ -14,7 +14,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest.api.compute.api_schema import services as schema
from tempest.api.compute import base
from tempest.test import attr
@@ -33,7 +32,7 @@
@attr(type='gate')
def test_list_services(self):
resp, services = self.client.list_services()
- self.validate_response(schema.list_services, resp, services)
+ self.assertEqual(200, resp.status)
self.assertNotEqual(0, len(services))
@attr(type='gate')
@@ -41,7 +40,7 @@
binary_name = 'nova-compute'
params = {'binary': binary_name}
resp, services = self.client.list_services(params)
- self.validate_response(schema.list_services, resp, services)
+ self.assertEqual(200, resp.status)
self.assertNotEqual(0, len(services))
for service in services:
self.assertEqual(binary_name, service['binary'])
@@ -49,14 +48,13 @@
@attr(type='gate')
def test_get_service_by_host_name(self):
resp, services = self.client.list_services()
- self.validate_response(schema.list_services, resp, services)
+ self.assertEqual(200, resp.status)
host_name = services[0]['host']
services_on_host = [service for service in services if
service['host'] == host_name]
params = {'host': host_name}
resp, services = self.client.list_services(params)
- self.validate_response(schema.list_services, resp, services)
        # we could have a periodic job check-in between the 2 service
# lookups, so only compare binary lists.
@@ -70,13 +68,12 @@
@attr(type='gate')
def test_get_service_by_service_and_host_name(self):
resp, services = self.client.list_services()
- self.validate_response(schema.list_services, resp, services)
host_name = services[0]['host']
binary_name = services[0]['binary']
params = {'host': host_name, 'binary': binary_name}
resp, services = self.client.list_services(params)
- self.validate_response(schema.list_services, resp, services)
+ self.assertEqual(200, resp.status)
self.assertEqual(1, len(services))
self.assertEqual(host_name, services[0]['host'])
self.assertEqual(binary_name, services[0]['binary'])
diff --git a/tempest/api/compute/v3/certificates/__init__.py b/tempest/api/compute/v3/certificates/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/tempest/api/compute/v3/certificates/__init__.py
+++ /dev/null
diff --git a/tempest/api/compute/v3/certificates/test_certificates.py b/tempest/api/compute/v3/certificates/test_certificates.py
deleted file mode 100644
index ce025fc..0000000
--- a/tempest/api/compute/v3/certificates/test_certificates.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.api.compute import base
-from tempest.test import attr
-
-
-class CertificatesV3Test(base.BaseV3ComputeTest):
-
- @attr(type='gate')
- def test_create_and_get_root_certificate(self):
- # create certificates
- resp, create_body = self.certificates_client.create_certificate()
- self.assertEqual(201, resp.status)
- self.assertIn('data', create_body)
- self.assertIn('private_key', create_body)
- # get the root certificate
- resp, body = self.certificates_client.get_certificate('root')
- self.assertEqual(200, resp.status)
- self.assertIn('data', body)
- self.assertIn('private_key', body)
diff --git a/tempest/api/compute/v3/flavors/test_flavors.py b/tempest/api/compute/v3/flavors/test_flavors.py
deleted file mode 100644
index a0bbba6..0000000
--- a/tempest/api/compute/v3/flavors/test_flavors.py
+++ /dev/null
@@ -1,127 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.api.compute import base
-from tempest import test
-
-
-class FlavorsV3Test(base.BaseV3ComputeTest):
-
- @classmethod
- def setUpClass(cls):
- super(FlavorsV3Test, cls).setUpClass()
- cls.client = cls.flavors_client
-
- @test.attr(type='smoke')
- def test_list_flavors(self):
- # List of all flavors should contain the expected flavor
- resp, flavors = self.client.list_flavors()
- resp, flavor = self.client.get_flavor_details(self.flavor_ref)
- flavor_min_detail = {'id': flavor['id'], 'links': flavor['links'],
- 'name': flavor['name']}
- self.assertIn(flavor_min_detail, flavors)
-
- @test.attr(type='smoke')
- def test_list_flavors_with_detail(self):
- # Detailed list of all flavors should contain the expected flavor
- resp, flavors = self.client.list_flavors_with_detail()
- resp, flavor = self.client.get_flavor_details(self.flavor_ref)
- self.assertIn(flavor, flavors)
-
- @test.attr(type='smoke')
- def test_get_flavor(self):
- # The expected flavor details should be returned
- resp, flavor = self.client.get_flavor_details(self.flavor_ref)
- self.assertEqual(self.flavor_ref, flavor['id'])
-
- @test.attr(type='gate')
- def test_list_flavors_limit_results(self):
- # Only the expected number of flavors should be returned
- params = {'limit': 1}
- resp, flavors = self.client.list_flavors(params)
- self.assertEqual(1, len(flavors))
-
- @test.attr(type='gate')
- def test_list_flavors_detailed_limit_results(self):
- # Only the expected number of flavors (detailed) should be returned
- params = {'limit': 1}
- resp, flavors = self.client.list_flavors_with_detail(params)
- self.assertEqual(1, len(flavors))
-
- @test.attr(type='gate')
- def test_list_flavors_using_marker(self):
- # The list of flavors should start from the provided marker
- resp, flavors = self.client.list_flavors()
- flavor_id = flavors[0]['id']
-
- params = {'marker': flavor_id}
- resp, flavors = self.client.list_flavors(params)
- self.assertFalse(any([i for i in flavors if i['id'] == flavor_id]),
- 'The list of flavors did not start after the marker.')
-
- @test.attr(type='gate')
- def test_list_flavors_detailed_using_marker(self):
- # The list of flavors should start from the provided marker
- resp, flavors = self.client.list_flavors_with_detail()
- flavor_id = flavors[0]['id']
-
- params = {'marker': flavor_id}
- resp, flavors = self.client.list_flavors_with_detail(params)
- self.assertFalse(any([i for i in flavors if i['id'] == flavor_id]),
- 'The list of flavors did not start after the marker.')
-
- @test.attr(type='gate')
- def test_list_flavors_detailed_filter_by_min_disk(self):
- # The detailed list of flavors should be filtered by disk space
- resp, flavors = self.client.list_flavors_with_detail()
- flavors = sorted(flavors, key=lambda k: k['disk'])
- flavor_id = flavors[0]['id']
-
- params = {'min_disk': flavors[0]['disk'] + 1}
- resp, flavors = self.client.list_flavors_with_detail(params)
- self.assertFalse(any([i for i in flavors if i['id'] == flavor_id]))
-
- @test.attr(type='gate')
- def test_list_flavors_detailed_filter_by_min_ram(self):
- # The detailed list of flavors should be filtered by RAM
- resp, flavors = self.client.list_flavors_with_detail()
- flavors = sorted(flavors, key=lambda k: k['ram'])
- flavor_id = flavors[0]['id']
-
- params = {'min_ram': flavors[0]['ram'] + 1}
- resp, flavors = self.client.list_flavors_with_detail(params)
- self.assertFalse(any([i for i in flavors if i['id'] == flavor_id]))
-
- @test.attr(type='gate')
- def test_list_flavors_filter_by_min_disk(self):
- # The list of flavors should be filtered by disk space
- resp, flavors = self.client.list_flavors_with_detail()
- flavors = sorted(flavors, key=lambda k: k['disk'])
- flavor_id = flavors[0]['id']
-
- params = {'min_disk': flavors[0]['disk'] + 1}
- resp, flavors = self.client.list_flavors(params)
- self.assertFalse(any([i for i in flavors if i['id'] == flavor_id]))
-
- @test.attr(type='gate')
- def test_list_flavors_filter_by_min_ram(self):
- # The list of flavors should be filtered by RAM
- resp, flavors = self.client.list_flavors_with_detail()
- flavors = sorted(flavors, key=lambda k: k['ram'])
- flavor_id = flavors[0]['id']
-
- params = {'min_ram': flavors[0]['ram'] + 1}
- resp, flavors = self.client.list_flavors(params)
- self.assertFalse(any([i for i in flavors if i['id'] == flavor_id]))
diff --git a/tempest/api/compute/v3/flavors/test_flavors_negative.py b/tempest/api/compute/v3/flavors/test_flavors_negative.py
index 346f6d6..657e2cd 100644
--- a/tempest/api/compute/v3/flavors/test_flavors_negative.py
+++ b/tempest/api/compute/v3/flavors/test_flavors_negative.py
@@ -13,40 +13,27 @@
# License for the specific language governing permissions and limitations
# under the License.
-import testscenarios
-
from tempest.api.compute import base
from tempest import test
-load_tests = testscenarios.load_tests_apply_scenarios
+load_tests = test.NegativeAutoTest.load_tests
+@test.SimpleNegativeAutoTest
class FlavorsListNegativeV3Test(base.BaseV3ComputeTest,
test.NegativeAutoTest):
_service = 'computev3'
_schema_file = 'compute/flavors/flavors_list_v3.json'
- scenarios = test.NegativeAutoTest.generate_scenario(_schema_file)
- @test.attr(type=['negative', 'gate'])
- def test_list_flavors_with_detail(self):
- self.execute(self._schema_file)
-
-
+@test.SimpleNegativeAutoTest
class FlavorDetailsNegativeV3Test(base.BaseV3ComputeTest,
test.NegativeAutoTest):
_service = 'computev3'
_schema_file = 'compute/flavors/flavor_details_v3.json'
- scenarios = test.NegativeAutoTest.generate_scenario(_schema_file)
-
@classmethod
def setUpClass(cls):
super(FlavorDetailsNegativeV3Test, cls).setUpClass()
cls.set_resource("flavor", cls.flavor_ref)
-
- @test.attr(type=['negative', 'gate'])
- def test_get_flavor_details(self):
- # flavor details are not returned for non-existent flavors
- self.execute(self._schema_file)
diff --git a/tempest/api/compute/v3/images/test_images_negative.py b/tempest/api/compute/v3/images/test_images_negative.py
index c38373f..0705bdc 100644
--- a/tempest/api/compute/v3/images/test_images_negative.py
+++ b/tempest/api/compute/v3/images/test_images_negative.py
@@ -35,7 +35,7 @@
resp, body = self.servers_client.create_image(server_id, name, meta)
image_id = data_utils.parse_image_id(resp['location'])
self.addCleanup(self.client.delete_image, image_id)
- self.client.wait_for_image_status(image_id, 'ACTIVE')
+ self.client.wait_for_image_status(image_id, 'active')
return resp, body
@test.attr(type=['negative', 'gate'])
diff --git a/tempest/api/compute/v3/images/test_images_oneserver.py b/tempest/api/compute/v3/images/test_images_oneserver.py
index 48a885e..795437b 100644
--- a/tempest/api/compute/v3/images/test_images_oneserver.py
+++ b/tempest/api/compute/v3/images/test_images_oneserver.py
@@ -13,7 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import testtools
from tempest.api.compute import base
from tempest.common.utils import data_utils
@@ -42,6 +41,11 @@
# Usually it means the server had a serious accident
self.__class__.server_id = self.rebuild_server(self.server_id)
+ def tearDown(self):
+ """Terminate test instances created after a test is executed."""
+ self.server_check_teardown()
+ super(ImagesOneServerV3Test, self).tearDown()
+
@classmethod
def setUpClass(cls):
super(ImagesOneServerV3Test, cls).setUpClass()
@@ -61,8 +65,6 @@
resp, flavor = self.flavors_client.get_flavor_details(flavor_id)
return flavor['disk']
- @testtools.skipUnless(CONF.compute_feature_enabled.create_image,
- 'Environment unable to create images.')
@test.attr(type='smoke')
def test_create_delete_image(self):
@@ -73,26 +75,26 @@
name, meta)
self.assertEqual(202, resp.status)
image_id = data_utils.parse_image_id(resp['location'])
- self.client.wait_for_image_status(image_id, 'ACTIVE')
+ self.client.wait_for_image_status(image_id, 'active')
# Verify the image was created correctly
- resp, image = self.client.get_image(image_id)
+ resp, image = self.client.get_image_meta(image_id)
self.assertEqual(name, image['name'])
- self.assertEqual('test', image['metadata']['image_type'])
+ self.assertEqual('test', image['properties']['image_type'])
- resp, original_image = self.client.get_image(self.image_ref)
+ resp, original_image = self.client.get_image_meta(self.image_ref)
# Verify minRAM is the same as the original image
- self.assertEqual(image['minRam'], original_image['minRam'])
+ self.assertEqual(image['min_ram'], original_image['min_ram'])
# Verify minDisk is the same as the original image or the flavor size
flavor_disk_size = self._get_default_flavor_disk_size(self.flavor_ref)
- self.assertIn(str(image['minDisk']),
- (str(original_image['minDisk']), str(flavor_disk_size)))
+ self.assertIn(str(image['min_disk']),
+ (str(original_image['min_disk']), str(flavor_disk_size)))
# Verify the image was deleted correctly
resp, body = self.client.delete_image(image_id)
- self.assertEqual('204', resp['status'])
+ self.assertEqual('200', resp['status'])
self.client.wait_for_resource_deletion(image_id)
@test.attr(type=['gate'])
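The reworked assertions read the image back through the images client, whose responses use glance-style field names. A side-by-side sketch of the renamings the diff switches between (illustrative values only)::

    nova_image_view = {'minRam': 0, 'minDisk': 1, 'status': 'ACTIVE',
                       'metadata': {'image_type': 'test'}}
    glance_image_meta = {'min_ram': 0, 'min_disk': 1, 'status': 'active',
                         'properties': {'image_type': 'test'}}

    assert nova_image_view['metadata'] == glance_image_meta['properties']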
diff --git a/tempest/api/compute/v3/images/test_images_oneserver_negative.py b/tempest/api/compute/v3/images/test_images_oneserver_negative.py
index 7679eee..eed81c6 100644
--- a/tempest/api/compute/v3/images/test_images_oneserver_negative.py
+++ b/tempest/api/compute/v3/images/test_images_oneserver_negative.py
@@ -33,6 +33,7 @@
for image_id in self.image_ids:
self.client.delete_image(image_id)
self.image_ids.remove(image_id)
+ self.server_check_teardown()
super(ImagesOneServerNegativeV3Test, self).tearDown()
def setUp(self):
diff --git a/tempest/api/compute/v3/keypairs/test_keypairs.py b/tempest/api/compute/v3/keypairs/test_keypairs.py
deleted file mode 100644
index 668a295..0000000
--- a/tempest/api/compute/v3/keypairs/test_keypairs.py
+++ /dev/null
@@ -1,117 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.api.compute import base
-from tempest.common.utils import data_utils
-from tempest import test
-
-
-class KeyPairsV3Test(base.BaseV3ComputeTest):
-
- @classmethod
- def setUpClass(cls):
- super(KeyPairsV3Test, cls).setUpClass()
- cls.client = cls.keypairs_client
-
- def _delete_keypair(self, keypair_name):
- resp, _ = self.client.delete_keypair(keypair_name)
- self.assertEqual(204, resp.status)
-
- def _create_keypair(self, keypair_name, pub_key=None):
- resp, body = self.client.create_keypair(keypair_name, pub_key)
- self.addCleanup(self._delete_keypair, keypair_name)
- return resp, body
-
- @test.attr(type='gate')
- def test_keypairs_create_list_delete(self):
- # Keypairs created should be available in the response list
- # Create 3 keypairs
- key_list = list()
- for i in range(3):
- k_name = data_utils.rand_name('keypair-')
- resp, keypair = self._create_keypair(k_name)
- # Need to pop these keys so that our compare doesn't fail later,
- # as the keypair dicts from list API doesn't have them.
- keypair.pop('private_key')
- keypair.pop('user_id')
- self.assertEqual(201, resp.status)
- key_list.append(keypair)
- # Fetch all keypairs and verify the list
- # has all created keypairs
- resp, fetched_list = self.client.list_keypairs()
- self.assertEqual(200, resp.status)
- # We need to remove the extra 'keypair' element in the
- # returned dict. See comment in keypairs_client.list_keypairs()
- new_list = list()
- for keypair in fetched_list:
- new_list.append(keypair['keypair'])
- fetched_list = new_list
- # Now check if all the created keypairs are in the fetched list
- missing_kps = [kp for kp in key_list if kp not in fetched_list]
- self.assertFalse(missing_kps,
- "Failed to find keypairs %s in fetched list"
- % ', '.join(m_key['name'] for m_key in missing_kps))
-
- @test.attr(type='gate')
- def test_keypair_create_delete(self):
- # Keypair should be created, verified and deleted
- k_name = data_utils.rand_name('keypair-')
- resp, keypair = self._create_keypair(k_name)
- self.assertEqual(201, resp.status)
- private_key = keypair['private_key']
- key_name = keypair['name']
- self.assertEqual(key_name, k_name,
- "The created keypair name is not equal "
- "to the requested name")
- self.assertTrue(private_key is not None,
- "Field private_key is empty or not found.")
-
- @test.attr(type='gate')
- def test_get_keypair_detail(self):
- # Keypair should be created, Got details by name and deleted
- k_name = data_utils.rand_name('keypair-')
- resp, keypair = self._create_keypair(k_name)
- resp, keypair_detail = self.client.get_keypair(k_name)
- self.assertEqual(200, resp.status)
- self.assertIn('name', keypair_detail)
- self.assertIn('public_key', keypair_detail)
- self.assertEqual(keypair_detail['name'], k_name,
- "The created keypair name is not equal "
- "to requested name")
- public_key = keypair_detail['public_key']
- self.assertTrue(public_key is not None,
- "Field public_key is empty or not found.")
-
- @test.attr(type='gate')
- def test_keypair_create_with_pub_key(self):
- # Keypair should be created with a given public key
- k_name = data_utils.rand_name('keypair-')
- pub_key = ("ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCs"
- "Ne3/1ILNCqFyfYWDeTKLD6jEXC2OQHLmietMWW+/vd"
- "aZq7KZEwO0jhglaFjU1mpqq4Gz5RX156sCTNM9vRbw"
- "KAxfsdF9laBYVsex3m3Wmui3uYrKyumsoJn2g9GNnG1P"
- "I1mrVjZ61i0GY3khna+wzlTpCCmy5HNlrmbj3XLqBUpip"
- "TOXmsnr4sChzC53KCd8LXuwc1i/CZPvF+3XipvAgFSE53pCt"
- "LOeB1kYMOBaiUPLQTWXR3JpckqFIQwhIH0zoHlJvZE8hh90"
- "XcPojYN56tI0OlrGqojbediJYD0rUsJu4weZpbn8vilb3JuDY+jws"
- "snSA8wzBx3A/8y9Pp1B nova@ubuntu")
- resp, keypair = self._create_keypair(k_name, pub_key)
- self.assertEqual(201, resp.status)
- self.assertFalse('private_key' in keypair,
- "Field private_key is not empty!")
- key_name = keypair['name']
- self.assertEqual(key_name, k_name,
- "The created keypair name is not equal "
- "to the requested name!")
diff --git a/tempest/api/compute/v3/servers/test_attach_interfaces.py b/tempest/api/compute/v3/servers/test_attach_interfaces.py
index e1c69d9..c848f8c 100644
--- a/tempest/api/compute/v3/servers/test_attach_interfaces.py
+++ b/tempest/api/compute/v3/servers/test_attach_interfaces.py
@@ -127,7 +127,7 @@
_ifs = self._test_delete_interface(server, ifs)
self.assertEqual(len(ifs) - 1, len(_ifs))
- @attr(type='gate')
+ @attr(type='smoke')
def test_add_remove_fixed_ip(self):
# Add and Remove the fixed IP to server.
server, ifs = self._create_server_get_interfaces()
diff --git a/tempest/api/compute/v3/servers/test_attach_volume.py b/tempest/api/compute/v3/servers/test_attach_volume.py
index 8577aab..e994c7f 100644
--- a/tempest/api/compute/v3/servers/test_attach_volume.py
+++ b/tempest/api/compute/v3/servers/test_attach_volume.py
@@ -24,7 +24,6 @@
class AttachVolumeV3Test(base.BaseV3ComputeTest):
- run_ssh = CONF.compute.run_ssh
def __init__(self, *args, **kwargs):
super(AttachVolumeV3Test, self).__init__(*args, **kwargs)
@@ -34,6 +33,7 @@
@classmethod
def setUpClass(cls):
+ cls.prepare_instance_network()
super(AttachVolumeV3Test, cls).setUpClass()
cls.device = CONF.compute.volume_device_name
if not CONF.service_available.cinder:
@@ -76,7 +76,7 @@
self.attached = True
self.addCleanup(self._detach, server['id'], volume['id'])
- @testtools.skipIf(not run_ssh, 'SSH required for this test')
+ @testtools.skipUnless(CONF.compute.run_ssh, 'SSH required for this test')
@test.attr(type='gate')
def test_attach_detach_volume(self):
# Stop and Start a server with an attached volume, ensuring that
diff --git a/tempest/api/compute/v3/servers/test_availability_zone.py b/tempest/api/compute/v3/servers/test_availability_zone.py
deleted file mode 100644
index 5a1e07e..0000000
--- a/tempest/api/compute/v3/servers/test_availability_zone.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright 2014 NEC Corporation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.api.compute import base
-from tempest import test
-
-
-class AZV3Test(base.BaseV3ComputeTest):
-
- """
- Tests Availability Zone API List
- """
-
- @classmethod
- def setUpClass(cls):
- super(AZV3Test, cls).setUpClass()
- cls.client = cls.availability_zone_client
-
- @test.attr(type='gate')
- def test_get_availability_zone_list_with_non_admin_user(self):
- # List of availability zone with non-administrator user
- resp, availability_zone = self.client.get_availability_zone_list()
- self.assertEqual(200, resp.status)
- self.assertTrue(len(availability_zone) > 0)
diff --git a/tempest/api/compute/v3/servers/test_create_server.py b/tempest/api/compute/v3/servers/test_create_server.py
index a212ca5..c59fe91 100644
--- a/tempest/api/compute/v3/servers/test_create_server.py
+++ b/tempest/api/compute/v3/servers/test_create_server.py
@@ -28,11 +28,11 @@
class ServersV3Test(base.BaseV3ComputeTest):
- run_ssh = CONF.compute.run_ssh
disk_config = 'AUTO'
@classmethod
def setUpClass(cls):
+ cls.prepare_instance_network()
super(ServersV3Test, cls).setUpClass()
cls.meta = {'hello': 'world'}
cls.accessIPv4 = '1.1.1.1'
@@ -54,13 +54,6 @@
resp, cls.server = cls.client.get_server(cls.server_initial['id'])
@test.attr(type='smoke')
- def test_create_server_response(self):
- # Check that the required fields are returned with values
- self.assertEqual(202, self.resp.status)
- self.assertTrue(self.server_initial['id'] is not None)
- self.assertTrue(self.server_initial['admin_password'] is not None)
-
- @test.attr(type='smoke')
def test_verify_server_details(self):
# Verify the specified server attributes are set correctly
self.assertEqual(self.accessIPv4,
@@ -90,15 +83,8 @@
found = any([i for i in servers if i['id'] == self.server['id']])
self.assertTrue(found)
- @testtools.skipIf(not run_ssh, 'Instance validation tests are disabled.')
- @test.attr(type='gate')
- def test_can_log_into_created_server(self):
- # Check that the user can authenticate with the generated password
- linux_client = remote_client.RemoteClient(self.server,
- self.ssh_user, self.password)
- self.assertTrue(linux_client.can_authenticate())
-
- @testtools.skipIf(not run_ssh, 'Instance validation tests are disabled.')
+ @testtools.skipUnless(CONF.compute.run_ssh,
+ 'Instance validation tests are disabled.')
@test.attr(type='gate')
def test_verify_created_server_vcpus(self):
# Verify that the number of vcpus reported by the instance matches
@@ -108,7 +94,8 @@
self.ssh_user, self.password)
self.assertEqual(flavor['vcpus'], linux_client.get_number_of_vcpus())
- @testtools.skipIf(not run_ssh, 'Instance validation tests are disabled.')
+ @testtools.skipUnless(CONF.compute.run_ssh,
+ 'Instance validation tests are disabled.')
@test.attr(type='gate')
def test_host_name_is_same_as_server_name(self):
# Verify the instance host name is the same as the server name
@@ -118,38 +105,22 @@
class ServersWithSpecificFlavorV3Test(base.BaseV3ComputeAdminTest):
- run_ssh = CONF.compute.run_ssh
disk_config = 'AUTO'
@classmethod
def setUpClass(cls):
+ cls.prepare_instance_network()
super(ServersWithSpecificFlavorV3Test, cls).setUpClass()
- cls.meta = {'hello': 'world'}
- cls.accessIPv4 = '1.1.1.1'
- cls.accessIPv6 = '0000:0000:0000:0000:0000:babe:220.12.22.2'
- cls.name = data_utils.rand_name('server')
- file_contents = 'This is a test file.'
- personality = [{'path': '/test.txt',
- 'contents': base64.b64encode(file_contents)}]
cls.client = cls.servers_client
cls.flavor_client = cls.flavors_admin_client
- cli_resp = cls.create_test_server(name=cls.name,
- meta=cls.meta,
- access_ip_v4=cls.accessIPv4,
- access_ip_v6=cls.accessIPv6,
- personality=personality,
- disk_config=cls.disk_config)
- cls.resp, cls.server_initial = cli_resp
- cls.password = cls.server_initial['admin_password']
- cls.client.wait_for_server_status(cls.server_initial['id'], 'ACTIVE')
- resp, cls.server = cls.client.get_server(cls.server_initial['id'])
- @testtools.skipIf(not run_ssh, 'Instance validation tests are disabled.')
+ @testtools.skipUnless(CONF.compute.run_ssh,
+ 'Instance validation tests are disabled.')
@test.attr(type='gate')
def test_verify_created_server_ephemeral_disk(self):
# Verify that the ephemeral disk is created when creating server
- def create_flavor_with_extra_specs(self):
+ def create_flavor_with_extra_specs():
flavor_with_eph_disk_name = data_utils.rand_name('eph_flavor')
flavor_with_eph_disk_id = data_utils.rand_int_id(start=1000)
ram = 512
@@ -161,13 +132,13 @@
create_flavor(flavor_with_eph_disk_name,
ram, vcpus, disk,
flavor_with_eph_disk_id,
- ephemeral=1, swap=1024, rxtx=1))
- self.addCleanup(self.flavor_clean_up, flavor['id'])
- self.assertEqual(200, resp.status)
+ ephemeral=1, rxtx=1))
+ self.addCleanup(flavor_clean_up, flavor['id'])
+ self.assertEqual(201, resp.status)
return flavor['id']
- def create_flavor_without_extra_specs(self):
+ def create_flavor_without_extra_specs():
flavor_no_eph_disk_name = data_utils.rand_name('no_eph_flavor')
flavor_no_eph_disk_id = data_utils.rand_int_id(start=1000)
@@ -180,23 +151,22 @@
create_flavor(flavor_no_eph_disk_name,
ram, vcpus, disk,
flavor_no_eph_disk_id))
- self.addCleanup(self.flavor_clean_up, flavor['id'])
- self.assertEqual(200, resp.status)
+ self.addCleanup(flavor_clean_up, flavor['id'])
+ self.assertEqual(201, resp.status)
return flavor['id']
- def flavor_clean_up(self, flavor_id):
+ def flavor_clean_up(flavor_id):
resp, body = self.flavor_client.delete_flavor(flavor_id)
- self.assertEqual(resp.status, 202)
+ self.assertEqual(resp.status, 204)
self.flavor_client.wait_for_resource_deletion(flavor_id)
- flavor_with_eph_disk_id = self.create_flavor_with_extra_specs()
- flavor_no_eph_disk_id = self.create_flavor_without_extra_specs()
+ flavor_with_eph_disk_id = create_flavor_with_extra_specs()
+ flavor_no_eph_disk_id = create_flavor_without_extra_specs()
admin_pass = self.image_ssh_password
- resp, server_no_eph_disk = (self.
- create_test_server(
+ resp, server_no_eph_disk = (self.create_test_server(
wait_until='ACTIVE',
adminPass=admin_pass,
flavor=flavor_no_eph_disk_id))
@@ -205,13 +175,17 @@
adminPass=admin_pass,
flavor=flavor_with_eph_disk_id))
# Get partition number of server without extra specs.
+ _, server_no_eph_disk = self.client.get_server(
+ server_no_eph_disk['id'])
linux_client = remote_client.RemoteClient(server_no_eph_disk,
- self.ssh_user, self.password)
- partition_num = len(linux_client.get_partitions())
-
+ self.ssh_user, admin_pass)
+ partition_num = len(linux_client.get_partitions().split('\n'))
+ _, server_with_eph_disk = self.client.get_server(
+ server_with_eph_disk['id'])
linux_client = remote_client.RemoteClient(server_with_eph_disk,
- self.ssh_user, self.password)
- self.assertEqual(partition_num + 1, linux_client.get_partitions())
+ self.ssh_user, admin_pass)
+        partition_num_eph = len(linux_client.get_partitions().split('\n'))
+        self.assertEqual(partition_num + 1, partition_num_eph)
class ServersV3TestManualDisk(ServersV3Test):
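For reference, the ephemeral-disk check added above derives partition counts by counting lines in the raw output of get_partitions(); a minimal standalone sketch of that comparison (the helper name and sample strings below are illustrative, not part of the patch):

    def count_partitions(partitions_output):
        # get_partitions() is assumed to return the raw text of /proc/partitions,
        # one block device or partition per line.
        return len(partitions_output.split('\n'))

    without_eph = "vda\nvda1"
    with_eph = "vda\nvda1\nvdb"
    assert count_partitions(with_eph) == count_partitions(without_eph) + 1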
diff --git a/tempest/api/compute/v3/servers/test_delete_server.py b/tempest/api/compute/v3/servers/test_delete_server.py
index d694a33..add69ab 100644
--- a/tempest/api/compute/v3/servers/test_delete_server.py
+++ b/tempest/api/compute/v3/servers/test_delete_server.py
@@ -56,6 +56,8 @@
self.assertEqual('204', resp['status'])
self.client.wait_for_server_termination(server['id'])
+ @testtools.skipUnless(CONF.compute_feature_enabled.pause,
+ 'Pause is not available.')
@test.attr(type='gate')
def test_delete_server_while_in_pause_state(self):
# Delete a server while it's VM state is Pause
@@ -99,6 +101,25 @@
self.assertEqual('204', resp['status'])
self.client.wait_for_server_termination(server['id'])
+ @test.attr(type='gate')
+ def test_delete_server_while_in_attached_volume(self):
+ # Delete a server while a volume is attached to it
+ device = '/dev/%s' % CONF.compute.volume_device_name
+ resp, server = self.create_test_server(wait_until='ACTIVE')
+
+ resp, volume = self.volumes_client.create_volume(1)
+ self.addCleanup(self.volumes_client.delete_volume, volume['id'])
+ self.volumes_client.wait_for_volume_status(volume['id'], 'available')
+ resp, body = self.client.attach_volume(server['id'],
+ volume['id'],
+ device=device)
+ self.volumes_client.wait_for_volume_status(volume['id'], 'in-use')
+
+ resp, _ = self.client.delete_server(server['id'])
+ self.assertEqual('204', resp['status'])
+ self.client.wait_for_server_termination(server['id'])
+ self.volumes_client.wait_for_volume_status(volume['id'], 'available')
+
class DeleteServersAdminV3Test(base.BaseV3ComputeAdminTest):
# NOTE: Server creations of each test class should be under 10
diff --git a/tempest/api/compute/v3/servers/test_instance_actions.py b/tempest/api/compute/v3/servers/test_instance_actions.py
index 7d25100..399541b 100644
--- a/tempest/api/compute/v3/servers/test_instance_actions.py
+++ b/tempest/api/compute/v3/servers/test_instance_actions.py
@@ -27,25 +27,27 @@
cls.resp = resp
cls.server_id = server['id']
+ @test.skip_because(bug="1206032")
@test.attr(type='gate')
- def test_list_instance_actions(self):
+ def test_list_server_actions(self):
# List actions of the provided server
resp, body = self.client.reboot(self.server_id, 'HARD')
self.client.wait_for_server_status(self.server_id, 'ACTIVE')
- resp, body = self.client.list_instance_actions(self.server_id)
+ resp, body = self.client.list_server_actions(self.server_id)
self.assertEqual(200, resp.status)
self.assertTrue(len(body) == 2, str(body))
self.assertTrue(any([i for i in body if i['action'] == 'create']))
self.assertTrue(any([i for i in body if i['action'] == 'reboot']))
+ @test.skip_because(bug="1206032")
@test.attr(type='gate')
@test.skip_because(bug="1281915")
- def test_get_instance_action(self):
+ def test_get_server_action(self):
# Get the action details of the provided server
request_id = self.resp['x-compute-request-id']
- resp, body = self.client.get_instance_action(self.server_id,
- request_id)
+ resp, body = self.client.get_server_action(self.server_id,
+ request_id)
self.assertEqual(200, resp.status)
- self.assertEqual(self.server_id, body['instance_uuid'])
+ self.assertEqual(self.server_id, body['server_uuid'])
self.assertEqual('create', body['action'])
diff --git a/tempest/api/compute/v3/servers/test_instance_actions_negative.py b/tempest/api/compute/v3/servers/test_instance_actions_negative.py
index b0a7050..0b2c6f9 100644
--- a/tempest/api/compute/v3/servers/test_instance_actions_negative.py
+++ b/tempest/api/compute/v3/servers/test_instance_actions_negative.py
@@ -29,15 +29,15 @@
cls.server_id = server['id']
@test.attr(type=['negative', 'gate'])
- def test_list_instance_actions_invalid_server(self):
+ def test_list_server_actions_invalid_server(self):
# List actions of the invalid server id
invalid_server_id = data_utils.rand_uuid()
self.assertRaises(exceptions.NotFound,
- self.client.list_instance_actions, invalid_server_id)
+ self.client.list_server_actions, invalid_server_id)
@test.attr(type=['negative', 'gate'])
- def test_get_instance_action_invalid_request(self):
+ def test_get_server_action_invalid_request(self):
# Get the action details of the provided server with invalid request
invalid_request_id = 'req-' + data_utils.rand_uuid()
- self.assertRaises(exceptions.NotFound, self.client.get_instance_action,
+ self.assertRaises(exceptions.NotFound, self.client.get_server_action,
self.server_id, invalid_request_id)
diff --git a/tempest/api/compute/v3/servers/test_list_server_filters.py b/tempest/api/compute/v3/servers/test_list_server_filters.py
index ec31e8e..778b033 100644
--- a/tempest/api/compute/v3/servers/test_list_server_filters.py
+++ b/tempest/api/compute/v3/servers/test_list_server_filters.py
@@ -26,7 +26,9 @@
class ListServerFiltersV3Test(base.BaseV3ComputeTest):
@classmethod
+ @test.safe_setup
def setUpClass(cls):
+ cls.set_network_resources(network=True, subnet=True, dhcp=True)
super(ListServerFiltersV3Test, cls).setUpClass()
cls.client = cls.servers_client
@@ -69,7 +71,12 @@
flavor=cls.flavor_ref_alt,
wait_until='ACTIVE')
- cls.fixed_network_name = CONF.compute.fixed_network_name
+ if (CONF.service_available.neutron and
+ CONF.compute.allow_tenant_isolation):
+ network = cls.isolated_creds.get_primary_network()
+ cls.fixed_network_name = network['name']
+ else:
+ cls.fixed_network_name = CONF.compute.fixed_network_name
@utils.skip_unless_attr('multiple_images', 'Only one image found')
@test.attr(type='gate')
@@ -225,7 +232,6 @@
self.assertNotIn(self.s2_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s3_name, map(lambda x: x['name'], servers))
- @test.skip_because(bug="1170718")
@test.attr(type='gate')
def test_list_servers_filtered_by_ip(self):
# Filter servers by ip
diff --git a/tempest/api/compute/v3/servers/test_list_servers_negative.py b/tempest/api/compute/v3/servers/test_list_servers_negative.py
index 92f44fe..9cbc4e0 100644
--- a/tempest/api/compute/v3/servers/test_list_servers_negative.py
+++ b/tempest/api/compute/v3/servers/test_list_servers_negative.py
@@ -19,13 +19,14 @@
from tempest.api.compute import base
from tempest import exceptions
-from tempest.test import attr
+from tempest import test
class ListServersNegativeV3Test(base.BaseV3ComputeTest):
force_tenant_isolation = True
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(ListServersNegativeV3Test, cls).setUpClass()
cls.client = cls.servers_client
@@ -51,7 +52,7 @@
ignore_error=True)
cls.deleted_fixtures.append(srv)
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_list_servers_with_a_deleted_server(self):
# Verify deleted servers do not show by default in list servers
# List servers and verify server not returned
@@ -63,7 +64,7 @@
self.assertEqual('200', resp['status'])
self.assertEqual([], actual)
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_list_servers_by_non_existing_image(self):
# Listing servers for a non existing image returns empty list
non_existing_image = '1234abcd-zzz0-aaa9-ppp3-0987654abcde'
@@ -72,7 +73,7 @@
self.assertEqual('200', resp['status'])
self.assertEqual([], servers)
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_list_servers_by_non_existing_flavor(self):
# Listing servers by non existing flavor returns empty list
non_existing_flavor = 1234
@@ -81,7 +82,7 @@
self.assertEqual('200', resp['status'])
self.assertEqual([], servers)
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_list_servers_by_non_existing_server_name(self):
# Listing servers for a non existent server name returns empty list
non_existing_name = 'junk_server_1234'
@@ -90,7 +91,7 @@
self.assertEqual('200', resp['status'])
self.assertEqual([], servers)
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_list_servers_status_non_existing(self):
# Return an empty list when invalid status is specified
non_existing_status = 'BALONEY'
@@ -99,33 +100,33 @@
self.assertEqual('200', resp['status'])
self.assertEqual([], servers)
- @attr(type='gate')
+ @test.attr(type='gate')
def test_list_servers_by_limits(self):
# List servers by specifying limits
resp, body = self.client.list_servers({'limit': 1})
self.assertEqual('200', resp['status'])
self.assertEqual(1, len([x for x in body['servers'] if 'id' in x]))
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_list_servers_by_limits_greater_than_actual_count(self):
# List servers by specifying a greater value for limit
resp, body = self.client.list_servers({'limit': 100})
self.assertEqual('200', resp['status'])
self.assertEqual(len(self.existing_fixtures), len(body['servers']))
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_list_servers_by_limits_pass_string(self):
# Return an error if a string value is passed for limit
self.assertRaises(exceptions.BadRequest, self.client.list_servers,
{'limit': 'testing'})
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_list_servers_by_limits_pass_negative_value(self):
# Return an error if a negative value for limit is passed
self.assertRaises(exceptions.BadRequest, self.client.list_servers,
{'limit': -1})
- @attr(type='gate')
+ @test.attr(type='gate')
def test_list_servers_by_changes_since(self):
# Servers are listed by specifying changes-since date
changes_since = {'changes_since': self.start_time.isoformat()}
@@ -138,13 +139,13 @@
"Number of servers %d is wrong in %s" %
(num_expected, body['servers']))
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_list_servers_by_changes_since_invalid_date(self):
# Return an error when invalid date format is passed
self.assertRaises(exceptions.BadRequest, self.client.list_servers,
{'changes_since': '2011/01/01'})
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_list_servers_by_changes_since_future_date(self):
# Return an empty list when a date in the future is passed
changes_since = {'changes_since': '2051-01-01T12:34:00Z'}
@@ -152,7 +153,7 @@
self.assertEqual('200', resp['status'])
self.assertEqual(0, len(body['servers']))
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_list_servers_detail_server_is_deleted(self):
# Server details are not listed for a deleted server
deleted_ids = [s['id'] for s in self.deleted_fixtures]
diff --git a/tempest/api/compute/v3/servers/test_server_actions.py b/tempest/api/compute/v3/servers/test_server_actions.py
index 555d028..721fe42 100644
--- a/tempest/api/compute/v3/servers/test_server_actions.py
+++ b/tempest/api/compute/v3/servers/test_server_actions.py
@@ -13,9 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import time
-
import testtools
+import urlparse
from tempest.api.compute import base
from tempest.common.utils import data_utils
@@ -28,7 +27,6 @@
class ServerActionsV3Test(base.BaseV3ComputeTest):
- resize_available = CONF.compute_feature_enabled.resize
run_ssh = CONF.compute.run_ssh
def setUp(self):
@@ -42,8 +40,15 @@
# Rebuild server if something happened to it during a test
self.__class__.server_id = self.rebuild_server(self.server_id)
+ def tearDown(self):
+ _, server = self.client.get_server(self.server_id)
+ self.assertEqual(self.image_ref, server['image']['id'])
+ self.server_check_teardown()
+ super(ServerActionsV3Test, self).tearDown()
+
@classmethod
def setUpClass(cls):
+ cls.prepare_instance_network()
super(ServerActionsV3Test, cls).setUpClass()
cls.client = cls.servers_client
cls.server_id = cls.rebuild_server(None)
@@ -119,7 +124,6 @@
name=new_name,
metadata=meta,
admin_password=password)
- self.addCleanup(self.client.rebuild, self.server_id, self.image_ref)
# Verify the properties in the initial response are correct
self.assertEqual(self.server_id, rebuilt_server['id'])
@@ -140,6 +144,9 @@
password)
linux_client.validate_authentication()
+ if self.image_ref_alt != self.image_ref:
+ self.client.rebuild(self.server_id, self.image_ref)
+
@test.attr(type='gate')
def test_rebuild_server_in_stop_state(self):
# The server in stop state should be rebuilt using the provided
@@ -151,11 +158,7 @@
resp, server = self.client.stop(self.server_id)
self.assertEqual(202, resp.status)
self.client.wait_for_server_status(self.server_id, 'SHUTOFF')
- self.addCleanup(self.client.start, self.server_id)
resp, rebuilt_server = self.client.rebuild(self.server_id, new_image)
- self.addCleanup(self.client.wait_for_server_status, self.server_id,
- 'SHUTOFF')
- self.addCleanup(self.client.rebuild, self.server_id, old_image)
# Verify the properties in the initial response are correct
self.assertEqual(self.server_id, rebuilt_server['id'])
@@ -169,6 +172,12 @@
rebuilt_image_id = server['image']['id']
self.assertEqual(new_image, rebuilt_image_id)
+        # Restore the original image (the tearDown checks it again)
+ if self.image_ref_alt != self.image_ref:
+ self.client.rebuild(self.server_id, old_image)
+ self.client.wait_for_server_status(self.server_id, 'SHUTOFF')
+ self.client.start(self.server_id)
+
def _detect_server_image_flavor(self, server_id):
# Detects the current server image flavor ref.
resp, server = self.client.get_server(server_id)
@@ -177,7 +186,8 @@
if current_flavor == self.flavor_ref else self.flavor_ref
return current_flavor, new_flavor_ref
- @testtools.skipIf(not resize_available, 'Resize not available.')
+ @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+ 'Resize not available.')
@test.attr(type='smoke')
def test_resize_server_confirm(self):
# The server's RAM and disk space should be modified to that of
@@ -196,7 +206,8 @@
resp, server = self.client.get_server(self.server_id)
self.assertEqual(new_flavor_ref, server['flavor']['id'])
- @testtools.skipIf(not resize_available, 'Resize not available.')
+ @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+ 'Resize not available.')
@test.attr(type='gate')
def test_resize_server_revert(self):
# The server's RAM and disk space should return to its original
@@ -212,18 +223,8 @@
self.client.revert_resize(self.server_id)
self.client.wait_for_server_status(self.server_id, 'ACTIVE')
- # Need to poll for the id change until lp#924371 is fixed
resp, server = self.client.get_server(self.server_id)
- start = int(time.time())
-
- while server['flavor']['id'] != previous_flavor_ref:
- time.sleep(self.build_interval)
- resp, server = self.client.get_server(self.server_id)
-
- if int(time.time()) - start >= self.build_timeout:
- message = 'Server %s failed to revert resize within the \
- required time (%s s).' % (self.server_id, self.build_timeout)
- raise exceptions.TimeoutException(message)
+ self.assertEqual(previous_flavor_ref, server['flavor']['id'])
@test.attr(type='gate')
def test_create_backup(self):
@@ -340,6 +341,8 @@
self.wait_for(self._get_output)
+ @testtools.skipUnless(CONF.compute_feature_enabled.pause,
+ 'Pause is not available.')
@test.attr(type='gate')
def test_pause_unpause_server(self):
resp, server = self.client.pause_server(self.server_id)
@@ -349,6 +352,8 @@
self.assertEqual(202, resp.status)
self.client.wait_for_server_status(self.server_id, 'ACTIVE')
+ @testtools.skipUnless(CONF.compute_feature_enabled.suspend,
+ 'Suspend is not available.')
@test.attr(type='gate')
def test_suspend_resume_server(self):
resp, server = self.client.suspend_server(self.server_id)
@@ -416,6 +421,12 @@
self.assertEqual(202, resp.status)
self.servers_client.wait_for_server_status(self.server_id, 'ACTIVE')
+ def _validate_url(self, url):
+ valid_scheme = ['http', 'https']
+ parsed_url = urlparse.urlparse(url)
+        self.assertIsNotNone(parsed_url.hostname)
+ self.assertIn(parsed_url.scheme, valid_scheme)
+
@testtools.skipUnless(CONF.compute_feature_enabled.vnc_console,
'VNC Console feature is disabled')
@test.attr(type='gate')
@@ -425,6 +436,35 @@
for console_type in console_types:
resp, body = self.servers_client.get_vnc_console(self.server_id,
console_type)
- self.assertEqual(200, resp.status)
+ self.assertEqual(
+ 200, resp.status,
+ "Failed to get Console Type: %s" % (console_type))
self.assertEqual(console_type, body['type'])
self.assertNotEqual('', body['url'])
+ self._validate_url(body['url'])
+
+ @testtools.skipUnless(CONF.compute_feature_enabled.spice_console,
+ 'Spice Console feature is disabled.')
+ @test.attr(type='gate')
+ def test_get_spice_console(self):
+ # Get the Spice console of type "spice-html5"
+ console_type = 'spice-html5'
+ resp, body = self.servers_client.get_spice_console(self.server_id,
+ console_type)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(console_type, body['type'])
+ self.assertNotEqual('', body['url'])
+ self._validate_url(body['url'])
+
+ @testtools.skipUnless(CONF.compute_feature_enabled.rdp_console,
+ 'RDP Console feature is disabled.')
+ @test.attr(type='gate')
+ def test_get_rdp_console(self):
+ # Get the RDP console of type "rdp-html5"
+ console_type = 'rdp-html5'
+ resp, body = self.servers_client.get_rdp_console(self.server_id,
+ console_type)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(console_type, body['type'])
+ self.assertNotEqual('', body['url'])
+ self._validate_url(body['url'])
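For reference, the _validate_url helper added above only asserts that a console URL carries a hostname and an http(s) scheme; a minimal standalone version of the same check, assuming Python 2's urlparse module as imported in this hunk (the function name and sample URLs are illustrative, not part of the patch):

    import urlparse

    def looks_like_console_url(url):
        valid_schemes = ('http', 'https')
        parsed = urlparse.urlparse(url)
        # A usable console URL needs both a hostname and an http(s) scheme.
        return parsed.hostname is not None and parsed.scheme in valid_schemes

    assert looks_like_console_url('https://nova.example.com:6080/vnc_auto.html?token=abc')
    assert not looks_like_console_url('not a url')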
diff --git a/tempest/api/compute/v3/servers/test_server_metadata.py b/tempest/api/compute/v3/servers/test_server_metadata.py
index 0e4ef07..298cd3c 100644
--- a/tempest/api/compute/v3/servers/test_server_metadata.py
+++ b/tempest/api/compute/v3/servers/test_server_metadata.py
@@ -24,10 +24,7 @@
super(ServerMetadataV3Test, cls).setUpClass()
cls.client = cls.servers_client
cls.quotas = cls.quotas_client
- cls.admin_client = cls._get_identity_admin_client()
- resp, tenants = cls.admin_client.list_tenants()
- cls.tenant_id = [tnt['id'] for tnt in tenants if tnt['name'] ==
- cls.client.tenant_name][0]
+ cls.tenant_id = cls.client.tenant_id
resp, server = cls.create_test_server(meta={}, wait_until='ACTIVE')
cls.server_id = server['id']
diff --git a/tempest/api/compute/v3/servers/test_server_metadata_negative.py b/tempest/api/compute/v3/servers/test_server_metadata_negative.py
index ec2bc8c..f746be3 100644
--- a/tempest/api/compute/v3/servers/test_server_metadata_negative.py
+++ b/tempest/api/compute/v3/servers/test_server_metadata_negative.py
@@ -25,10 +25,7 @@
super(ServerMetadataV3NegativeTest, cls).setUpClass()
cls.client = cls.servers_client
cls.quotas = cls.quotas_client
- cls.admin_client = cls._get_identity_admin_client()
- resp, tenants = cls.admin_client.list_tenants()
- cls.tenant_id = [tnt['id'] for tnt in tenants if tnt['name'] ==
- cls.client.tenant_name][0]
+ cls.tenant_id = cls.client.tenant_id
resp, server = cls.create_test_server(meta={}, wait_until='ACTIVE')
cls.server_id = server['id']
diff --git a/tempest/api/compute/v3/servers/test_server_rescue_negative.py b/tempest/api/compute/v3/servers/test_server_rescue_negative.py
index 6e09376..eb6bcdd 100644
--- a/tempest/api/compute/v3/servers/test_server_rescue_negative.py
+++ b/tempest/api/compute/v3/servers/test_server_rescue_negative.py
@@ -12,16 +12,21 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+import testtools
from tempest.api.compute import base
from tempest.common.utils import data_utils
+from tempest import config
from tempest import exceptions
from tempest import test
+CONF = config.CONF
+
class ServerRescueNegativeV3Test(base.BaseV3ComputeTest):
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(ServerRescueNegativeV3Test, cls).setUpClass()
cls.device = 'vdf'
@@ -43,6 +48,7 @@
cls.servers_client.rescue_server(
cls.rescue_id, admin_password=cls.rescue_password)
cls.servers_client.wait_for_server_status(cls.rescue_id, 'RESCUE')
+ cls.servers_client.wait_for_server_status(cls.server_id, 'ACTIVE')
@classmethod
def tearDownClass(cls):
@@ -64,6 +70,8 @@
self.assertEqual(202, resp.status)
self.servers_client.wait_for_server_status(server_id, 'ACTIVE')
+ @testtools.skipUnless(CONF.compute_feature_enabled.pause,
+ 'Pause is not available.')
@test.attr(type=['negative', 'gate'])
def test_rescue_paused_instance(self):
# Rescue a paused server
diff --git a/tempest/api/compute/v3/servers/test_servers_negative.py b/tempest/api/compute/v3/servers/test_servers_negative.py
index cb5e93d..827c4c4 100644
--- a/tempest/api/compute/v3/servers/test_servers_negative.py
+++ b/tempest/api/compute/v3/servers/test_servers_negative.py
@@ -16,6 +16,8 @@
import base64
import sys
+import testtools
+
from tempest.api.compute import base
from tempest import clients
from tempest.common.utils import data_utils
@@ -35,6 +37,10 @@
except Exception:
self.__class__.server_id = self.rebuild_server(self.server_id)
+ def tearDown(self):
+ self.server_check_teardown()
+ super(ServersNegativeV3Test, self).tearDown()
+
@classmethod
def setUpClass(cls):
super(ServersNegativeV3Test, cls).setUpClass()
@@ -113,16 +119,17 @@
self.assertRaises(exceptions.NotFound, self.client.reboot,
nonexistent_server, 'SOFT')
+ @testtools.skipUnless(CONF.compute_feature_enabled.pause,
+ 'Pause is not available.')
@test.attr(type=['negative', 'gate'])
def test_pause_paused_server(self):
# Pause a paused server.
self.client.pause_server(self.server_id)
- self.addCleanup(self.client.unpause_server,
- self.server_id)
self.client.wait_for_server_status(self.server_id, 'PAUSED')
self.assertRaises(exceptions.Conflict,
self.client.pause_server,
self.server_id)
+ self.client.unpause_server(self.server_id)
@test.attr(type=['negative', 'gate'])
def test_rebuild_reboot_deleted_server(self):
@@ -311,6 +318,8 @@
self.client.unpause_server,
self.server_id)
+ @testtools.skipUnless(CONF.compute_feature_enabled.suspend,
+ 'Suspend is not available.')
@test.attr(type=['negative', 'gate'])
def test_suspend_non_existent_server(self):
# suspend a non existent server
@@ -318,17 +327,18 @@
self.assertRaises(exceptions.NotFound, self.client.suspend_server,
nonexistent_server)
+ @testtools.skipUnless(CONF.compute_feature_enabled.suspend,
+ 'Suspend is not available.')
@test.attr(type=['negative', 'gate'])
def test_suspend_server_invalid_state(self):
# suspend a suspended server.
resp, _ = self.client.suspend_server(self.server_id)
- self.addCleanup(self.client.resume_server,
- self.server_id)
self.assertEqual(202, resp.status)
self.client.wait_for_server_status(self.server_id, 'SUSPENDED')
self.assertRaises(exceptions.Conflict,
self.client.suspend_server,
self.server_id)
+ self.client.resume_server(self.server_id)
@test.attr(type=['negative', 'gate'])
def test_resume_non_existent_server(self):
@@ -337,6 +347,8 @@
self.assertRaises(exceptions.NotFound, self.client.resume_server,
nonexistent_server)
+ @testtools.skipUnless(CONF.compute_feature_enabled.suspend,
+ 'Suspend is not available.')
@test.attr(type=['negative', 'gate'])
def test_resume_server_invalid_state(self):
# resume an active server.
@@ -394,7 +406,6 @@
# shelve a shelved server.
resp, server = self.client.shelve_server(self.server_id)
self.assertEqual(202, resp.status)
- self.addCleanup(self.client.unshelve_server, self.server_id)
offload_time = CONF.compute.shelved_offload_time
if offload_time >= 0:
@@ -415,6 +426,8 @@
self.client.shelve_server,
self.server_id)
+ self.client.unshelve_server(self.server_id)
+
@test.attr(type=['negative', 'gate'])
def test_unshelve_non_existent_server(self):
# unshelve a non existent server
diff --git a/tempest/api/compute/v3/test_quotas.py b/tempest/api/compute/v3/test_quotas.py
index b53d9be..62a7556 100644
--- a/tempest/api/compute/v3/test_quotas.py
+++ b/tempest/api/compute/v3/test_quotas.py
@@ -23,10 +23,8 @@
def setUpClass(cls):
super(QuotasV3Test, cls).setUpClass()
cls.client = cls.quotas_client
- cls.admin_client = cls._get_identity_admin_client()
- resp, tenants = cls.admin_client.list_tenants()
- cls.tenant_id = [tnt['id'] for tnt in tenants if tnt['name'] ==
- cls.client.tenant_name][0]
+ cls.tenant_id = cls.client.tenant_id
+ cls.user_id = cls.client.user_id
cls.default_quota_set = set(('metadata_items',
'ram', 'floating_ips',
'fixed_ips', 'key_pairs',
@@ -43,6 +41,14 @@
sorted(quota_set.keys()))
self.assertEqual(quota_set['id'], self.tenant_id)
+ # get the quota set using user id
+ resp, quota_set = self.client.get_quota_set(self.tenant_id,
+ self.user_id)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(sorted(expected_quota_set),
+ sorted(quota_set.keys()))
+ self.assertEqual(quota_set['id'], self.tenant_id)
+
@test.attr(type='smoke')
def test_get_default_quotas(self):
# User can get the default quota set for it's tenant
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index 3c5feed..4585912 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -24,7 +24,6 @@
class AttachVolumeTestJSON(base.BaseV2ComputeTest):
- run_ssh = CONF.compute.run_ssh
def __init__(self, *args, **kwargs):
super(AttachVolumeTestJSON, self).__init__(*args, **kwargs)
@@ -34,6 +33,7 @@
@classmethod
def setUpClass(cls):
+ cls.prepare_instance_network()
super(AttachVolumeTestJSON, cls).setUpClass()
cls.device = CONF.compute.volume_device_name
if not CONF.service_available.cinder:
@@ -76,7 +76,7 @@
self.attached = True
self.addCleanup(self._detach, server['id'], volume['id'])
- @testtools.skipIf(not run_ssh, 'SSH required for this test')
+ @testtools.skipUnless(CONF.compute.run_ssh, 'SSH required for this test')
@test.attr(type='gate')
def test_attach_detach_volume(self):
# Stop and Start a server with an attached volume, ensuring that
diff --git a/tempest/api/data_processing/base.py b/tempest/api/data_processing/base.py
index 5b272ef..74444d7 100644
--- a/tempest/api/data_processing/base.py
+++ b/tempest/api/data_processing/base.py
@@ -14,6 +14,7 @@
# limitations under the License.
from tempest import config
+from tempest import exceptions
import tempest.test
@@ -26,46 +27,39 @@
@classmethod
def setUpClass(cls):
super(BaseDataProcessingTest, cls).setUpClass()
+ if not CONF.service_available.sahara:
+ raise cls.skipException('Sahara support is required')
+
os = cls.get_client_manager()
- if not CONF.service_available.savanna:
- raise cls.skipException("Savanna support is required")
cls.client = os.data_processing_client
- # set some constants
cls.flavor_ref = CONF.compute.flavor_ref
- cls.simple_node_group_template = {
- 'plugin_name': 'vanilla',
- 'hadoop_version': '1.2.1',
- 'node_processes': [
- "datanode",
- "tasktracker"
- ],
- 'flavor_id': cls.flavor_ref,
- 'node_configs': {
- 'HDFS': {
- 'Data Node Heap Size': 1024
- },
- 'MapReduce': {
- 'Task Tracker Heap Size': 1024
- }
- }
- }
# add lists for watched resources
cls._node_group_templates = []
+ cls._cluster_templates = []
+ cls._data_sources = []
@classmethod
def tearDownClass(cls):
- # cleanup node group templates
- for ngt_id in cls._node_group_templates:
- try:
- cls.client.delete_node_group_template(ngt_id)
- except Exception:
- # ignore errors while auto removing created resource
- pass
+ cls.cleanup_resources(getattr(cls, '_cluster_templates', []),
+ cls.client.delete_cluster_template)
+ cls.cleanup_resources(getattr(cls, '_node_group_templates', []),
+ cls.client.delete_node_group_template)
+ cls.cleanup_resources(getattr(cls, '_data_sources', []),
+ cls.client.delete_data_source)
cls.clear_isolated_creds()
super(BaseDataProcessingTest, cls).tearDownClass()
+ @staticmethod
+ def cleanup_resources(resource_id_list, method):
+ for resource_id in resource_id_list:
+ try:
+ method(resource_id)
+ except exceptions.NotFound:
+ # ignore errors while auto removing created resource
+ pass
+
@classmethod
def create_node_group_template(cls, name, plugin_name, hadoop_version,
node_processes, flavor_id,
@@ -76,16 +70,46 @@
object. All resources created in this method will be automatically
removed in tearDownClass method.
"""
-
resp, body = cls.client.create_node_group_template(name, plugin_name,
hadoop_version,
node_processes,
flavor_id,
node_configs,
**kwargs)
-
# store id of created node group template
- template_id = body['id']
- cls._node_group_templates.append(template_id)
+ cls._node_group_templates.append(body['id'])
- return resp, body, template_id
+ return resp, body
+
+ @classmethod
+ def create_cluster_template(cls, name, plugin_name, hadoop_version,
+ node_groups, cluster_configs=None, **kwargs):
+ """Creates watched cluster template with specified params.
+
+ It supports passing additional params using kwargs and returns created
+ object. All resources created in this method will be automatically
+ removed in tearDownClass method.
+ """
+ resp, body = cls.client.create_cluster_template(name, plugin_name,
+ hadoop_version,
+ node_groups,
+ cluster_configs,
+ **kwargs)
+ # store id of created cluster template
+ cls._cluster_templates.append(body['id'])
+
+ return resp, body
+
+ @classmethod
+ def create_data_source(cls, name, type, url, **kwargs):
+ """Creates watched data source with specified params.
+
+ It supports passing additional params using kwargs and returns created
+ object. All resources created in this method will be automatically
+ removed in tearDownClass method.
+ """
+ resp, body = cls.client.create_data_source(name, type, url, **kwargs)
+ # store id of created data source
+ cls._data_sources.append(body['id'])
+
+ return resp, body
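For reference, the watched-resource pattern introduced above (per-class id lists plus a cleanup_resources() helper that tolerates already-deleted resources) can be exercised in isolation roughly as follows; FakeClient and the NotFound class below are illustrative stand-ins, not Tempest classes:

    class NotFound(Exception):
        pass

    class FakeClient(object):
        def __init__(self):
            self.deleted = []

        def delete_data_source(self, resource_id):
            if resource_id in self.deleted:
                raise NotFound(resource_id)
            self.deleted.append(resource_id)

    def cleanup_resources(resource_id_list, method):
        for resource_id in resource_id_list:
            try:
                method(resource_id)
            except NotFound:
                # ignore resources that were already removed
                pass

    client = FakeClient()
    watched = ['ds-1', 'ds-1']  # the duplicate simulates a double delete
    cleanup_resources(watched, client.delete_data_source)
    assert client.deleted == ['ds-1']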
diff --git a/tempest/api/data_processing/test_node_group_templates.py b/tempest/api/data_processing/test_node_group_templates.py
index ff4fa6a..ed4cf1f 100644
--- a/tempest/api/data_processing/test_node_group_templates.py
+++ b/tempest/api/data_processing/test_node_group_templates.py
@@ -19,65 +19,87 @@
class NodeGroupTemplateTest(dp_base.BaseDataProcessingTest):
- def _create_simple_node_group_template(self, template_name=None):
- """Creates simple Node Group Template with optional name specified.
+ @classmethod
+ def setUpClass(cls):
+ super(NodeGroupTemplateTest, cls).setUpClass()
+ cls.node_group_template = {
+ 'description': 'Test node group template',
+ 'plugin_name': 'vanilla',
+ 'hadoop_version': '1.2.1',
+ 'node_processes': [
+ 'datanode',
+ 'tasktracker'
+ ],
+ 'flavor_id': cls.flavor_ref,
+ 'node_configs': {
+ 'HDFS': {
+ 'Data Node Heap Size': 1024
+ },
+ 'MapReduce': {
+ 'Task Tracker Heap Size': 1024
+ }
+ }
+ }
+
+ def _create_node_group_template(self, template_name=None):
+ """Creates Node Group Template with optional name specified.
It creates template and ensures response status and template name.
Returns id and name of created template.
"""
-
if template_name is None:
# generate random name if it's not specified
- template_name = data_utils.rand_name('savanna')
+ template_name = data_utils.rand_name('sahara-ng-template')
- # create simple node group template
- resp, body, template_id = self.create_node_group_template(
- template_name, **self.simple_node_group_template)
+ # create node group template
+ resp, body = self.create_node_group_template(
+ template_name, **self.node_group_template)
# ensure that template created successfully
self.assertEqual(202, resp.status)
self.assertEqual(template_name, body['name'])
- return template_id, template_name
+ return body['id'], template_name
@attr(type='smoke')
def test_node_group_template_create(self):
- # just create and ensure template
- self._create_simple_node_group_template()
+ template_name = data_utils.rand_name('sahara-ng-template')
+ resp, body = self.create_node_group_template(
+ template_name, **self.node_group_template)
+
+ # check that template created successfully
+ self.assertEqual(resp.status, 202)
+ self.assertEqual(template_name, body['name'])
+ self.assertDictContainsSubset(self.node_group_template, body)
@attr(type='smoke')
def test_node_group_template_list(self):
- template_info = self._create_simple_node_group_template()
+ template_info = self._create_node_group_template()
# check for node group template in list
resp, templates = self.client.list_node_group_templates()
self.assertEqual(200, resp.status)
- templates_info = list([(template['id'], template['name'])
- for template in templates])
+ templates_info = [(template['id'], template['name'])
+ for template in templates]
self.assertIn(template_info, templates_info)
@attr(type='smoke')
def test_node_group_template_get(self):
- template_id, template_name = self._create_simple_node_group_template()
+ template_id, template_name = self._create_node_group_template()
# check node group template fetch by id
resp, template = self.client.get_node_group_template(template_id)
self.assertEqual(200, resp.status)
self.assertEqual(template_name, template['name'])
- self.assertEqual(self.simple_node_group_template['plugin_name'],
- template['plugin_name'])
- self.assertEqual(self.simple_node_group_template['node_processes'],
- template['node_processes'])
- self.assertEqual(self.simple_node_group_template['flavor_id'],
- template['flavor_id'])
+ self.assertDictContainsSubset(self.node_group_template, template)
@attr(type='smoke')
def test_node_group_template_delete(self):
- template_id, template_name = self._create_simple_node_group_template()
+ template_id = self._create_node_group_template()[0]
# delete the node group template by id
-        resp = self.client.delete_node_group_template(template_id)
-        self.assertEqual('204', resp[0]['status'])
+        resp = self.client.delete_node_group_template(template_id)[0]
+        self.assertEqual(204, resp.status)
diff --git a/tempest/api/database/base.py b/tempest/api/database/base.py
index 8add9ba..cf70d11 100644
--- a/tempest/api/database/base.py
+++ b/tempest/api/database/base.py
@@ -36,7 +36,9 @@
cls.catalog_type = CONF.database.catalog_type
cls.db_flavor_ref = CONF.database.db_flavor_ref
+ cls.db_current_version = CONF.database.db_current_version
os = cls.get_client_manager()
cls.os = os
cls.database_flavors_client = cls.os.database_flavors_client
+ cls.database_versions_client = cls.os.database_versions_client
diff --git a/tempest/api/compute/api_schema/__init__.py b/tempest/api/database/versions/__init__.py
similarity index 100%
copy from tempest/api/compute/api_schema/__init__.py
copy to tempest/api/database/versions/__init__.py
diff --git a/tempest/api/database/versions/test_versions.py b/tempest/api/database/versions/test_versions.py
new file mode 100644
index 0000000..6101f47
--- /dev/null
+++ b/tempest/api/database/versions/test_versions.py
@@ -0,0 +1,40 @@
+# Copyright 2014 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.database import base
+from tempest import test
+
+
+class DatabaseVersionsTest(base.BaseDatabaseTest):
+ _interface = 'json'
+
+ @classmethod
+ def setUpClass(cls):
+ super(DatabaseVersionsTest, cls).setUpClass()
+ cls.client = cls.database_versions_client
+
+ @test.attr(type='smoke')
+ def test_list_db_versions(self):
+ resp, versions = self.client.list_db_versions()
+ self.assertEqual(200, resp.status)
+ self.assertTrue(len(versions) > 0, "No database versions found")
+ # List of all versions should contain the current version, and there
+ # should only be one 'current' version
+ current_versions = list()
+ for version in versions:
+ if 'CURRENT' == version['status']:
+ current_versions.append(version['id'])
+ self.assertEqual(1, len(current_versions))
+ self.assertIn(self.db_current_version, current_versions)
diff --git a/tempest/api/identity/admin/test_roles.py b/tempest/api/identity/admin/test_roles.py
index 5e78cce..a29f27e 100644
--- a/tempest/api/identity/admin/test_roles.py
+++ b/tempest/api/identity/admin/test_roles.py
@@ -17,13 +17,14 @@
from tempest.api.identity import base
from tempest.common.utils import data_utils
-from tempest.test import attr
+from tempest import test
class RolesTestJSON(base.BaseIdentityV2AdminTest):
_interface = 'json'
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(RolesTestJSON, cls).setUpClass()
for _ in moves.xrange(5):
@@ -46,7 +47,7 @@
found = True
self.assertTrue(found, "assigned role was not in list")
- @attr(type='gate')
+ @test.attr(type='gate')
def test_list_roles(self):
# Return a list of all roles
resp, body = self.client.list_roles()
@@ -54,13 +55,12 @@
self.assertTrue(any(found))
self.assertEqual(len(found), len(self.data.roles))
- @attr(type='gate')
+ @test.attr(type='gate')
def test_role_create_delete(self):
# Role should be created, verified, and deleted
role_name = data_utils.rand_name(name='role-test-')
resp, body = self.client.create_role(role_name)
- self.assertIn('status', resp)
- self.assertTrue(resp['status'].startswith('2'))
+ self.assertEqual(200, resp.status)
self.assertEqual(role_name, body['name'])
resp, body = self.client.list_roles()
@@ -68,14 +68,25 @@
self.assertTrue(any(found))
resp, body = self.client.delete_role(found[0]['id'])
- self.assertIn('status', resp)
- self.assertTrue(resp['status'].startswith('2'))
+ self.assertEqual(204, resp.status)
resp, body = self.client.list_roles()
found = [role for role in body if role['name'] == role_name]
self.assertFalse(any(found))
- @attr(type='gate')
+ @test.attr(type='gate')
+ def test_get_role_by_id(self):
+ # Get a role by its id
+ self.data.setup_test_role()
+ role_id = self.data.role['id']
+ role_name = self.data.role['name']
+ resp, body = self.client.get_role(role_id)
+ self.assertIn('status', resp)
+        self.assertEqual('200', resp['status'])
+ self.assertEqual(role_id, body['id'])
+ self.assertEqual(role_name, body['name'])
+
+ @test.attr(type='gate')
def test_assign_user_role(self):
# Assign a role to a user on a tenant
(user, tenant, role) = self._get_role_params()
@@ -83,7 +94,7 @@
resp, roles = self.client.list_user_roles(tenant['id'], user['id'])
self.assert_role_in_role_list(role, roles)
- @attr(type='gate')
+ @test.attr(type='gate')
def test_remove_user_role(self):
# Remove a role assigned to a user on a tenant
(user, tenant, role) = self._get_role_params()
@@ -91,9 +102,9 @@
user['id'], role['id'])
resp, body = self.client.remove_user_role(tenant['id'], user['id'],
user_role['id'])
- self.assertEqual(resp['status'], '204')
+ self.assertEqual(204, resp.status)
- @attr(type='gate')
+ @test.attr(type='gate')
def test_list_user_roles(self):
# List roles assigned to a user on tenant
(user, tenant, role) = self._get_role_params()
diff --git a/tempest/api/identity/admin/test_roles_negative.py b/tempest/api/identity/admin/test_roles_negative.py
index 7a0bdea..d311143 100644
--- a/tempest/api/identity/admin/test_roles_negative.py
+++ b/tempest/api/identity/admin/test_roles_negative.py
@@ -74,8 +74,7 @@
role_name = data_utils.rand_name(name='role-dup-')
resp, body = self.client.create_role(role_name)
role1_id = body.get('id')
- self.assertIn('status', resp)
- self.assertTrue(resp['status'].startswith('2'))
+ self.assertEqual(200, resp.status)
self.addCleanup(self.client.delete_role, role1_id)
self.assertRaises(exceptions.Conflict, self.client.create_role,
role_name)
diff --git a/tempest/api/identity/admin/test_services.py b/tempest/api/identity/admin/test_services.py
index 459c44c..e5cb348 100644
--- a/tempest/api/identity/admin/test_services.py
+++ b/tempest/api/identity/admin/test_services.py
@@ -27,7 +27,7 @@
def _del_service(self, service_id):
# Deleting the service created in this method
resp, _ = self.client.delete_service(service_id)
- self.assertEqual(resp['status'], '204')
+ self.assertEqual(204, resp.status)
# Checking whether service is deleted successfully
self.assertRaises(exceptions.NotFound, self.client.get_service,
service_id)
@@ -43,7 +43,7 @@
name, type, description=description)
self.assertFalse(service_data['id'] is None)
self.addCleanup(self._del_service, service_data['id'])
- self.assertTrue(resp['status'].startswith('2'))
+ self.assertEqual(200, resp.status)
# Verifying response body of create service
self.assertIn('id', service_data)
self.assertIn('name', service_data)
@@ -54,7 +54,7 @@
self.assertEqual(description, service_data['description'])
# Get service
resp, fetched_service = self.client.get_service(service_data['id'])
- self.assertTrue(resp['status'].startswith('2'))
+ self.assertEqual(200, resp.status)
# verifying the existence of service created
self.assertIn('id', fetched_service)
self.assertEqual(fetched_service['id'], service_data['id'])
@@ -66,6 +66,20 @@
self.assertEqual(fetched_service['description'],
service_data['description'])
+ @attr(type='gate')
+ def test_create_service_without_description(self):
+ # Create a service only with name and type
+ name = data_utils.rand_name('service-')
+ type = data_utils.rand_name('type--')
+ resp, service = self.client.create_service(name, type)
+ self.assertIn('id', service)
+        self.assertEqual('200', resp['status'])
+ self.addCleanup(self._del_service, service['id'])
+ self.assertIn('name', service)
+ self.assertEqual(name, service['name'])
+ self.assertIn('type', service)
+ self.assertEqual(type, service['type'])
+
@attr(type='smoke')
def test_list_services(self):
# Create, List, Verify and Delete Services
@@ -86,7 +100,7 @@
self.addCleanup(delete_services)
# List and Verify Services
resp, body = self.client.list_services()
- self.assertTrue(resp['status'].startswith('2'))
+ self.assertEqual(200, resp.status)
found = [service for service in body if service['id'] in service_ids]
self.assertEqual(len(found), len(services), 'Services not found')
diff --git a/tempest/api/identity/admin/test_tenants.py b/tempest/api/identity/admin/test_tenants.py
index 257a6d7..7ba46bb 100644
--- a/tempest/api/identity/admin/test_tenants.py
+++ b/tempest/api/identity/admin/test_tenants.py
@@ -35,13 +35,13 @@
tenants.append(tenant)
tenant_ids = map(lambda x: x['id'], tenants)
resp, body = self.client.list_tenants()
- self.assertTrue(resp['status'].startswith('2'))
+ self.assertEqual(200, resp.status)
found = [tenant for tenant in body if tenant['id'] in tenant_ids]
self.assertEqual(len(found), len(tenants), 'Tenants not created')
for tenant in tenants:
resp, body = self.client.delete_tenant(tenant['id'])
- self.assertTrue(resp['status'].startswith('2'))
+ self.assertEqual(204, resp.status)
self.data.tenants.remove(tenant)
resp, body = self.client.list_tenants()
@@ -57,10 +57,9 @@
description=tenant_desc)
tenant = body
self.data.tenants.append(tenant)
- st1 = resp['status']
tenant_id = body['id']
desc1 = body['description']
- self.assertTrue(st1.startswith('2'))
+ self.assertEqual(200, resp.status)
self.assertEqual(desc1, tenant_desc, 'Description should have '
'been sent in response for create')
resp, body = self.client.get_tenant(tenant_id)
@@ -78,9 +77,8 @@
tenant = body
self.data.tenants.append(tenant)
tenant_id = body['id']
- st1 = resp['status']
en1 = body['enabled']
- self.assertTrue(st1.startswith('2'))
+ self.assertEqual(200, resp.status)
self.assertTrue(en1, 'Enable should be True in response')
resp, body = self.client.get_tenant(tenant_id)
en2 = body['enabled']
@@ -96,9 +94,8 @@
tenant = body
self.data.tenants.append(tenant)
tenant_id = body['id']
- st1 = resp['status']
en1 = body['enabled']
- self.assertTrue(st1.startswith('2'))
+ self.assertEqual(200, resp.status)
self.assertEqual('false', str(en1).lower(),
'Enable should be False in response')
resp, body = self.client.get_tenant(tenant_id)
@@ -122,9 +119,8 @@
t_name2 = data_utils.rand_name(name='tenant2-')
resp, body = self.client.update_tenant(t_id, name=t_name2)
- st2 = resp['status']
resp2_name = body['name']
- self.assertTrue(st2.startswith('2'))
+ self.assertEqual(200, resp.status)
self.assertNotEqual(resp1_name, resp2_name)
resp, body = self.client.get_tenant(t_id)
@@ -152,9 +148,8 @@
t_desc2 = data_utils.rand_name(name='desc2-')
resp, body = self.client.update_tenant(t_id, description=t_desc2)
- st2 = resp['status']
resp2_desc = body['description']
- self.assertTrue(st2.startswith('2'))
+ self.assertEqual(200, resp.status)
self.assertNotEqual(resp1_desc, resp2_desc)
resp, body = self.client.get_tenant(t_id)
@@ -182,9 +177,8 @@
t_en2 = True
resp, body = self.client.update_tenant(t_id, enabled=t_en2)
- st2 = resp['status']
resp2_en = body['enabled']
- self.assertTrue(st2.startswith('2'))
+ self.assertEqual(200, resp.status)
self.assertNotEqual(resp1_en, resp2_en)
resp, body = self.client.get_tenant(t_id)
diff --git a/tempest/api/identity/admin/test_tokens.py b/tempest/api/identity/admin/test_tokens.py
index 533f374..7fec28d 100644
--- a/tempest/api/identity/admin/test_tokens.py
+++ b/tempest/api/identity/admin/test_tokens.py
@@ -56,6 +56,68 @@
resp, body = self.client.delete_token(token_id)
self.assertEqual(resp['status'], '204')
+ @attr(type='gate')
+ def test_rescope_token(self):
+        """An unscoped token can be requested; that token can then be
+        used to request a scoped token.
+ """
+
+ # Create a user.
+ user_name = data_utils.rand_name(name='user-')
+ user_password = data_utils.rand_name(name='pass-')
+ tenant_id = None # No default tenant so will get unscoped token.
+ email = ''
+ resp, user = self.client.create_user(user_name, user_password,
+ tenant_id, email)
+ self.assertEqual(200, resp.status)
+ self.data.users.append(user)
+
+        # Create a couple of tenants.
+ tenant1_name = data_utils.rand_name(name='tenant-')
+ resp, tenant1 = self.client.create_tenant(tenant1_name)
+ self.assertEqual(200, resp.status)
+ self.data.tenants.append(tenant1)
+
+ tenant2_name = data_utils.rand_name(name='tenant-')
+ resp, tenant2 = self.client.create_tenant(tenant2_name)
+ self.assertEqual(200, resp.status)
+ self.data.tenants.append(tenant2)
+
+ # Create a role
+ role_name = data_utils.rand_name(name='role-')
+ resp, role = self.client.create_role(role_name)
+ self.assertEqual(200, resp.status)
+ self.data.roles.append(role)
+
+ # Grant the user the role on the tenants.
+ resp, _ = self.client.assign_user_role(tenant1['id'], user['id'],
+ role['id'])
+ self.assertEqual(200, resp.status)
+
+ resp, _ = self.client.assign_user_role(tenant2['id'], user['id'],
+ role['id'])
+ self.assertEqual(200, resp.status)
+
+ # Get an unscoped token.
+        resp, body = self.token_client.auth(user_name, user_password)
+        self.assertEqual(200, resp.status)
+
+ token_id = body['token']['id']
+
+ # Use the unscoped token to get a token scoped to tenant1
+        resp, body = self.token_client.auth_token(token_id, tenant=tenant1_name)
+        self.assertEqual(200, resp.status)
+
+ scoped_token_id = body['token']['id']
+
+ # Revoke the scoped token
+ resp, body = self.client.delete_token(scoped_token_id)
+ self.assertEqual(204, resp.status)
+
+ # Use the unscoped token to get a token scoped to tenant2
+        resp, body = self.token_client.auth_token(token_id, tenant=tenant2_name)
+        self.assertEqual(200, resp.status)
+
class TokensTestXML(TokensTestJSON):
_interface = 'xml'
diff --git a/tempest/api/identity/admin/test_users_negative.py b/tempest/api/identity/admin/test_users_negative.py
index 1188325..4e8ebe5 100644
--- a/tempest/api/identity/admin/test_users_negative.py
+++ b/tempest/api/identity/admin/test_users_negative.py
@@ -13,11 +13,12 @@
# License for the specific language governing permissions and limitations
# under the License.
+import uuid
+
from tempest.api.identity import base
from tempest.common.utils import data_utils
from tempest import exceptions
from tempest.test import attr
-import uuid
class UsersNegativeTestJSON(base.BaseIdentityV2AdminTest):
diff --git a/tempest/api/identity/admin/v3/test_credentials.py b/tempest/api/identity/admin/v3/test_credentials.py
index 5f22d43..6bb0ebe 100644
--- a/tempest/api/identity/admin/v3/test_credentials.py
+++ b/tempest/api/identity/admin/v3/test_credentials.py
@@ -15,13 +15,14 @@
from tempest.api.identity import base
from tempest.common.utils import data_utils
-from tempest.test import attr
+from tempest import test
class CredentialsTestJSON(base.BaseIdentityV3AdminTest):
_interface = 'json'
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(CredentialsTestJSON, cls).setUpClass()
cls.projects = list()
@@ -35,28 +36,32 @@
resp, cls.project = cls.client.create_project(
data_utils.rand_name('project-'),
description=data_utils.rand_name('project-desc-'))
- assert resp['status'] == '201', "Expected %s" % resp['status']
+ assert resp['status'] == '201', (
+ "Expected 201, but got: %s" % resp['status'])
cls.projects.append(cls.project['id'])
resp, cls.user_body = cls.client.create_user(
u_name, description=u_desc, password=u_password,
email=u_email, project_id=cls.projects[0])
- assert resp['status'] == '201', "Expected: %s" % resp['status']
+ assert resp['status'] == '201', (
+ "Expected 201, but got: %s" % resp['status'])
@classmethod
def tearDownClass(cls):
resp, _ = cls.client.delete_user(cls.user_body['id'])
- assert resp['status'] == '204', "Expected: %s" % resp['status']
+ assert resp['status'] == '204', (
+ "Expected 204, but got: %s" % resp['status'])
for p in cls.projects:
resp, _ = cls.client.delete_project(p)
- assert resp['status'] == '204', "Expected: %s" % resp['status']
+ assert resp['status'] == '204', (
+ "Expected 204, but got: %s" % resp['status'])
super(CredentialsTestJSON, cls).tearDownClass()
def _delete_credential(self, cred_id):
resp, body = self.creds_client.delete_credential(cred_id)
self.assertEqual(resp['status'], '204')
- @attr(type='smoke')
+ @test.attr(type='smoke')
def test_credentials_create_get_update_delete(self):
keys = [data_utils.rand_name('Access-'),
data_utils.rand_name('Secret-')]
@@ -91,7 +96,7 @@
self.assertEqual(update_body['blob'][value2],
get_body['blob'][value2])
- @attr(type='smoke')
+ @test.attr(type='smoke')
def test_credentials_list_delete(self):
created_cred_ids = list()
fetched_cred_ids = list()
diff --git a/tempest/api/identity/admin/v3/test_endpoints.py b/tempest/api/identity/admin/v3/test_endpoints.py
index 05b704f..dd3b576 100644
--- a/tempest/api/identity/admin/v3/test_endpoints.py
+++ b/tempest/api/identity/admin/v3/test_endpoints.py
@@ -15,13 +15,14 @@
from tempest.api.identity import base
from tempest.common.utils import data_utils
-from tempest.test import attr
+from tempest import test
class EndPointsTestJSON(base.BaseIdentityV3AdminTest):
_interface = 'json'
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(EndPointsTestJSON, cls).setUpClass()
cls.identity_client = cls.client
@@ -53,7 +54,7 @@
cls.service_client.delete_service(s)
super(EndPointsTestJSON, cls).tearDownClass()
- @attr(type='gate')
+ @test.attr(type='gate')
def test_list_endpoints(self):
# Get a list of endpoints
resp, fetched_endpoints = self.client.list_endpoints()
@@ -65,7 +66,7 @@
"Failed to find endpoint %s in fetched list" %
', '.join(str(e) for e in missing_endpoints))
- @attr(type='gate')
+ @test.attr(type='gate')
def test_create_list_delete_endpoint(self):
region = data_utils.rand_name('region')
url = data_utils.rand_name('url')
@@ -91,7 +92,7 @@
fetched_endpoints_id = [e['id'] for e in fetched_endpoints]
self.assertNotIn(endpoint['id'], fetched_endpoints_id)
- @attr(type='smoke')
+ @test.attr(type='smoke')
def test_update_endpoint(self):
# Creating an endpoint so as to check update endpoint
# with new values
@@ -102,6 +103,7 @@
self.client.create_endpoint(self.service_id, interface1,
url1, region=region1,
enabled=True)
+ self.addCleanup(self.client.delete_endpoint, endpoint_for_update['id'])
# Creating service so as update endpoint with new service ID
s_name = data_utils.rand_name('service-')
s_type = data_utils.rand_name('type--')
@@ -126,7 +128,6 @@
self.assertEqual(url2, endpoint['url'])
self.assertEqual(region2, endpoint['region'])
self.assertEqual('false', str(endpoint['enabled']).lower())
- self.addCleanup(self.client.delete_endpoint, endpoint_for_update['id'])
class EndPointsTestXML(EndPointsTestJSON):
diff --git a/tempest/api/identity/admin/v3/test_endpoints_negative.py b/tempest/api/identity/admin/v3/test_endpoints_negative.py
new file mode 100644
index 0000000..28615a4
--- /dev/null
+++ b/tempest/api/identity/admin/v3/test_endpoints_negative.py
@@ -0,0 +1,94 @@
+
+# Copyright 2013 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from tempest.api.identity import base
+from tempest.common.utils import data_utils
+from tempest import exceptions
+from tempest.test import attr
+
+
+class EndpointsNegativeTestJSON(base.BaseIdentityV3AdminTest):
+ _interface = 'json'
+
+ @classmethod
+ def setUpClass(cls):
+ super(EndpointsNegativeTestJSON, cls).setUpClass()
+ cls.identity_client = cls.client
+ cls.client = cls.endpoints_client
+ cls.service_ids = list()
+ s_name = data_utils.rand_name('service-')
+ s_type = data_utils.rand_name('type--')
+ s_description = data_utils.rand_name('description-')
+ resp, cls.service_data = (
+ cls.service_client.create_service(s_name, s_type,
+ description=s_description))
+ cls.service_id = cls.service_data['id']
+ cls.service_ids.append(cls.service_id)
+
+ @classmethod
+ def tearDownClass(cls):
+ for s in cls.service_ids:
+ cls.service_client.delete_service(s)
+ super(EndpointsNegativeTestJSON, cls).tearDownClass()
+
+ @attr(type=['negative', 'gate'])
+ def test_create_with_enabled_False(self):
+ # Enabled should be a boolean, not a string like 'False'
+ interface = 'public'
+ url = data_utils.rand_name('url')
+ region = data_utils.rand_name('region')
+ self.assertRaises(exceptions.BadRequest, self.client.create_endpoint,
+ self.service_id, interface, url, region=region,
+ force_enabled='False')
+
+ @attr(type=['negative', 'gate'])
+ def test_create_with_enabled_True(self):
+ # Enabled should be a boolean, not a string like 'True'
+ interface = 'public'
+ url = data_utils.rand_name('url')
+ region = data_utils.rand_name('region')
+ self.assertRaises(exceptions.BadRequest, self.client.create_endpoint,
+ self.service_id, interface, url, region=region,
+ force_enabled='True')
+
+ def _assert_update_raises_bad_request(self, enabled):
+
+ # Create an endpoint
+ region1 = data_utils.rand_name('region')
+ url1 = data_utils.rand_name('url')
+ interface1 = 'public'
+ resp, endpoint_for_update = (
+ self.client.create_endpoint(self.service_id, interface1,
+ url1, region=region1, enabled=True))
+ self.addCleanup(self.client.delete_endpoint, endpoint_for_update['id'])
+
+ self.assertRaises(exceptions.BadRequest, self.client.update_endpoint,
+ endpoint_for_update['id'], force_enabled=enabled)
+
+ @attr(type=['negative', 'gate'])
+ def test_update_with_enabled_False(self):
+ # Enabled should be a boolean, not a string like 'False'
+ self._assert_update_raises_bad_request('False')
+
+ @attr(type=['negative', 'gate'])
+ def test_update_with_enabled_True(self):
+ # Enabled should be a boolean, not a string like 'True'
+ self._assert_update_raises_bad_request('True')
+
+
+class EndpointsNegativeTestXML(EndpointsNegativeTestJSON):
+ _interface = 'xml'
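
For context, the requests these negative tests target look roughly like the
sketch below: Keystone v3 accepts only a JSON boolean for 'enabled' on
endpoint create/update, and a quoted string is rejected with 400. The URL,
admin token and service id are placeholders:

    import requests

    KEYSTONE_V3 = 'http://keystone.example.com:5000/v3'
    HEADERS = {'X-Auth-Token': 'ADMIN_TOKEN'}

    endpoint = {'interface': 'public',
                'url': 'http://backend.example.com:9999/',
                'service_id': 'SERVICE_ID',
                'region': 'RegionOne',
                'enabled': True}                       # JSON boolean

    ok = requests.post('%s/endpoints' % KEYSTONE_V3,
                       json={'endpoint': endpoint}, headers=HEADERS)
    print(ok.status_code)                              # expected: 201

    endpoint['enabled'] = 'False'                      # string, not a boolean
    bad = requests.post('%s/endpoints' % KEYSTONE_V3,
                        json={'endpoint': endpoint}, headers=HEADERS)
    print(bad.status_code)                             # expected: 400
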
diff --git a/tempest/api/identity/admin/v3/test_groups.py b/tempest/api/identity/admin/v3/test_groups.py
index 6e898b2..056f713 100644
--- a/tempest/api/identity/admin/v3/test_groups.py
+++ b/tempest/api/identity/admin/v3/test_groups.py
@@ -68,7 +68,7 @@
# list users in group
resp, group_users = self.client.list_group_users(group['id'])
self.assertEqual(resp['status'], '200')
- self.assertEqual(users.sort(), group_users.sort())
+ self.assertEqual(sorted(users), sorted(group_users))
# delete user in group
for user in users:
resp, body = self.client.delete_group_user(group['id'],
@@ -77,6 +77,27 @@
resp, group_users = self.client.list_group_users(group['id'])
self.assertEqual(len(group_users), 0)
+ @test.attr(type='smoke')
+ def test_list_user_groups(self):
+ # create a user
+ resp, user = self.client.create_user(
+ data_utils.rand_name('User-'),
+ password=data_utils.rand_name('Pass-'))
+ self.addCleanup(self.client.delete_user, user['id'])
+ # create two groups, and add user into them
+ groups = []
+ for i in range(2):
+ name = data_utils.rand_name('Group-')
+ resp, group = self.client.create_group(name)
+ groups.append(group)
+ self.addCleanup(self.client.delete_group, group['id'])
+ self.client.add_group_user(group['id'], user['id'])
+ # list groups which user belongs to
+ resp, user_groups = self.client.list_user_groups(user['id'])
+ self.assertEqual('200', resp['status'])
+ self.assertEqual(sorted(groups), sorted(user_groups))
+ self.assertEqual(2, len(user_groups))
+
class GroupsV3TestXML(GroupsV3TestJSON):
_interface = 'xml'
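
The new test_list_user_groups case maps to a single Keystone v3 call; a
standalone equivalent (endpoint, token and user id are placeholders) is:

    import requests

    resp = requests.get(
        'http://keystone.example.com:5000/v3/users/USER_ID/groups',
        headers={'X-Auth-Token': 'ADMIN_TOKEN'})
    resp.raise_for_status()
    for group in resp.json()['groups']:
        print(group['id'], group['name'])
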
diff --git a/tempest/api/identity/admin/v3/test_roles.py b/tempest/api/identity/admin/v3/test_roles.py
index 467d28b..90dccca 100644
--- a/tempest/api/identity/admin/v3/test_roles.py
+++ b/tempest/api/identity/admin/v3/test_roles.py
@@ -15,38 +15,41 @@
from tempest.api.identity import base
from tempest.common.utils import data_utils
-from tempest.test import attr
+from tempest import test
class RolesV3TestJSON(base.BaseIdentityV3AdminTest):
_interface = 'json'
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(RolesV3TestJSON, cls).setUpClass()
cls.fetched_role_ids = list()
u_name = data_utils.rand_name('user-')
u_desc = '%s description' % u_name
u_email = '%s@testmail.tm' % u_name
- u_password = data_utils.rand_name('pass-')
+ cls.u_password = data_utils.rand_name('pass-')
resp = [None] * 5
- resp[0], cls.project = cls.client.create_project(
- data_utils.rand_name('project-'),
- description=data_utils.rand_name('project-desc-'))
- resp[1], cls.domain = cls.client.create_domain(
+ resp[0], cls.domain = cls.client.create_domain(
data_utils.rand_name('domain-'),
description=data_utils.rand_name('domain-desc-'))
+ resp[1], cls.project = cls.client.create_project(
+ data_utils.rand_name('project-'),
+ description=data_utils.rand_name('project-desc-'),
+ domain_id=cls.domain['id'])
resp[2], cls.group_body = cls.client.create_group(
data_utils.rand_name('Group-'), project_id=cls.project['id'],
domain_id=cls.domain['id'])
resp[3], cls.user_body = cls.client.create_user(
- u_name, description=u_desc, password=u_password,
+ u_name, description=u_desc, password=cls.u_password,
email=u_email, project_id=cls.project['id'],
domain_id=cls.domain['id'])
resp[4], cls.role = cls.client.create_role(
data_utils.rand_name('Role-'))
for r in resp:
- assert r['status'] == '201', "Expected: %s" % r['status']
+ assert r['status'] == '201', (
+ "Expected 201, but got: %s" % r['status'])
@classmethod
def tearDownClass(cls):
@@ -60,7 +63,8 @@
cls.client.update_domain(cls.domain['id'], enabled=False)
resp[4], _ = cls.client.delete_domain(cls.domain['id'])
for r in resp:
- assert r['status'] == '204', "Expected: %s" % r['status']
+ assert r['status'] == '204', (
+ "Expected 204, but got: %s" % r['status'])
super(RolesV3TestJSON, cls).tearDownClass()
def _list_assertions(self, resp, body, fetched_role_ids, role_id):
@@ -68,8 +72,8 @@
self.assertEqual(len(body), 1)
self.assertIn(role_id, fetched_role_ids)
- @attr(type='smoke')
- def test_role_create_update_get(self):
+ @test.attr(type='smoke')
+ def test_role_create_update_get_list(self):
r_name = data_utils.rand_name('Role-')
resp, role = self.client.create_role(r_name)
self.addCleanup(self.client.delete_role, role['id'])
@@ -90,7 +94,11 @@
self.assertEqual(new_name, new_role['name'])
self.assertEqual(updated_role['id'], new_role['id'])
- @attr(type='smoke')
+ resp, roles = self.client.list_roles()
+ self.assertEqual(resp['status'], '200')
+ self.assertIn(role['id'], [r['id'] for r in roles])
+
+ @test.attr(type='smoke')
def test_grant_list_revoke_role_to_user_on_project(self):
resp, _ = self.client.assign_user_role_on_project(
self.project['id'], self.user_body['id'], self.role['id'])
@@ -109,7 +117,7 @@
self.project['id'], self.user_body['id'], self.role['id'])
self.assertEqual(resp['status'], '204')
- @attr(type='smoke')
+ @test.attr(type='smoke')
def test_grant_list_revoke_role_to_user_on_domain(self):
resp, _ = self.client.assign_user_role_on_domain(
self.domain['id'], self.user_body['id'], self.role['id'])
@@ -128,12 +136,13 @@
self.domain['id'], self.user_body['id'], self.role['id'])
self.assertEqual(resp['status'], '204')
- @attr(type='smoke')
+ @test.attr(type='smoke')
def test_grant_list_revoke_role_to_group_on_project(self):
+ # Grant role to group on project
resp, _ = self.client.assign_group_role_on_project(
self.project['id'], self.group_body['id'], self.role['id'])
self.assertEqual(resp['status'], '204')
-
+ # List group roles on project
resp, roles = self.client.list_group_roles_on_project(
self.project['id'], self.group_body['id'])
@@ -142,12 +151,23 @@
self._list_assertions(resp, roles, self.fetched_role_ids,
self.role['id'])
-
+ # Add user to group, and ensure user has role on project
+ self.client.add_group_user(self.group_body['id'], self.user_body['id'])
+ self.addCleanup(self.client.delete_group_user,
+ self.group_body['id'], self.user_body['id'])
+ resp, body = self.token.auth(self.user_body['id'], self.u_password,
+ self.project['name'],
+ domain=self.domain['name'])
+ roles = body['token']['roles']
+ self.assertEqual(resp['status'], '201')
+ self.assertEqual(len(roles), 1)
+ self.assertEqual(roles[0]['id'], self.role['id'])
+ # Revoke role from group on project
resp, _ = self.client.revoke_role_from_group_on_project(
self.project['id'], self.group_body['id'], self.role['id'])
self.assertEqual(resp['status'], '204')
- @attr(type='smoke')
+ @test.attr(type='smoke')
def test_grant_list_revoke_role_to_group_on_domain(self):
resp, _ = self.client.assign_group_role_on_domain(
self.domain['id'], self.group_body['id'], self.role['id'])
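
The strengthened group-on-project test above boils down to: grant a role to a
group on a project, add a user to the group, then authenticate that user with
project scope and check the roles carried in the token. A rough raw-HTTP
sketch, with all ids, names and the admin token as placeholders:

    import requests

    KS = 'http://keystone.example.com:5000/v3'
    HEADERS = {'X-Auth-Token': 'ADMIN_TOKEN'}

    # grant the role to the group on the project (204 on success)
    requests.put('%s/projects/PROJECT_ID/groups/GROUP_ID/roles/ROLE_ID' % KS,
                 headers=HEADERS)
    # add the user to the group
    requests.put('%s/groups/GROUP_ID/users/USER_ID' % KS, headers=HEADERS)

    # authenticate as the user with project scope and inspect the token roles
    auth = {'auth': {'identity': {'methods': ['password'],
                                  'password': {'user': {'id': 'USER_ID',
                                                        'password': 'PASS'}}},
                     'scope': {'project': {'id': 'PROJECT_ID'}}}}
    r = requests.post('%s/auth/tokens' % KS, json=auth)
    print([role['name'] for role in r.json()['token']['roles']])
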
diff --git a/tempest/api/identity/admin/v3/test_tokens.py b/tempest/api/identity/admin/v3/test_tokens.py
index 9629213..ebc1cac 100644
--- a/tempest/api/identity/admin/v3/test_tokens.py
+++ b/tempest/api/identity/admin/v3/test_tokens.py
@@ -33,15 +33,15 @@
resp, user = self.client.create_user(
u_name, description=u_desc, password=u_password,
email=u_email)
- self.assertTrue(resp['status'].startswith('2'))
+ self.assertEqual(201, resp.status)
self.addCleanup(self.client.delete_user, user['id'])
# Perform Authentication
resp, body = self.token.auth(user['id'], u_password)
- self.assertEqual(resp['status'], '201')
+ self.assertEqual(201, resp.status)
subject_token = resp['x-subject-token']
# Perform GET Token
resp, token_details = self.client.get_token(subject_token)
- self.assertEqual(resp['status'], '200')
+ self.assertEqual(200, resp.status)
self.assertEqual(resp['x-subject-token'], subject_token)
self.assertEqual(token_details['user']['id'], user['id'])
self.assertEqual(token_details['user']['name'], u_name)
@@ -50,6 +50,115 @@
self.assertRaises(exceptions.NotFound, self.client.get_token,
subject_token)
+ @attr(type='gate')
+ def test_rescope_token(self):
+ """Rescope a token.
+
+ An unscoped token can be requested; that token can then be used to request
+ a scoped token. The scoped token can be revoked, and the original unscoped
+ token used to get a token scoped to a different project.
+
+ """
+
+ # Create a user.
+ user_name = data_utils.rand_name(name='user-')
+ user_password = data_utils.rand_name(name='pass-')
+ resp, user = self.client.create_user(user_name, password=user_password)
+ self.assertEqual(201, resp.status)
+ self.addCleanup(self.client.delete_user, user['id'])
+
+ # Create a couple projects
+ project1_name = data_utils.rand_name(name='project-')
+ resp, project1 = self.client.create_project(project1_name)
+ self.assertEqual(201, resp.status)
+ self.addCleanup(self.client.delete_project, project1['id'])
+
+ project2_name = data_utils.rand_name(name='project-')
+ resp, project2 = self.client.create_project(project2_name)
+ self.assertEqual(201, resp.status)
+ self.addCleanup(self.client.delete_project, project2['id'])
+
+ # Create a role
+ role_name = data_utils.rand_name(name='role-')
+ resp, role = self.client.create_role(role_name)
+ self.assertEqual(201, resp.status)
+ self.addCleanup(self.client.delete_role, role['id'])
+
+ # Grant the user the role on both projects.
+ resp, _ = self.client.assign_user_role(project1['id'], user['id'],
+ role['id'])
+ self.assertEqual(204, resp.status)
+
+ resp, _ = self.client.assign_user_role(project2['id'], user['id'],
+ role['id'])
+ self.assertEqual(204, resp.status)
+
+ # Get an unscoped token.
+ resp, token_auth = self.token.auth(user=user['id'],
+ password=user_password)
+ self.assertEqual(201, resp.status)
+
+ token_id = resp['x-subject-token']
+ orig_expires_at = token_auth['token']['expires_at']
+ orig_issued_at = token_auth['token']['issued_at']
+ orig_user = token_auth['token']['user']
+
+ self.assertIsInstance(token_auth['token']['expires_at'], unicode)
+ self.assertIsInstance(token_auth['token']['issued_at'], unicode)
+ self.assertEqual(['password'], token_auth['token']['methods'])
+ self.assertEqual(user['id'], token_auth['token']['user']['id'])
+ self.assertEqual(user['name'], token_auth['token']['user']['name'])
+ self.assertEqual('default',
+ token_auth['token']['user']['domain']['id'])
+ self.assertEqual('Default',
+ token_auth['token']['user']['domain']['name'])
+ self.assertNotIn('catalog', token_auth['token'])
+ self.assertNotIn('project', token_auth['token'])
+ self.assertNotIn('roles', token_auth['token'])
+
+ # Use the unscoped token to get a scoped token.
+ resp, token_auth = self.token.auth(token=token_id,
+ tenant=project1_name,
+ domain='Default')
+ token1_id = resp['x-subject-token']
+ self.assertEqual(201, resp.status)
+
+ self.assertEqual(orig_expires_at, token_auth['token']['expires_at'],
+ 'Expiration time should match original token')
+ self.assertIsInstance(token_auth['token']['issued_at'], unicode)
+ self.assertNotEqual(orig_issued_at, token_auth['token']['issued_at'])
+ self.assertEqual(set(['password', 'token']),
+ set(token_auth['token']['methods']))
+ self.assertEqual(orig_user, token_auth['token']['user'],
+ 'User should match original token')
+ self.assertIsInstance(token_auth['token']['catalog'], list)
+ self.assertEqual(project1['id'],
+ token_auth['token']['project']['id'])
+ self.assertEqual(project1['name'],
+ token_auth['token']['project']['name'])
+ self.assertEqual('default',
+ token_auth['token']['project']['domain']['id'])
+ self.assertEqual('Default',
+ token_auth['token']['project']['domain']['name'])
+ self.assertEqual(1, len(token_auth['token']['roles']))
+ self.assertEqual(role['id'], token_auth['token']['roles'][0]['id'])
+ self.assertEqual(role['name'], token_auth['token']['roles'][0]['name'])
+
+ # Revoke the scoped token; the unscoped token stays valid
+ resp, _ = self.client.delete_token(token1_id)
+ self.assertEqual(204, resp.status)
+
+ # Now get another scoped token using the unscoped token.
+ resp, token_auth = self.token.auth(token=token_id,
+ tenant=project2_name,
+ domain='Default')
+ self.assertEqual(201, resp.status)
+
+ self.assertEqual(project2['id'],
+ token_auth['token']['project']['id'])
+ self.assertEqual(project2['name'],
+ token_auth['token']['project']['name'])
+
class TokensV3TestXML(TokensV3TestJSON):
_interface = 'xml'
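
The v3 rescope test above follows the same pattern as the v2 one: password
auth without a scope, token auth with a project scope, revocation of the
scoped token, then a second rescope. A minimal sketch with raw HTTP, where the
endpoint, ids, project names and admin token are placeholders:

    import requests

    KS = 'http://keystone.example.com:5000/v3'

    def v3_auth(identity, scope=None):
        body = {'auth': {'identity': identity}}
        if scope:
            body['auth']['scope'] = scope
        r = requests.post('%s/auth/tokens' % KS, json=body)
        r.raise_for_status()
        # the token id travels in the X-Subject-Token response header
        return r.headers['X-Subject-Token'], r.json()['token']

    # unscoped token via the password method
    unscoped_id, _ = v3_auth({'methods': ['password'],
                              'password': {'user': {'id': 'USER_ID',
                                                    'password': 'PASSWORD'}}})
    # rescope it to project1 via the token method
    scope1 = {'project': {'name': 'PROJECT1', 'domain': {'name': 'Default'}}}
    scoped_id, scoped = v3_auth({'methods': ['token'],
                                 'token': {'id': unscoped_id}}, scope=scope1)
    # revoke the scoped token; the unscoped one stays valid for a second rescope
    requests.delete('%s/auth/tokens' % KS,
                    headers={'X-Auth-Token': 'ADMIN_TOKEN',
                             'X-Subject-Token': scoped_id})
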
diff --git a/tempest/api/identity/admin/v3/test_trusts.py b/tempest/api/identity/admin/v3/test_trusts.py
index cae20ad..8e3a7d1 100644
--- a/tempest/api/identity/admin/v3/test_trusts.py
+++ b/tempest/api/identity/admin/v3/test_trusts.py
@@ -13,6 +13,7 @@
import datetime
import re
from tempest.api.identity import base
+from tempest import auth
from tempest import clients
from tempest.common.utils import data_utils
from tempest import config
@@ -88,10 +89,13 @@
self.assertIsNotNone(self.trustee_user_id)
# Initialize a new client with the trustor credentials
- os = clients.Manager(username=self.trustor_username,
- password=self.trustor_password,
- tenant_name=self.trustor_project_name,
- interface=self._interface)
+ creds = auth.get_credentials(
+ username=self.trustor_username,
+ password=self.trustor_password,
+ tenant_name=self.trustor_project_name)
+ os = clients.Manager(
+ credentials=creds,
+ interface=self._interface)
self.trustor_client = os.identity_v3_client
def cleanup_user_and_roles(self):
diff --git a/tempest/api/identity/base.py b/tempest/api/identity/base.py
index a5bf248..e4e74c1 100644
--- a/tempest/api/identity/base.py
+++ b/tempest/api/identity/base.py
@@ -14,6 +14,7 @@
# under the License.
+from tempest import auth
from tempest import clients
from tempest.common.utils import data_utils
from tempest import config
@@ -120,6 +121,14 @@
self.projects = []
self.v3_roles = []
+ @property
+ def test_credentials(self):
+ return auth.get_credentials(username=self.test_user,
+ user_id=self.user['id'],
+ password=self.test_password,
+ tenant_name=self.test_tenant,
+ tenant_id=self.tenant['id'])
+
def setup_test_user(self):
"""Set up a test user."""
self.setup_test_tenant()
diff --git a/tempest/api/image/base.py b/tempest/api/image/base.py
index e439238..e31e635 100644
--- a/tempest/api/image/base.py
+++ b/tempest/api/image/base.py
@@ -42,11 +42,7 @@
skip_msg = ("%s skipped as glance is not available" % cls.__name__)
raise cls.skipException(skip_msg)
if CONF.compute.allow_tenant_isolation:
- creds = cls.isolated_creds.get_primary_creds()
- username, tenant_name, password = creds
- cls.os = clients.Manager(username=username,
- password=password,
- tenant_name=tenant_name)
+ cls.os = clients.Manager(cls.isolated_creds.get_primary_creds())
else:
cls.os = clients.Manager()
@@ -96,19 +92,12 @@
def setUpClass(cls):
super(BaseV1ImageMembersTest, cls).setUpClass()
if CONF.compute.allow_tenant_isolation:
- creds = cls.isolated_creds.get_alt_creds()
- username, tenant_name, password = creds
- cls.os_alt = clients.Manager(username=username,
- password=password,
- tenant_name=tenant_name)
- cls.alt_tenant_id = cls.isolated_creds.get_alt_tenant()['id']
+ cls.os_alt = clients.Manager(cls.isolated_creds.get_alt_creds())
else:
cls.os_alt = clients.AltManager()
- identity_client = cls._get_identity_admin_client()
- cls.alt_tenant_id = identity_client.get_tenant_by_name(
- cls.os_alt.credentials['tenant_name'])['id']
cls.alt_img_cli = cls.os_alt.image_client
+ cls.alt_tenant_id = cls.alt_img_cli.tenant_id
def _create_image(self):
image_file = StringIO.StringIO('*' * 1024)
@@ -139,12 +128,8 @@
super(BaseV2MemberImageTest, cls).setUpClass()
if CONF.compute.allow_tenant_isolation:
creds = cls.isolated_creds.get_alt_creds()
- username, tenant_name, password = creds
- cls.os_alt = clients.Manager(username=username,
- password=password,
- tenant_name=tenant_name,
- interface=cls._interface)
- cls.alt_tenant_id = cls.isolated_creds.get_alt_tenant()['id']
+ cls.os_alt = clients.Manager(creds)
+ cls.alt_tenant_id = cls.isolated_creds.get_alt_creds().tenant_id
else:
cls.os_alt = clients.AltManager()
alt_tenant_name = cls.os_alt.credentials['tenant_name']
diff --git a/tempest/api/image/v1/test_images.py b/tempest/api/image/v1/test_images.py
index 517123d..b90891b 100644
--- a/tempest/api/image/v1/test_images.py
+++ b/tempest/api/image/v1/test_images.py
@@ -109,6 +109,7 @@
"""
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(ListImagesTest, cls).setUpClass()
# We add a few images here to test the listing functionality of
@@ -244,6 +245,7 @@
class ListSnapshotImagesTest(base.BaseV1ImageTest):
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(ListSnapshotImagesTest, cls).setUpClass()
if not CONF.compute_feature_enabled.api_v3:
@@ -268,7 +270,7 @@
@classmethod
def tearDownClass(cls):
- for server in cls.servers:
+ for server in getattr(cls, "servers", []):
cls.servers_client.delete_server(server['id'])
super(ListSnapshotImagesTest, cls).tearDownClass()
@@ -288,6 +290,7 @@
return image_id
@test.attr(type='gate')
+ @test.services('compute')
def test_index_server_id(self):
# The images should contain images filtered by server id
resp, images = self.client.image_list_detail(
@@ -297,6 +300,7 @@
self.assertEqual(self.snapshot_set, result_set)
@test.attr(type='gate')
+ @test.services('compute')
def test_index_type(self):
# The list of servers should be filtered by image type
params = {'image_type': 'snapshot'}
@@ -307,6 +311,7 @@
self.assertIn(self.snapshot, result_set)
@test.attr(type='gate')
+ @test.services('compute')
def test_index_limit(self):
# Verify only the expected number of results are returned
resp, images = self.client.image_list_detail(limit=1)
@@ -315,6 +320,7 @@
self.assertEqual(1, len(images))
@test.attr(type='gate')
+ @test.services('compute')
def test_index_by_change_since(self):
# Verify an update image is returned
# Becoming ACTIVE will modify the updated time
diff --git a/tempest/api/image/v2/test_images.py b/tempest/api/image/v2/test_images.py
index ce11911..2592409 100644
--- a/tempest/api/image/v2/test_images.py
+++ b/tempest/api/image/v2/test_images.py
@@ -101,6 +101,7 @@
disk_format='iso',
visibility='public')
self.assertEqual(201, resp.status)
+ self.addCleanup(self.client.delete_image, body['id'])
self.assertEqual('queued', body['status'])
image_id = body['id']
@@ -134,6 +135,7 @@
"""
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(ListImagesTest, cls).setUpClass()
# We add a few images here to test the listing functionality of
diff --git a/tempest/api/network/admin/test_agent_management.py b/tempest/api/network/admin/test_agent_management.py
index 342bc6a..b848994 100644
--- a/tempest/api/network/admin/test_agent_management.py
+++ b/tempest/api/network/admin/test_agent_management.py
@@ -37,8 +37,10 @@
agents = body['agents']
# Hearthbeats must be excluded from comparison
self.agent.pop('heartbeat_timestamp', None)
+ self.agent.pop('configurations', None)
for agent in agents:
agent.pop('heartbeat_timestamp', None)
+ agent.pop('configurations', None)
self.assertIn(self.agent, agents)
@test.attr(type=['smoke'])
diff --git a/tempest/api/network/admin/test_dhcp_agent_scheduler.py b/tempest/api/network/admin/test_dhcp_agent_scheduler.py
index ecd992a..25e1cc0 100644
--- a/tempest/api/network/admin/test_dhcp_agent_scheduler.py
+++ b/tempest/api/network/admin/test_dhcp_agent_scheduler.py
@@ -20,13 +20,14 @@
_interface = 'json'
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(DHCPAgentSchedulersTestJSON, cls).setUpClass()
if not test.is_extension_enabled('dhcp_agent_scheduler', 'network'):
msg = "dhcp_agent_scheduler extension not enabled."
raise cls.skipException(msg)
# Create a network and make sure it will be hosted by a
- # dhcp agent.
+ # dhcp agent: this is done by creating a regular port
cls.network = cls.create_network()
cls.subnet = cls.create_subnet(cls.network)
cls.cidr = cls.subnet['cidr']
@@ -59,14 +60,31 @@
return network_id in network_ids
@test.attr(type='smoke')
- def test_remove_network_from_dhcp_agent(self):
- resp, body = self.admin_client.list_dhcp_agent_hosting_network(
- self.network['id'])
+ def test_add_remove_network_from_dhcp_agent(self):
+ # The agent is now bound to the network, we can free the port
+ self.client.delete_port(self.port['id'])
+ self.ports.remove(self.port)
+ agent = dict()
+ agent['agent_type'] = None
+ resp, body = self.admin_client.list_agents()
agents = body['agents']
- self.assertIsNotNone(agents)
- # Get an agent.
- agent = agents[0]
- network_id = self.network['id']
+ for a in agents:
+ if a['agent_type'] == 'DHCP agent':
+ agent = a
+ break
+ self.assertEqual(agent['agent_type'], 'DHCP agent', 'Could not find '
+ 'DHCP agent in agent list though dhcp_agent_scheduler'
+ ' is enabled.')
+ network = self.create_network()
+ network_id = network['id']
+ if self._check_network_in_dhcp_agent(network_id, agent):
+ self._remove_network_from_dhcp_agent(network_id, agent)
+ self._add_dhcp_agent_to_network(network_id, agent)
+ else:
+ self._add_dhcp_agent_to_network(network_id, agent)
+ self._remove_network_from_dhcp_agent(network_id, agent)
+
+ def _remove_network_from_dhcp_agent(self, network_id, agent):
resp, body = self.admin_client.remove_network_from_dhcp_agent(
agent_id=agent['id'],
network_id=network_id)
@@ -74,6 +92,13 @@
self.assertFalse(self._check_network_in_dhcp_agent(
network_id, agent))
+ def _add_dhcp_agent_to_network(self, network_id, agent):
+ resp, body = self.admin_client.add_dhcp_agent_to_network(
+ agent['id'], network_id)
+ self.assertEqual(resp['status'], '201')
+ self.assertTrue(self._check_network_in_dhcp_agent(
+ network_id, agent))
+
class DHCPAgentSchedulersTestXML(DHCPAgentSchedulersTestJSON):
_interface = 'xml'
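
The reworked scheduler test drives the Neutron agent-scheduler API directly;
roughly, and with the endpoint, token and network id as placeholders, the
calls are:

    import requests

    NEUTRON = 'http://neutron.example.com:9696/v2.0'
    HEADERS = {'X-Auth-Token': 'ADMIN_TOKEN'}

    # pick a DHCP agent from the admin agent list
    agents = requests.get('%s/agents' % NEUTRON,
                          headers=HEADERS).json()['agents']
    dhcp_agent = next(a for a in agents if a['agent_type'] == 'DHCP agent')

    # schedule a network onto the agent (201), then unschedule it again (204)
    requests.post('%s/agents/%s/dhcp-networks' % (NEUTRON, dhcp_agent['id']),
                  json={'network_id': 'NETWORK_ID'}, headers=HEADERS)
    requests.delete('%s/agents/%s/dhcp-networks/NETWORK_ID'
                    % (NEUTRON, dhcp_agent['id']), headers=HEADERS)
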
diff --git a/tempest/api/network/admin/test_external_network_extension.py b/tempest/api/network/admin/test_external_network_extension.py
new file mode 100644
index 0000000..c7fde77
--- /dev/null
+++ b/tempest/api/network/admin/test_external_network_extension.py
@@ -0,0 +1,94 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.network import base
+from tempest.common.utils import data_utils
+
+
+class ExternalNetworksTestJSON(base.BaseAdminNetworkTest):
+ _interface = 'json'
+
+ @classmethod
+ def setUpClass(cls):
+ super(ExternalNetworksTestJSON, cls).setUpClass()
+ cls.network = cls.create_network()
+
+ def _create_network(self, external=True):
+ post_body = {'name': data_utils.rand_name('network-')}
+ if external:
+ post_body['router:external'] = external
+ resp, body = self.admin_client.create_network(**post_body)
+ network = body['network']
+ self.assertEqual('201', resp['status'])
+ self.addCleanup(self.admin_client.delete_network, network['id'])
+ return network
+
+ def test_create_external_network(self):
+ # Create a network as an admin user specifying the
+ # external network extension attribute
+ ext_network = self._create_network()
+ # Verifies router:external parameter
+ self.assertIsNotNone(ext_network['id'])
+ self.assertTrue(ext_network['router:external'])
+
+ def test_update_external_network(self):
+ # Update a network as an admin user specifying the
+ # external network extension attribute
+ network = self._create_network(external=False)
+ self.assertFalse(network.get('router:external', False))
+ update_body = {'router:external': True}
+ resp, body = self.admin_client.update_network(network['id'],
+ **update_body)
+ self.assertEqual('200', resp['status'])
+ updated_network = body['network']
+ # Verify that router:external parameter was updated
+ self.assertTrue(updated_network['router:external'])
+
+ def test_list_external_networks(self):
+ # Create external_net
+ external_network = self._create_network()
+ # List networks as a normal user and confirm the external
+ # network extension attribute is returned for those networks
+ # that were created as external
+ resp, body = self.client.list_networks()
+ self.assertEqual('200', resp['status'])
+ networks_list = [net['id'] for net in body['networks']]
+ self.assertIn(external_network['id'], networks_list)
+ self.assertIn(self.network['id'], networks_list)
+ for net in body['networks']:
+ if net['id'] == self.network['id']:
+ self.assertFalse(net['router:external'])
+ elif net['id'] == external_network['id']:
+ self.assertTrue(net['router:external'])
+
+ def test_show_external_networks_attribute(self):
+ # Create external_net
+ external_network = self._create_network()
+ # Show an external network as a normal user and confirm the
+ # external network extension attribute is returned.
+ resp, body = self.client.show_network(external_network['id'])
+ self.assertEqual('200', resp['status'])
+ show_ext_net = body['network']
+ self.assertEqual(external_network['name'], show_ext_net['name'])
+ self.assertEqual(external_network['id'], show_ext_net['id'])
+ self.assertTrue(show_ext_net['router:external'])
+ resp, body = self.client.show_network(self.network['id'])
+ self.assertEqual('200', resp['status'])
+ show_net = body['network']
+ # Verify with show that router:external is False for network
+ self.assertEqual(self.network['name'], show_net['name'])
+ self.assertEqual(self.network['id'], show_net['id'])
+ self.assertFalse(show_net['router:external'])
+
+
+class ExternalNetworksTestXML(ExternalNetworksTestJSON):
+ _interface = 'xml'
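
The new external-network tests exercise the 'router:external' attribute,
which only an admin may set. As a standalone illustration, with the URL and
token as placeholders:

    import requests

    NEUTRON = 'http://neutron.example.com:9696/v2.0'
    HEADERS = {'X-Auth-Token': 'ADMIN_TOKEN'}

    # create a network flagged as external
    r = requests.post('%s/networks' % NEUTRON, headers=HEADERS,
                      json={'network': {'name': 'ext-net',
                                        'router:external': True}})
    net = r.json()['network']

    # flip the flag on an existing network
    requests.put('%s/networks/%s' % (NEUTRON, net['id']), headers=HEADERS,
                 json={'network': {'router:external': False}})
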
diff --git a/tempest/api/network/admin/test_floating_ips_admin_actions.py b/tempest/api/network/admin/test_floating_ips_admin_actions.py
new file mode 100644
index 0000000..5728432
--- /dev/null
+++ b/tempest/api/network/admin/test_floating_ips_admin_actions.py
@@ -0,0 +1,72 @@
+# Copyright 2014 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.network import base
+from tempest import clients
+from tempest import config
+from tempest import test
+
+CONF = config.CONF
+
+
+class FloatingIPAdminTestJSON(base.BaseAdminNetworkTest):
+ _interface = 'json'
+ force_tenant_isolation = True
+
+ @classmethod
+ def setUpClass(cls):
+ super(FloatingIPAdminTestJSON, cls).setUpClass()
+ cls.ext_net_id = CONF.network.public_network_id
+ cls.floating_ip = cls.create_floatingip(cls.ext_net_id)
+ cls.alt_manager = clients.Manager(cls.isolated_creds.get_alt_creds())
+ cls.alt_client = cls.alt_manager.network_client
+
+ @test.attr(type='smoke')
+ def test_list_floating_ips_from_admin_and_nonadmin(self):
+ # Create floating ip from admin user
+ resp, floating_ip_admin = self.admin_client.create_floatingip(
+ floating_network_id=self.ext_net_id)
+ self.assertEqual('201', resp['status'])
+ self.addCleanup(self.admin_client.delete_floatingip,
+ floating_ip_admin['floatingip']['id'])
+ # Create floating ip from alt user
+ resp, body = self.alt_client.create_floatingip(
+ floating_network_id=self.ext_net_id)
+ self.assertEqual('201', resp['status'])
+ floating_ip_alt = body['floatingip']
+ self.addCleanup(self.alt_client.delete_floatingip,
+ floating_ip_alt['id'])
+ # List floating ips from admin
+ resp, body = self.admin_client.list_floatingips()
+ self.assertEqual('200', resp['status'])
+ floating_ip_ids_admin = [f['id'] for f in body['floatingips']]
+ # Check that admin sees all floating ips
+ self.assertIn(self.floating_ip['id'], floating_ip_ids_admin)
+ self.assertIn(floating_ip_admin['floatingip']['id'],
+ floating_ip_ids_admin)
+ self.assertIn(floating_ip_alt['id'], floating_ip_ids_admin)
+ # List floating ips from nonadmin
+ resp, body = self.client.list_floatingips()
+ floating_ip_ids = [f['id'] for f in body['floatingips']]
+ # Check that nonadmin user doesn't see floating ip created from admin
+ # and floating ip that is created in another tenant (alt user)
+ self.assertIn(self.floating_ip['id'], floating_ip_ids)
+ self.assertNotIn(floating_ip_admin['floatingip']['id'],
+ floating_ip_ids)
+ self.assertNotIn(floating_ip_alt['id'], floating_ip_ids)
+
+
+class FloatingIPAdminTestXML(FloatingIPAdminTestJSON):
+ _interface = 'xml'
diff --git a/tempest/api/network/admin/test_l3_agent_scheduler.py b/tempest/api/network/admin/test_l3_agent_scheduler.py
index eb397ba..f4050c5 100644
--- a/tempest/api/network/admin/test_l3_agent_scheduler.py
+++ b/tempest/api/network/admin/test_l3_agent_scheduler.py
@@ -76,11 +76,8 @@
resp, body = self.admin_client.remove_router_from_l3_agent(
self.agent['id'], router['router']['id'])
self.assertEqual('204', resp['status'])
- resp, body = self.admin_client.list_l3_agents_hosting_router(
- router['router']['id'])
- for agent in body['agents']:
- l3_agent_ids.append(agent['id'])
- self.assertNotIn(self.agent['id'], l3_agent_ids)
+ # NOTE(afazekas): The deletion is not asserted here, because neutron
+ # is not forbidden from rescheduling the router to the same agent
class L3AgentSchedulerTestXML(L3AgentSchedulerTestJSON):
diff --git a/tempest/api/network/admin/test_lbaas_agent_scheduler.py b/tempest/api/network/admin/test_lbaas_agent_scheduler.py
index a5ba90f..675c62d 100644
--- a/tempest/api/network/admin/test_lbaas_agent_scheduler.py
+++ b/tempest/api/network/admin/test_lbaas_agent_scheduler.py
@@ -35,6 +35,7 @@
"""
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(LBaaSAgentSchedulerTestJSON, cls).setUpClass()
if not test.is_extension_enabled('lbaas_agent_scheduler', 'network'):
diff --git a/tempest/api/network/admin/test_load_balancer_admin_actions.py b/tempest/api/network/admin/test_load_balancer_admin_actions.py
new file mode 100644
index 0000000..fe4fc60
--- /dev/null
+++ b/tempest/api/network/admin/test_load_balancer_admin_actions.py
@@ -0,0 +1,118 @@
+# Copyright 2014 Mirantis.inc
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.network import base
+from tempest.common.utils import data_utils
+from tempest import test
+
+
+class LoadBalancerAdminTestJSON(base.BaseAdminNetworkTest):
+ _interface = 'json'
+
+ """
+ Test admin actions for load balancer.
+
+ Create VIP for another tenant
+ Create health monitor for another tenant
+ """
+
+ @classmethod
+ @test.safe_setup
+ def setUpClass(cls):
+ super(LoadBalancerAdminTestJSON, cls).setUpClass()
+ if not test.is_extension_enabled('lbaas', 'network'):
+ msg = "lbaas extension not enabled."
+ raise cls.skipException(msg)
+ cls.force_tenant_isolation = True
+ manager = cls.get_client_manager()
+ cls.client = manager.network_client
+ cls.tenant_id = cls.isolated_creds.get_primary_creds().tenant_id
+ cls.network = cls.create_network()
+ cls.subnet = cls.create_subnet(cls.network)
+ cls.pool = cls.create_pool(data_utils.rand_name('pool-'),
+ "ROUND_ROBIN", "HTTP", cls.subnet)
+
+ @test.attr(type='smoke')
+ def test_create_vip_as_admin_for_another_tenant(self):
+ name = data_utils.rand_name('vip-')
+ resp, body = self.admin_client.create_pool(
+ name=data_utils.rand_name('pool-'), lb_method="ROUND_ROBIN",
+ protocol="HTTP", subnet_id=self.subnet['id'],
+ tenant_id=self.tenant_id)
+ self.assertEqual('201', resp['status'])
+ pool = body['pool']
+ self.addCleanup(self.admin_client.delete_pool, pool['id'])
+ resp, body = self.admin_client.create_vip(name=name,
+ protocol="HTTP",
+ protocol_port=80,
+ subnet_id=self.subnet['id'],
+ pool_id=pool['id'],
+ tenant_id=self.tenant_id)
+ self.assertEqual('201', resp['status'])
+ vip = body['vip']
+ self.addCleanup(self.admin_client.delete_vip, vip['id'])
+ self.assertIsNotNone(vip['id'])
+ self.assertEqual(self.tenant_id, vip['tenant_id'])
+ resp, body = self.client.show_vip(vip['id'])
+ self.assertEqual('200', resp['status'])
+ show_vip = body['vip']
+ self.assertEqual(vip['id'], show_vip['id'])
+ self.assertEqual(vip['name'], show_vip['name'])
+
+ @test.attr(type='smoke')
+ def test_create_health_monitor_as_admin_for_another_tenant(self):
+ resp, body = (
+ self.admin_client.create_health_monitor(delay=4,
+ max_retries=3,
+ type="TCP",
+ timeout=1,
+ tenant_id=self.tenant_id))
+ self.assertEqual('201', resp['status'])
+ health_monitor = body['health_monitor']
+ self.addCleanup(self.admin_client.delete_health_monitor,
+ health_monitor['id'])
+ self.assertIsNotNone(health_monitor['id'])
+ self.assertEqual(self.tenant_id, health_monitor['tenant_id'])
+ resp, body = self.client.show_health_monitor(health_monitor['id'])
+ self.assertEqual('200', resp['status'])
+ show_health_monitor = body['health_monitor']
+ self.assertEqual(health_monitor['id'], show_health_monitor['id'])
+
+ @test.attr(type='smoke')
+ def test_create_pool_from_admin_user_other_tenant(self):
+ resp, body = self.admin_client.create_pool(
+ name=data_utils.rand_name('pool-'), lb_method="ROUND_ROBIN",
+ protocol="HTTP", subnet_id=self.subnet['id'],
+ tenant_id=self.tenant_id)
+ self.assertEqual('201', resp['status'])
+ pool = body['pool']
+ self.addCleanup(self.admin_client.delete_pool, pool['id'])
+ self.assertIsNotNone(pool['id'])
+ self.assertEqual(self.tenant_id, pool['tenant_id'])
+
+ @test.attr(type='smoke')
+ def test_create_member_from_admin_user_other_tenant(self):
+ resp, body = self.admin_client.create_member(
+ address="10.0.9.47", protocol_port=80, pool_id=self.pool['id'],
+ tenant_id=self.tenant_id)
+ self.assertEqual('201', resp['status'])
+ member = body['member']
+ self.addCleanup(self.admin_client.delete_member, member['id'])
+ self.assertIsNotNone(member['id'])
+ self.assertEqual(self.tenant_id, member['tenant_id'])
+
+
+class LoadBalancerAdminTestXML(LoadBalancerAdminTestJSON):
+ _interface = 'xml'
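
The admin LBaaS tests all rely on the same mechanism: an admin may pass an
explicit tenant_id in the create request to assign the resource to another
tenant. A sketch for the pool case, assuming LBaaS v1 paths and with the URL,
token and ids as placeholders:

    import requests

    NEUTRON = 'http://neutron.example.com:9696/v2.0'
    HEADERS = {'X-Auth-Token': 'ADMIN_TOKEN'}

    pool = {'pool': {'name': 'pool-for-other-tenant',
                     'lb_method': 'ROUND_ROBIN',
                     'protocol': 'HTTP',
                     'subnet_id': 'SUBNET_ID',
                     'tenant_id': 'OTHER_TENANT_ID'}}
    r = requests.post('%s/lb/pools' % NEUTRON, json=pool, headers=HEADERS)
    print(r.status_code)                          # expected: 201
    print(r.json()['pool']['tenant_id'])          # owned by the other tenant
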
diff --git a/tempest/api/network/test_quotas.py b/tempest/api/network/admin/test_quotas.py
similarity index 86%
rename from tempest/api/network/test_quotas.py
rename to tempest/api/network/admin/test_quotas.py
index 38784d8..a307986 100644
--- a/tempest/api/network/test_quotas.py
+++ b/tempest/api/network/admin/test_quotas.py
@@ -15,12 +15,11 @@
from tempest.api.network import base
-from tempest import clients
from tempest.common.utils import data_utils
from tempest import test
-class QuotasTest(base.BaseNetworkTest):
+class QuotasTest(base.BaseAdminNetworkTest):
_interface = 'json'
"""
@@ -32,13 +31,9 @@
update quotas for a specified tenant
reset quotas to default values for a specified tenant
- v2.0 of the API is assumed. It is also assumed that the following
- option is defined in the [service_available] section of etc/tempest.conf:
-
- neutron as True
-
- Finally, it is assumed that the per-tenant quota extension API is
- configured in /etc/neutron/neutron.conf as follows:
+ v2.0 of the API is assumed.
+ It is also assumed that the per-tenant quota extension API is configured
+ in /etc/neutron/neutron.conf as follows:
quota_driver = neutron.db.quota_db.DbQuotaDriver
"""
@@ -49,9 +44,7 @@
if not test.is_extension_enabled('quotas', 'network'):
msg = "quotas extension not enabled."
raise cls.skipException(msg)
- admin_manager = clients.AdminManager()
- cls.admin_client = admin_manager.network_client
- cls.identity_admin_client = admin_manager.identity_client
+ cls.identity_admin_client = cls.os_adm.identity_client
@test.attr(type='gate')
def test_quotas(self):
diff --git a/tempest/api/network/base.py b/tempest/api/network/base.py
index 231c4bf..dcd9bff 100644
--- a/tempest/api/network/base.py
+++ b/tempest/api/network/base.py
@@ -79,9 +79,17 @@
cls.floating_ips = []
cls.metering_labels = []
cls.metering_label_rules = []
+ cls.fw_rules = []
+ cls.fw_policies = []
@classmethod
def tearDownClass(cls):
+ # Clean up firewall policies
+ for fw_policy in cls.fw_policies:
+ cls.client.delete_firewall_policy(fw_policy['id'])
+ # Clean up firewall rules
+ for fw_rule in cls.fw_rules:
+ cls.client.delete_firewall_rule(fw_rule['id'])
# Clean up ike policies
for ikepolicy in cls.ikepolicies:
cls.client.delete_ikepolicy(ikepolicy['id'])
@@ -90,15 +98,11 @@
cls.client.delete_vpnservice(vpnservice['id'])
# Clean up floating IPs
for floating_ip in cls.floating_ips:
- cls.client.delete_floating_ip(floating_ip['id'])
+ cls.client.delete_floatingip(floating_ip['id'])
# Clean up routers
for router in cls.routers:
- resp, body = cls.client.list_router_interfaces(router['id'])
- interfaces = body['ports']
- for i in interfaces:
- cls.client.remove_router_interface_with_subnet_id(
- router['id'], i['fixed_ips'][0]['subnet_id'])
- cls.client.delete_router(router['id'])
+ cls.delete_router(router)
+
# Clean up health monitors
for health_monitor in cls.health_monitors:
cls.client.delete_health_monitor(health_monitor['id'])
@@ -141,7 +145,7 @@
return network
@classmethod
- def create_subnet(cls, network):
+ def create_subnet(cls, network, gateway=None):
"""Wrapper utility that returns a test subnet."""
# The cidr and mask_bits depend on the ip version.
if cls._ip_version == 4:
@@ -152,14 +156,19 @@
mask_bits = CONF.network.tenant_network_v6_mask_bits
# Find a cidr that is not in use yet and create a subnet with it
for subnet_cidr in cidr.subnet(mask_bits):
+ if not gateway:
+ gateway = str(netaddr.IPAddress(subnet_cidr) + 1)
try:
resp, body = cls.client.create_subnet(
network_id=network['id'],
cidr=str(subnet_cidr),
- ip_version=cls._ip_version)
+ ip_version=cls._ip_version,
+ gateway_ip=gateway)
break
except exceptions.BadRequest as e:
is_overlapping_cidr = 'overlaps with another subnet' in str(e)
+ # Unset gateway value if there is an overlapping subnet
+ gateway = None
if not is_overlapping_cidr:
raise
else:
@@ -170,14 +179,22 @@
return subnet
@classmethod
- def create_port(cls, network):
+ def create_port(cls, network, **kwargs):
"""Wrapper utility that returns a test port."""
- resp, body = cls.client.create_port(network_id=network['id'])
+ resp, body = cls.client.create_port(network_id=network['id'],
+ **kwargs)
port = body['port']
cls.ports.append(port)
return port
@classmethod
+ def update_port(cls, port, **kwargs):
+ """Wrapper utility that updates a test port."""
+ resp, body = cls.client.update_port(port['id'],
+ **kwargs)
+ return body['port']
+
+ @classmethod
def create_router(cls, router_name=None, admin_state_up=False,
external_network_id=None, enable_snat=None):
ext_gw_info = {}
@@ -193,11 +210,10 @@
return router
@classmethod
- def create_floating_ip(cls, external_network_id, **kwargs):
+ def create_floatingip(cls, external_network_id):
"""Wrapper utility that returns a test floating IP."""
- resp, body = cls.client.create_floating_ip(
- external_network_id,
- **kwargs)
+ resp, body = cls.client.create_floatingip(
+ floating_network_id=external_network_id)
fip = body['floatingip']
cls.floating_ips.append(fip)
return fip
@@ -277,12 +293,13 @@
"""Wrapper utility that returns a router interface."""
resp, interface = cls.client.add_router_interface_with_subnet_id(
router_id, subnet_id)
+ return interface
@classmethod
def create_vpnservice(cls, subnet_id, router_id):
"""Wrapper utility that returns a test vpn service."""
resp, body = cls.client.create_vpnservice(
- subnet_id, router_id, admin_state_up=True,
+ subnet_id=subnet_id, router_id=router_id, admin_state_up=True,
name=data_utils.rand_name("vpnservice-"))
vpnservice = body['vpnservice']
cls.vpnservices.append(vpnservice)
@@ -291,11 +308,40 @@
@classmethod
def create_ikepolicy(cls, name):
"""Wrapper utility that returns a test ike policy."""
- resp, body = cls.client.create_ikepolicy(name)
+ resp, body = cls.client.create_ikepolicy(name=name)
ikepolicy = body['ikepolicy']
cls.ikepolicies.append(ikepolicy)
return ikepolicy
+ @classmethod
+ def create_firewall_rule(cls, action, protocol):
+ """Wrapper utility that returns a test firewall rule."""
+ resp, body = cls.client.create_firewall_rule(
+ name=data_utils.rand_name("fw-rule"),
+ action=action,
+ protocol=protocol)
+ fw_rule = body['firewall_rule']
+ cls.fw_rules.append(fw_rule)
+ return fw_rule
+
+ @classmethod
+ def create_firewall_policy(cls):
+ """Wrapper utility that returns a test firewall policy."""
+ resp, body = cls.client.create_firewall_policy(
+ name=data_utils.rand_name("fw-policy"))
+ fw_policy = body['firewall_policy']
+ cls.fw_policies.append(fw_policy)
+ return fw_policy
+
+ @classmethod
+ def delete_router(cls, router):
+ resp, body = cls.client.list_router_interfaces(router['id'])
+ interfaces = body['ports']
+ for i in interfaces:
+ cls.client.remove_router_interface_with_subnet_id(
+ router['id'], i['fixed_ips'][0]['subnet_id'])
+ cls.client.delete_router(router['id'])
+
class BaseAdminNetworkTest(BaseNetworkTest):
@@ -311,11 +357,7 @@
raise cls.skipException(msg)
if (CONF.compute.allow_tenant_isolation or
cls.force_tenant_isolation is True):
- creds = cls.isolated_creds.get_admin_creds()
- admin_username, admin_tenant_name, admin_password = creds
- cls.os_adm = clients.Manager(username=admin_username,
- password=admin_password,
- tenant_name=admin_tenant_name,
+ cls.os_adm = clients.Manager(cls.isolated_creds.get_admin_creds(),
interface=cls._interface)
else:
cls.os_adm = clients.ComputeAdminManager(interface=cls._interface)
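
The gateway defaulting added to create_subnet() picks the first usable
address of whichever candidate CIDR ends up being used. The netaddr behaviour
it relies on can be seen in isolation; the values below are illustrative only
and the indexing form is simply an equivalent way of taking "network + 1":

    import netaddr

    tenant_cidr = netaddr.IPNetwork('10.100.0.0/16')
    mask_bits = 24
    for subnet_cidr in tenant_cidr.subnet(mask_bits):
        # first host address of the candidate, e.g. 10.100.0.1 for 10.100.0.0/24
        gateway = str(subnet_cidr[1])
        print(subnet_cidr, gateway)
        break
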
diff --git a/tempest/api/network/test_extra_dhcp_options.py b/tempest/api/network/test_extra_dhcp_options.py
index ed86d75..371c651 100644
--- a/tempest/api/network/test_extra_dhcp_options.py
+++ b/tempest/api/network/test_extra_dhcp_options.py
@@ -36,6 +36,7 @@
"""
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(ExtraDHCPOptionsTestJSON, cls).setUpClass()
if not test.is_extension_enabled('extra_dhcp_opt', 'network'):
diff --git a/tempest/api/network/test_floating_ips.py b/tempest/api/network/test_floating_ips.py
index b31c090..d0d25ec 100644
--- a/tempest/api/network/test_floating_ips.py
+++ b/tempest/api/network/test_floating_ips.py
@@ -44,6 +44,7 @@
"""
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(FloatingIPTestJSON, cls).setUpClass()
if not test.is_extension_enabled('router', 'network'):
@@ -65,16 +66,22 @@
@test.attr(type='smoke')
def test_create_list_show_update_delete_floating_ip(self):
# Creates a floating IP
- created_floating_ip = self.create_floating_ip(
- self.ext_net_id, port_id=self.ports[0]['id'])
+ resp, body = self.client.create_floatingip(
+ floating_network_id=self.ext_net_id, port_id=self.ports[0]['id'])
+ self.assertEqual('201', resp['status'])
+ created_floating_ip = body['floatingip']
+ self.addCleanup(self.client.delete_floatingip,
+ created_floating_ip['id'])
self.assertIsNotNone(created_floating_ip['id'])
self.assertIsNotNone(created_floating_ip['tenant_id'])
self.assertIsNotNone(created_floating_ip['floating_ip_address'])
self.assertEqual(created_floating_ip['port_id'], self.ports[0]['id'])
self.assertEqual(created_floating_ip['floating_network_id'],
self.ext_net_id)
+ self.assertIn(created_floating_ip['fixed_ip_address'],
+ [ip['ip_address'] for ip in self.ports[0]['fixed_ips']])
# Verifies the details of a floating_ip
- resp, floating_ip = self.client.show_floating_ip(
+ resp, floating_ip = self.client.show_floatingip(
created_floating_ip['id'])
self.assertEqual('200', resp['status'])
shown_floating_ip = floating_ip['floatingip']
@@ -95,7 +102,7 @@
floatingip_id_list.append(f['id'])
self.assertIn(created_floating_ip['id'], floatingip_id_list)
# Associate floating IP to the other port
- resp, floating_ip = self.client.update_floating_ip(
+ resp, floating_ip = self.client.update_floatingip(
created_floating_ip['id'], port_id=self.ports[1]['id'])
self.assertEqual('200', resp['status'])
updated_floating_ip = floating_ip['floatingip']
@@ -105,7 +112,7 @@
self.assertEqual(updated_floating_ip['router_id'], self.router['id'])
# Disassociate floating IP from the port
- resp, floating_ip = self.client.update_floating_ip(
+ resp, floating_ip = self.client.update_floatingip(
created_floating_ip['id'], port_id=None)
self.assertEqual('200', resp['status'])
updated_floating_ip = floating_ip['floatingip']
@@ -116,17 +123,22 @@
@test.attr(type='smoke')
def test_floating_ip_delete_port(self):
# Create a floating IP
- created_floating_ip = self.create_floating_ip(self.ext_net_id)
+ resp, body = self.client.create_floatingip(
+ floating_network_id=self.ext_net_id)
+ self.assertEqual('201', resp['status'])
+ created_floating_ip = body['floatingip']
+ self.addCleanup(self.client.delete_floatingip,
+ created_floating_ip['id'])
# Create a port
resp, port = self.client.create_port(network_id=self.network['id'])
created_port = port['port']
- resp, floating_ip = self.client.update_floating_ip(
+ resp, floating_ip = self.client.update_floatingip(
created_floating_ip['id'], port_id=created_port['id'])
self.assertEqual('200', resp['status'])
# Delete port
self.client.delete_port(created_port['id'])
# Verifies the details of the floating_ip
- resp, floating_ip = self.client.show_floating_ip(
+ resp, floating_ip = self.client.show_floatingip(
created_floating_ip['id'])
self.assertEqual('200', resp['status'])
shown_floating_ip = floating_ip['floatingip']
@@ -139,8 +151,12 @@
@test.attr(type='smoke')
def test_floating_ip_update_different_router(self):
# Associate a floating IP to a port on a router
- created_floating_ip = self.create_floating_ip(
- self.ext_net_id, port_id=self.ports[1]['id'])
+ resp, body = self.client.create_floatingip(
+ floating_network_id=self.ext_net_id, port_id=self.ports[1]['id'])
+ self.assertEqual('201', resp['status'])
+ created_floating_ip = body['floatingip']
+ self.addCleanup(self.client.delete_floatingip,
+ created_floating_ip['id'])
self.assertEqual(created_floating_ip['router_id'], self.router['id'])
network2 = self.create_network()
subnet2 = self.create_subnet(network2)
@@ -149,7 +165,7 @@
self.create_router_interface(router2['id'], subnet2['id'])
port_other_router = self.create_port(network2)
# Associate floating IP to the other port on another router
- resp, floating_ip = self.client.update_floating_ip(
+ resp, floating_ip = self.client.update_floatingip(
created_floating_ip['id'], port_id=port_other_router['id'])
self.assertEqual('200', resp['status'])
updated_floating_ip = floating_ip['floatingip']
@@ -158,6 +174,24 @@
port_other_router['id'])
self.assertIsNotNone(updated_floating_ip['fixed_ip_address'])
+ @test.attr(type='smoke')
+ def test_create_floating_ip_specifying_a_fixed_ip_address(self):
+ resp, body = self.client.create_floatingip(
+ floating_network_id=self.ext_net_id,
+ port_id=self.ports[1]['id'],
+ fixed_ip_address=self.ports[1]['fixed_ips'][0]['ip_address'])
+ self.assertEqual('201', resp['status'])
+ created_floating_ip = body['floatingip']
+ self.addCleanup(self.client.delete_floatingip,
+ created_floating_ip['id'])
+ self.assertIsNotNone(created_floating_ip['id'])
+ self.assertEqual(created_floating_ip['fixed_ip_address'],
+ self.ports[1]['fixed_ips'][0]['ip_address'])
+ resp, floating_ip = self.client.update_floatingip(
+ created_floating_ip['id'], port_id=None)
+ self.assertEqual('200', resp['status'])
+ self.assertIsNone(floating_ip['floatingip']['port_id'])
+
class FloatingIPTestXML(FloatingIPTestJSON):
_interface = 'xml'
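
For reference, the floating IP operations the updated tests now call directly
correspond to POST and PUT on /v2.0/floatingips. A standalone sketch with a
placeholder URL, token and ids:

    import requests

    NEUTRON = 'http://neutron.example.com:9696/v2.0'
    HEADERS = {'X-Auth-Token': 'TOKEN'}

    # create a floating IP bound to a port, optionally pinning the fixed address
    body = {'floatingip': {'floating_network_id': 'EXT_NET_ID',
                           'port_id': 'PORT_ID',
                           'fixed_ip_address': '10.0.0.5'}}
    fip = requests.post('%s/floatingips' % NEUTRON, json=body,
                        headers=HEADERS).json()['floatingip']

    # re-associate the floating IP with another port, then disassociate it
    requests.put('%s/floatingips/%s' % (NEUTRON, fip['id']), headers=HEADERS,
                 json={'floatingip': {'port_id': 'OTHER_PORT_ID'}})
    requests.put('%s/floatingips/%s' % (NEUTRON, fip['id']), headers=HEADERS,
                 json={'floatingip': {'port_id': None}})
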
diff --git a/tempest/api/network/test_fwaas_extensions.py b/tempest/api/network/test_fwaas_extensions.py
new file mode 100644
index 0000000..555cbda
--- /dev/null
+++ b/tempest/api/network/test_fwaas_extensions.py
@@ -0,0 +1,235 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.network import base
+from tempest.common.utils import data_utils
+from tempest import config
+from tempest import exceptions
+from tempest import test
+
+CONF = config.CONF
+
+
+class FWaaSExtensionTestJSON(base.BaseNetworkTest):
+ _interface = 'json'
+
+ """
+ Tests the following operations in the Neutron API using the REST client for
+ Neutron:
+
+ List firewall rules
+ Create firewall rule
+ Update firewall rule
+ Delete firewall rule
+ Show firewall rule
+ List firewall policies
+ Create firewall policy
+ Update firewall policy
+ Delete firewall policy
+ Show firewall policy
+ List firewall
+ Create firewall
+ Update firewall
+ Delete firewall
+ Show firewall
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ super(FWaaSExtensionTestJSON, cls).setUpClass()
+ if not test.is_extension_enabled('fwaas', 'network'):
+ msg = "FWaaS Extension not enabled."
+ raise cls.skipException(msg)
+ cls.fw_rule = cls.create_firewall_rule("allow", "tcp")
+ cls.fw_policy = cls.create_firewall_policy()
+
+ def _try_delete_policy(self, policy_id):
+ # delete policy, if it exists
+ try:
+ self.client.delete_firewall_policy(policy_id)
+ # if policy is not found, this means it was deleted in the test
+ except exceptions.NotFound:
+ pass
+
+ def _try_delete_firewall(self, fw_id):
+ # delete firewall, if it exists
+ try:
+ self.client.delete_firewall(fw_id)
+ # if firewall is not found, this means it was deleted in the test
+ except exceptions.NotFound:
+ pass
+
+ self.client.wait_for_resource_deletion('firewall', fw_id)
+
+ def _wait_for_active(self, fw_id):
+ def _wait():
+ resp, firewall = self.client.show_firewall(fw_id)
+ self.assertEqual('200', resp['status'])
+ firewall = firewall['firewall']
+ return firewall['status'] == 'ACTIVE'
+
+ if not test.call_until_true(_wait, CONF.network.build_timeout,
+ CONF.network.build_interval):
+ m = 'Timed out waiting for firewall %s to become ACTIVE.' % fw_id
+ raise exceptions.TimeoutException(m)
+
+ @test.attr(type='smoke')
+ def test_list_firewall_rules(self):
+ # List firewall rules
+ resp, fw_rules = self.client.list_firewall_rules()
+ self.assertEqual('200', resp['status'])
+ fw_rules = fw_rules['firewall_rules']
+ self.assertIn((self.fw_rule['id'],
+ self.fw_rule['name'],
+ self.fw_rule['action'],
+ self.fw_rule['protocol'],
+ self.fw_rule['ip_version'],
+ self.fw_rule['enabled']),
+ [(m['id'],
+ m['name'],
+ m['action'],
+ m['protocol'],
+ m['ip_version'],
+ m['enabled']) for m in fw_rules])
+
+ @test.attr(type='smoke')
+ def test_create_update_delete_firewall_rule(self):
+ # Create firewall rule
+ resp, body = self.client.create_firewall_rule(
+ name=data_utils.rand_name("fw-rule"),
+ action="allow",
+ protocol="tcp")
+ self.assertEqual('201', resp['status'])
+ fw_rule_id = body['firewall_rule']['id']
+
+ # Update firewall rule
+ resp, body = self.client.update_firewall_rule(fw_rule_id,
+ shared=True)
+ self.assertEqual('200', resp['status'])
+ self.assertTrue(body["firewall_rule"]['shared'])
+
+ # Delete firewall rule
+ resp, _ = self.client.delete_firewall_rule(fw_rule_id)
+ self.assertEqual('204', resp['status'])
+ # Confirm deletion
+ resp, fw_rules = self.client.list_firewall_rules()
+ self.assertNotIn(fw_rule_id,
+ [m['id'] for m in fw_rules['firewall_rules']])
+
+ @test.attr(type='smoke')
+ def test_show_firewall_rule(self):
+ # show a created firewall rule
+ resp, fw_rule = self.client.show_firewall_rule(self.fw_rule['id'])
+ self.assertEqual('200', resp['status'])
+ for key, value in fw_rule['firewall_rule'].iteritems():
+ self.assertEqual(self.fw_rule[key], value)
+
+ @test.attr(type='smoke')
+ def test_list_firewall_policies(self):
+ resp, fw_policies = self.client.list_firewall_policies()
+ self.assertEqual('200', resp['status'])
+ fw_policies = fw_policies['firewall_policies']
+ self.assertIn((self.fw_policy['id'],
+ self.fw_policy['name'],
+ self.fw_policy['firewall_rules']),
+ [(m['id'],
+ m['name'],
+ m['firewall_rules']) for m in fw_policies])
+
+ @test.attr(type='smoke')
+ def test_create_update_delete_firewall_policy(self):
+ # Create firewall policy
+ resp, body = self.client.create_firewall_policy(
+ name=data_utils.rand_name("fw-policy"))
+ self.assertEqual('201', resp['status'])
+ fw_policy_id = body['firewall_policy']['id']
+ self.addCleanup(self._try_delete_policy, fw_policy_id)
+
+ # Update firewall policy
+ resp, body = self.client.update_firewall_policy(fw_policy_id,
+ shared=True,
+ name="updated_policy")
+ self.assertEqual('200', resp['status'])
+ updated_fw_policy = body["firewall_policy"]
+ self.assertTrue(updated_fw_policy['shared'])
+ self.assertEqual("updated_policy", updated_fw_policy['name'])
+
+ # Delete firewall policy
+ resp, _ = self.client.delete_firewall_policy(fw_policy_id)
+ self.assertEqual('204', resp['status'])
+ # Confirm deletion
+ resp, fw_policies = self.client.list_firewall_policies()
+ fw_policies = fw_policies['firewall_policies']
+ self.assertNotIn(fw_policy_id, [m['id'] for m in fw_policies])
+
+ @test.attr(type='smoke')
+ def test_show_firewall_policy(self):
+ # show a created firewall policy
+ resp, fw_policy = self.client.show_firewall_policy(
+ self.fw_policy['id'])
+ self.assertEqual('200', resp['status'])
+ fw_policy = fw_policy['firewall_policy']
+ for key, value in fw_policy.iteritems():
+ self.assertEqual(self.fw_policy[key], value)
+
+ @test.attr(type='smoke')
+ def test_create_show_delete_firewall(self):
+ # Create tenant network resources required for an ACTIVE firewall
+ network = self.create_network()
+ subnet = self.create_subnet(network)
+ router = self.create_router(
+ data_utils.rand_name('router-'),
+ admin_state_up=True)
+ self.client.add_router_interface_with_subnet_id(
+ router['id'], subnet['id'])
+
+ # Create firewall
+ resp, body = self.client.create_firewall(
+ name=data_utils.rand_name("firewall"),
+ firewall_policy_id=self.fw_policy['id'])
+ self.assertEqual('201', resp['status'])
+ created_firewall = body['firewall']
+ firewall_id = created_firewall['id']
+ self.addCleanup(self._try_delete_firewall, firewall_id)
+
+ self._wait_for_active(firewall_id)
+
+ # show a created firewall
+ resp, firewall = self.client.show_firewall(firewall_id)
+ self.assertEqual('200', resp['status'])
+ firewall = firewall['firewall']
+
+ for key, value in firewall.iteritems():
+ if key == 'status':
+ continue
+ self.assertEqual(created_firewall[key], value)
+
+ # list firewall
+ resp, firewalls = self.client.list_firewalls()
+ self.assertEqual('200', resp['status'])
+ firewalls = firewalls['firewalls']
+ self.assertIn((created_firewall['id'],
+ created_firewall['name'],
+ created_firewall['firewall_policy_id']),
+ [(m['id'],
+ m['name'],
+ m['firewall_policy_id']) for m in firewalls])
+
+ # Delete firewall
+ resp, _ = self.client.delete_firewall(firewall_id)
+ self.assertEqual('204', resp['status'])
+
+
+class FWaaSExtensionTestXML(FWaaSExtensionTestJSON):
+ _interface = 'xml'
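Note: _wait_for_active above follows Tempest's generic poll-until-true pattern. A minimal, self-contained sketch of that pattern is shown here for reference; the helper names (call_until_true, wait_for_firewall_active) are illustrative assumptions, not part of the patch.

import time


def call_until_true(predicate, duration, sleep_for):
    # Return True as soon as predicate() is truthy, False once `duration`
    # seconds have elapsed without success.
    deadline = time.time() + duration
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(sleep_for)
    return False


def wait_for_firewall_active(client, fw_id, timeout=300, interval=5):
    # Poll a hypothetical show_firewall() call until the status is ACTIVE.
    def _is_active():
        _, body = client.show_firewall(fw_id)
        return body['firewall']['status'] == 'ACTIVE'

    if not call_until_true(_is_active, timeout, interval):
        raise RuntimeError('Timed out waiting for firewall %s' % fw_id)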
diff --git a/tempest/api/network/test_load_balancer.py b/tempest/api/network/test_load_balancer.py
index 695dbf8..673fc47 100644
--- a/tempest/api/network/test_load_balancer.py
+++ b/tempest/api/network/test_load_balancer.py
@@ -38,6 +38,7 @@
"""
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(LoadBalancerTestJSON, cls).setUpClass()
if not test.is_extension_enabled('lbaas', 'network'):
@@ -108,6 +109,7 @@
def test_create_update_delete_pool_vip(self):
# Creates a vip
name = data_utils.rand_name('vip-')
+ address = self.subnet['allocation_pools'][0]['end']
resp, body = self.client.create_pool(
name=data_utils.rand_name("pool-"),
lb_method='ROUND_ROBIN',
@@ -118,16 +120,36 @@
protocol="HTTP",
protocol_port=80,
subnet_id=self.subnet['id'],
- pool_id=pool['id'])
+ pool_id=pool['id'],
+ address=address)
self.assertEqual('201', resp['status'])
vip = body['vip']
vip_id = vip['id']
+ # Confirm VIP's address correctness with a show
+ resp, body = self.client.show_vip(vip_id)
+ self.assertEqual('200', resp['status'])
+ vip = body['vip']
+ self.assertEqual(address, vip['address'])
# Verification of vip update
new_name = "New_vip"
- resp, body = self.client.update_vip(vip_id, name=new_name)
+ new_description = "New description"
+ persistence_type = "HTTP_COOKIE"
+ update_data = {"session_persistence": {
+ "type": persistence_type}}
+ resp, body = self.client.update_vip(vip_id,
+ name=new_name,
+ description=new_description,
+ connection_limit=10,
+ admin_state_up=False,
+ **update_data)
self.assertEqual('200', resp['status'])
updated_vip = body['vip']
- self.assertEqual(updated_vip['name'], new_name)
+ self.assertEqual(new_name, updated_vip['name'])
+ self.assertEqual(new_description, updated_vip['description'])
+ self.assertEqual(10, updated_vip['connection_limit'])
+ self.assertFalse(updated_vip['admin_state_up'])
+ self.assertEqual(persistence_type,
+ updated_vip['session_persistence']['type'])
# Verification of vip delete
resp, body = self.client.delete_vip(vip['id'])
self.assertEqual('204', resp['status'])
@@ -135,10 +157,14 @@
# Verification of pool update
new_name = "New_pool"
resp, body = self.client.update_pool(pool['id'],
- name=new_name)
+ name=new_name,
+ description="new_description",
+ lb_method='LEAST_CONNECTIONS')
self.assertEqual('200', resp['status'])
updated_pool = body['pool']
- self.assertEqual(updated_pool['name'], new_name)
+ self.assertEqual(new_name, updated_pool['name'])
+ self.assertEqual('new_description', updated_pool['description'])
+ self.assertEqual('LEAST_CONNECTIONS', updated_pool['lb_method'])
# Verification of pool delete
resp, body = self.client.delete_pool(pool['id'])
self.assertEqual('204', resp['status'])
@@ -274,6 +300,40 @@
self.assertEqual('204', resp['status'])
@test.attr(type='smoke')
+ def test_create_health_monitor_http_type(self):
+ hm_type = "HTTP"
+ resp, body = self.client.create_health_monitor(delay=4,
+ max_retries=3,
+ type=hm_type,
+ timeout=1)
+ self.assertEqual('201', resp['status'])
+ health_monitor = body['health_monitor']
+ self.addCleanup(self.client.delete_health_monitor,
+ health_monitor['id'])
+ self.assertEqual(hm_type, health_monitor['type'])
+
+ @test.attr(type='smoke')
+ def test_update_health_monitor_http_method(self):
+ resp, body = self.client.create_health_monitor(delay=4,
+ max_retries=3,
+ type="HTTP",
+ timeout=1)
+ self.assertEqual('201', resp['status'])
+ health_monitor = body['health_monitor']
+ self.addCleanup(self.client.delete_health_monitor,
+ health_monitor['id'])
+ resp, body = (self.client.update_health_monitor
+ (health_monitor['id'],
+ http_method="POST",
+ url_path="/home/user",
+ expected_codes="290"))
+ self.assertEqual('200', resp['status'])
+ updated_health_monitor = body['health_monitor']
+ self.assertEqual("POST", updated_health_monitor['http_method'])
+ self.assertEqual("/home/user", updated_health_monitor['url_path'])
+ self.assertEqual("290", updated_health_monitor['expected_codes'])
+
+ @test.attr(type='smoke')
def test_show_health_monitor(self):
# Verifies the details of a health_monitor
resp, body = self.client.show_health_monitor(self.health_monitor['id'])
@@ -322,6 +382,92 @@
self.assertIn("active_connections", stats)
self.assertIn("bytes_out", stats)
+ @test.attr(type='smoke')
+ def test_update_list_of_health_monitors_associated_with_pool(self):
+ resp, _ = (self.client.associate_health_monitor_with_pool
+ (self.health_monitor['id'], self.pool['id']))
+ self.assertEqual('201', resp['status'])
+ resp, _ = self.client.update_health_monitor(
+ self.health_monitor['id'], admin_state_up=False)
+ self.assertEqual('200', resp['status'])
+ resp, body = self.client.show_pool(self.pool['id'])
+ self.assertEqual('200', resp['status'])
+ health_monitors = body['pool']['health_monitors']
+ for health_monitor_id in health_monitors:
+ resp, body = self.client.show_health_monitor(health_monitor_id)
+ self.assertEqual('200', resp['status'])
+ self.assertFalse(body['health_monitor']['admin_state_up'])
+ resp, _ = (self.client.disassociate_health_monitor_with_pool
+ (self.health_monitor['id'], self.pool['id']))
+ self.assertEqual('204', resp['status'])
+
+ @test.attr(type='smoke')
+ def test_update_admin_state_up_of_pool(self):
+ resp, _ = self.client.update_pool(self.pool['id'],
+ admin_state_up=False)
+ self.assertEqual('200', resp['status'])
+ resp, body = self.client.show_pool(self.pool['id'])
+ self.assertEqual('200', resp['status'])
+ pool = body['pool']
+ self.assertFalse(pool['admin_state_up'])
+
+ @test.attr(type='smoke')
+ def test_show_vip_associated_with_pool(self):
+ resp, body = self.client.show_pool(self.pool['id'])
+ self.assertEqual('200', resp['status'])
+ pool = body['pool']
+ resp, body = self.client.show_vip(pool['vip_id'])
+ self.assertEqual('200', resp['status'])
+ vip = body['vip']
+ self.assertEqual(self.vip['name'], vip['name'])
+ self.assertEqual(self.vip['id'], vip['id'])
+
+ @test.attr(type='smoke')
+ def test_show_members_associated_with_pool(self):
+ resp, body = self.client.show_pool(self.pool['id'])
+ self.assertEqual('200', resp['status'])
+ members = body['pool']['members']
+ for member_id in members:
+ resp, body = self.client.show_member(member_id)
+ self.assertEqual('200', resp['status'])
+ self.assertIsNotNone(body['member']['status'])
+ self.assertEqual(member_id, body['member']['id'])
+ self.assertIsNotNone(body['member']['admin_state_up'])
+
+ @test.attr(type='smoke')
+ def test_update_pool_related_to_member(self):
+ # Create new pool
+ resp, body = self.client.create_pool(
+ name=data_utils.rand_name("pool-"),
+ lb_method='ROUND_ROBIN',
+ protocol='HTTP',
+ subnet_id=self.subnet['id'])
+ self.assertEqual('201', resp['status'])
+ new_pool = body['pool']
+ self.addCleanup(self.client.delete_pool, new_pool['id'])
+ # Update member with new pool's id
+ resp, body = self.client.update_member(self.member['id'],
+ pool_id=new_pool['id'])
+ self.assertEqual('200', resp['status'])
+ # Confirm with show that pool_id changed
+ resp, body = self.client.show_member(self.member['id'])
+ member = body['member']
+ self.assertEqual(member['pool_id'], new_pool['id'])
+ # Update member with old pool id; this is needed for cleanup
+ resp, body = self.client.update_member(self.member['id'],
+ pool_id=self.pool['id'])
+ self.assertEqual('200', resp['status'])
+
+ @test.attr(type='smoke')
+ def test_update_member_weight(self):
+ resp, _ = self.client.update_member(self.member['id'],
+ weight=2)
+ self.assertEqual('200', resp['status'])
+ resp, body = self.client.show_member(self.member['id'])
+ self.assertEqual('200', resp['status'])
+ member = body['member']
+ self.assertEqual(2, member['weight'])
+
class LoadBalancerTestXML(LoadBalancerTestJSON):
_interface = 'xml'
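Note: the @test.safe_setup decorator added throughout these files guarantees class-level cleanup when setUpClass fails part-way. A hedged sketch of how such a decorator can be written (an assumption about its shape, not a copy of Tempest's implementation); in the tests it is stacked under @classmethod, so the wrapped function receives the class object.

import functools


def safe_setup(f):
    # Wrap a setUpClass-style classmethod body so that tearDownClass runs
    # when setup raises, cleaning up partially created resources.
    @functools.wraps(f)
    def decorator(cls):
        try:
            f(cls)
        except Exception:
            cls.tearDownClass()
            raise
    return decorator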
diff --git a/tempest/api/network/test_networks.py b/tempest/api/network/test_networks.py
index 88e7238..660b376 100644
--- a/tempest/api/network/test_networks.py
+++ b/tempest/api/network/test_networks.py
@@ -18,7 +18,8 @@
from tempest.api.network import base
from tempest.common.utils import data_utils
from tempest import config
-from tempest.test import attr
+from tempest import exceptions
+from tempest import test
CONF = config.CONF
@@ -36,13 +37,9 @@
create a subnet for a tenant
list tenant's subnets
show a tenant subnet details
- port create
- port delete
- port list
- port show
- port update
network update
subnet update
+ delete a network also deletes its subnets
All subnet tests are run once with ipv4 and once with ipv6.
@@ -61,15 +58,15 @@
"""
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(NetworksTestJSON, cls).setUpClass()
cls.network = cls.create_network()
cls.name = cls.network['name']
cls.subnet = cls.create_subnet(cls.network)
cls.cidr = cls.subnet['cidr']
- cls.port = cls.create_port(cls.network)
- @attr(type='smoke')
+ @test.attr(type='smoke')
def test_create_update_delete_network_subnet(self):
# Create a network
name = data_utils.rand_name('network-')
@@ -100,7 +97,7 @@
resp, body = self.client.delete_network(net_id)
self.assertEqual('204', resp['status'])
- @attr(type='smoke')
+ @test.attr(type='smoke')
def test_show_network(self):
# Verify the details of a network
resp, body = self.client.show_network(self.network['id'])
@@ -109,19 +106,19 @@
for key in ['id', 'name']:
self.assertEqual(network[key], self.network[key])
- @attr(type='smoke')
+ @test.attr(type='smoke')
def test_show_network_fields(self):
# Verify specific fields of a network
- field_list = [('fields', 'id'), ('fields', 'name'), ]
+ fields = ['id', 'name']
resp, body = self.client.show_network(self.network['id'],
- field_list=field_list)
+ fields=fields)
self.assertEqual('200', resp['status'])
network = body['network']
- self.assertEqual(len(network), len(field_list))
- for label, field_name in field_list:
+ self.assertEqual(sorted(network.keys()), sorted(fields))
+ for field_name in fields:
self.assertEqual(network[field_name], self.network[field_name])
- @attr(type='smoke')
+ @test.attr(type='smoke')
def test_list_networks(self):
# Verify the network exists in the list of all networks
resp, body = self.client.list_networks()
@@ -130,18 +127,18 @@
if network['id'] == self.network['id']]
self.assertNotEmpty(networks, "Created network not found in the list")
- @attr(type='smoke')
+ @test.attr(type='smoke')
def test_list_networks_fields(self):
# Verify specific fields of the networks
- resp, body = self.client.list_networks(fields='id')
+ fields = ['id', 'name']
+ resp, body = self.client.list_networks(fields=fields)
self.assertEqual('200', resp['status'])
networks = body['networks']
self.assertNotEmpty(networks, "Network list returned is empty")
for network in networks:
- self.assertEqual(len(network), 1)
- self.assertIn('id', network)
+ self.assertEqual(sorted(network.keys()), sorted(fields))
- @attr(type='smoke')
+ @test.attr(type='smoke')
def test_show_subnet(self):
# Verify the details of a subnet
resp, body = self.client.show_subnet(self.subnet['id'])
@@ -152,19 +149,19 @@
self.assertIn(key, subnet)
self.assertEqual(subnet[key], self.subnet[key])
- @attr(type='smoke')
+ @test.attr(type='smoke')
def test_show_subnet_fields(self):
# Verify specific fields of a subnet
- field_list = [('fields', 'id'), ('fields', 'cidr'), ]
+ fields = ['id', 'network_id']
resp, body = self.client.show_subnet(self.subnet['id'],
- field_list=field_list)
+ fields=fields)
self.assertEqual('200', resp['status'])
subnet = body['subnet']
- self.assertEqual(len(subnet), len(field_list))
- for label, field_name in field_list:
+ self.assertEqual(sorted(subnet.keys()), sorted(fields))
+ for field_name in fields:
self.assertEqual(subnet[field_name], self.subnet[field_name])
- @attr(type='smoke')
+ @test.attr(type='smoke')
def test_list_subnets(self):
# Verify the subnet exists in the list of all subnets
resp, body = self.client.list_subnets()
@@ -173,100 +170,82 @@
if subnet['id'] == self.subnet['id']]
self.assertNotEmpty(subnets, "Created subnet not found in the list")
- @attr(type='smoke')
+ @test.attr(type='smoke')
def test_list_subnets_fields(self):
# Verify specific fields of subnets
- resp, body = self.client.list_subnets(fields='id')
+ fields = ['id', 'network_id']
+ resp, body = self.client.list_subnets(fields=fields)
self.assertEqual('200', resp['status'])
subnets = body['subnets']
self.assertNotEmpty(subnets, "Subnet list returned is empty")
for subnet in subnets:
- self.assertEqual(len(subnet), 1)
- self.assertIn('id', subnet)
+ self.assertEqual(sorted(subnet.keys()), sorted(fields))
- @attr(type='smoke')
- def test_create_update_delete_port(self):
- # Verify port creation
- resp, body = self.client.create_port(network_id=self.network['id'])
+ def _try_delete_network(self, net_id):
+ # delete network, if it exists
+ try:
+ self.client.delete_network(net_id)
+ # if network is not found, this means it was deleted in the test
+ except exceptions.NotFound:
+ pass
+
+ @test.attr(type='smoke')
+ def test_delete_network_with_subnet(self):
+ # Creates a network
+ name = data_utils.rand_name('network-')
+ resp, body = self.client.create_network(name=name)
self.assertEqual('201', resp['status'])
- port = body['port']
- self.assertTrue(port['admin_state_up'])
- # Verify port update
- new_name = "New_Port"
- resp, body = self.client.update_port(
- port['id'],
- name=new_name,
- admin_state_up=False)
- self.assertEqual('200', resp['status'])
- updated_port = body['port']
- self.assertEqual(updated_port['name'], new_name)
- self.assertFalse(updated_port['admin_state_up'])
- # Verify port deletion
- resp, body = self.client.delete_port(port['id'])
+ network = body['network']
+ net_id = network['id']
+ self.addCleanup(self._try_delete_network, net_id)
+
+ # Find a cidr that is not in use yet and create a subnet with it
+ subnet = self.create_subnet(network)
+ subnet_id = subnet['id']
+
+ # Delete network while the subnet still exists
+ resp, body = self.client.delete_network(net_id)
self.assertEqual('204', resp['status'])
- @attr(type='smoke')
- def test_show_port(self):
- # Verify the details of port
- resp, body = self.client.show_port(self.port['id'])
- self.assertEqual('200', resp['status'])
- port = body['port']
- self.assertIn('id', port)
- self.assertEqual(port['id'], self.port['id'])
+ # Verify that the subnet got automatically deleted.
+ self.assertRaises(exceptions.NotFound, self.client.show_subnet,
+ subnet_id)
- @attr(type='smoke')
- def test_show_port_fields(self):
- # Verify specific fields of a port
- field_list = [('fields', 'id'), ]
- resp, body = self.client.show_port(self.port['id'],
- field_list=field_list)
- self.assertEqual('200', resp['status'])
- port = body['port']
- self.assertEqual(len(port), len(field_list))
- for label, field_name in field_list:
- self.assertEqual(port[field_name], self.port[field_name])
+ # Since create_subnet adds the subnet to the delete list, and it
+ # is actually deleted here - this will create an issue, hence remove
+ # it from the list.
+ self.subnets.pop()
- @attr(type='smoke')
- def test_list_ports(self):
- # Verify the port exists in the list of all ports
- resp, body = self.client.list_ports()
- self.assertEqual('200', resp['status'])
- ports = [port['id'] for port in body['ports']
- if port['id'] == self.port['id']]
- self.assertNotEmpty(ports, "Created port not found in the list")
+ @test.attr(type='smoke')
+ def test_create_delete_subnet_with_gw(self):
+ gateway = '10.100.0.13'
+ name = data_utils.rand_name('network-')
+ resp, body = self.client.create_network(name=name)
+ self.assertEqual('201', resp['status'])
+ network = body['network']
+ net_id = network['id']
+ subnet = self.create_subnet(network, gateway)
+ # Verifies Subnet GW in IPv4
+ self.assertEqual(subnet['gateway_ip'], gateway)
+ # Delete network and subnet
+ resp, body = self.client.delete_network(net_id)
+ self.assertEqual('204', resp['status'])
+ self.subnets.pop()
- @attr(type='smoke')
- def test_port_list_filter_by_router_id(self):
- # Create a router
- network = self.create_network()
- self.create_subnet(network)
- router = self.create_router(data_utils.rand_name('router-'))
- resp, port = self.client.create_port(network_id=network['id'])
- # Add router interface to port created above
- resp, interface = self.client.add_router_interface_with_port_id(
- router['id'], port['port']['id'])
- self.addCleanup(self.client.remove_router_interface_with_port_id,
- router['id'], port['port']['id'])
- # List ports filtered by router_id
- resp, port_list = self.client.list_ports(
- device_id=router['id'])
- self.assertEqual('200', resp['status'])
- ports = port_list['ports']
- self.assertEqual(len(ports), 1)
- self.assertEqual(ports[0]['id'], port['port']['id'])
- self.assertEqual(ports[0]['device_id'], router['id'])
-
- @attr(type='smoke')
- def test_list_ports_fields(self):
- # Verify specific fields of ports
- resp, body = self.client.list_ports(fields='id')
- self.assertEqual('200', resp['status'])
- ports = body['ports']
- self.assertNotEmpty(ports, "Port list returned is empty")
- # Asserting the fields returned are correct
- for port in ports:
- self.assertEqual(len(port), 1)
- self.assertIn('id', port)
+ @test.attr(type='smoke')
+ def test_create_delete_subnet_without_gw(self):
+ name = data_utils.rand_name('network-')
+ resp, body = self.client.create_network(name=name)
+ self.assertEqual('201', resp['status'])
+ network = body['network']
+ net_id = network['id']
+ subnet = self.create_subnet(network)
+ # Verifies Subnet GW in IPv4
+ self.assertEqual(subnet['gateway_ip'], '10.100.0.1')
+ # Delete network and subnet
+ resp, body = self.client.delete_network(net_id)
+ self.assertEqual('204', resp['status'])
+ self.subnets.pop()
class NetworksTestXML(NetworksTestJSON):
@@ -296,6 +275,7 @@
"""
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(BulkNetworkOpsTestJSON, cls).setUpClass()
cls.network1 = cls.create_network()
@@ -331,7 +311,7 @@
for n in created_ports:
self.assertNotIn(n['id'], ports_list)
- @attr(type='smoke')
+ @test.attr(type='smoke')
def test_bulk_create_delete_network(self):
# Creates 2 networks in one request
network_names = [data_utils.rand_name('network-'),
@@ -347,7 +327,7 @@
self.assertIsNotNone(n['id'])
self.assertIn(n['id'], networks_list)
- @attr(type='smoke')
+ @test.attr(type='smoke')
def test_bulk_create_delete_subnet(self):
# Creates 2 subnets in one request
cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
@@ -379,7 +359,7 @@
self.assertIsNotNone(n['id'])
self.assertIn(n['id'], subnets_list)
- @attr(type='smoke')
+ @test.attr(type='smoke')
def test_bulk_create_delete_port(self):
# Creates 2 ports in one request
networks = [self.network1['id'], self.network2['id']]
@@ -415,11 +395,41 @@
@classmethod
def setUpClass(cls):
- super(NetworksIpV6TestJSON, cls).setUpClass()
if not CONF.network_feature_enabled.ipv6:
- cls.tearDownClass()
skip_msg = "IPv6 Tests are disabled."
raise cls.skipException(skip_msg)
+ super(NetworksIpV6TestJSON, cls).setUpClass()
+
+ @test.attr(type='smoke')
+ def test_create_delete_subnet_with_gw(self):
+ gateway = '2003::2'
+ name = data_utils.rand_name('network-')
+ resp, body = self.client.create_network(name=name)
+ self.assertEqual('201', resp['status'])
+ network = body['network']
+ net_id = network['id']
+ subnet = self.create_subnet(network, gateway)
+ # Verifies Subnet GW in IPv6
+ self.assertEqual(subnet['gateway_ip'], gateway)
+ # Delete network and subnet
+ resp, body = self.client.delete_network(net_id)
+ self.assertEqual('204', resp['status'])
+ self.subnets.pop()
+
+ @test.attr(type='smoke')
+ def test_create_delete_subnet_without_gw(self):
+ name = data_utils.rand_name('network-')
+ resp, body = self.client.create_network(name=name)
+ self.assertEqual('201', resp['status'])
+ network = body['network']
+ net_id = network['id']
+ subnet = self.create_subnet(network)
+ # Verifies Subnet GW in IPv6
+ self.assertEqual(subnet['gateway_ip'], '2003::1')
+ # Delete network and subnet
+ resp, body = self.client.delete_network(net_id)
+ self.assertEqual('204', resp['status'])
+ self.subnets.pop()
class NetworksIpV6TestXML(NetworksIpV6TestJSON):
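Note: test_delete_network_with_subnet relies on create_subnet to "find a cidr that is not in use yet". A minimal sketch of that selection using netaddr is shown below; the helper name and arguments are assumptions for illustration only.

import netaddr


def pick_unused_cidr(tenant_cidr, mask_bits, used_cidrs):
    # Walk the /mask_bits subnets of the tenant range and return the first
    # one that is not already allocated.
    for candidate in netaddr.IPNetwork(tenant_cidr).subnet(mask_bits):
        if str(candidate) not in used_cidrs:
            return str(candidate)
    raise ValueError('No unused CIDR left in %s' % tenant_cidr)


# Example: pick_unused_cidr('10.100.0.0/16', 24, {'10.100.0.0/24'})
# returns '10.100.1.0/24'.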
diff --git a/tempest/api/network/test_ports.py b/tempest/api/network/test_ports.py
new file mode 100644
index 0000000..e6e6ea1
--- /dev/null
+++ b/tempest/api/network/test_ports.py
@@ -0,0 +1,299 @@
+# Copyright 2014 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import socket
+
+from tempest.api.network import base
+from tempest.common.utils import data_utils
+from tempest import config
+from tempest import test
+
+CONF = config.CONF
+
+
+class PortsTestJSON(base.BaseNetworkTest):
+ _interface = 'json'
+
+ """
+ Test the following operations for ports:
+
+ port create
+ port delete
+ port list
+ port show
+ port update
+ """
+
+ @classmethod
+ @test.safe_setup
+ def setUpClass(cls):
+ super(PortsTestJSON, cls).setUpClass()
+ cls.network = cls.create_network()
+ cls.port = cls.create_port(cls.network)
+
+ def _delete_port(self, port_id):
+ resp, body = self.client.delete_port(port_id)
+ self.assertEqual('204', resp['status'])
+ resp, body = self.client.list_ports()
+ self.assertEqual('200', resp['status'])
+ ports_list = body['ports']
+ self.assertFalse(port_id in [n['id'] for n in ports_list])
+
+ @test.attr(type='smoke')
+ def test_create_update_delete_port(self):
+ # Verify port creation
+ resp, body = self.client.create_port(network_id=self.network['id'])
+ self.assertEqual('201', resp['status'])
+ port = body['port']
+ # Schedule port deletion with verification upon test completion
+ self.addCleanup(self._delete_port, port['id'])
+ self.assertTrue(port['admin_state_up'])
+ # Verify port update
+ new_name = "New_Port"
+ resp, body = self.client.update_port(
+ port['id'],
+ name=new_name,
+ admin_state_up=False)
+ self.assertEqual('200', resp['status'])
+ updated_port = body['port']
+ self.assertEqual(updated_port['name'], new_name)
+ self.assertFalse(updated_port['admin_state_up'])
+
+ @test.attr(type='smoke')
+ def test_show_port(self):
+ # Verify the details of port
+ resp, body = self.client.show_port(self.port['id'])
+ self.assertEqual('200', resp['status'])
+ port = body['port']
+ self.assertIn('id', port)
+ self.assertEqual(port['id'], self.port['id'])
+ self.assertEqual(self.port['admin_state_up'], port['admin_state_up'])
+ self.assertEqual(self.port['device_id'], port['device_id'])
+ self.assertEqual(self.port['device_owner'], port['device_owner'])
+ self.assertEqual(self.port['mac_address'], port['mac_address'])
+ self.assertEqual(self.port['name'], port['name'])
+ self.assertEqual(self.port['security_groups'],
+ port['security_groups'])
+ self.assertEqual(self.port['network_id'], port['network_id'])
+ self.assertEqual(port['fixed_ips'], [])
+
+ @test.attr(type='smoke')
+ def test_show_port_fields(self):
+ # Verify specific fields of a port
+ fields = ['id', 'mac_address']
+ resp, body = self.client.show_port(self.port['id'],
+ fields=fields)
+ self.assertEqual('200', resp['status'])
+ port = body['port']
+ self.assertEqual(sorted(port.keys()), sorted(fields))
+ for field_name in fields:
+ self.assertEqual(port[field_name], self.port[field_name])
+
+ @test.attr(type='smoke')
+ def test_list_ports(self):
+ # Verify the port exists in the list of all ports
+ resp, body = self.client.list_ports()
+ self.assertEqual('200', resp['status'])
+ ports = [port['id'] for port in body['ports']
+ if port['id'] == self.port['id']]
+ self.assertNotEmpty(ports, "Created port not found in the list")
+
+ @test.attr(type='smoke')
+ def test_port_list_filter_by_router_id(self):
+ # Create a router
+ network = self.create_network()
+ self.create_subnet(network)
+ router = self.create_router(data_utils.rand_name('router-'))
+ resp, port = self.client.create_port(network_id=network['id'])
+ # Add router interface to port created above
+ resp, interface = self.client.add_router_interface_with_port_id(
+ router['id'], port['port']['id'])
+ self.addCleanup(self.client.remove_router_interface_with_port_id,
+ router['id'], port['port']['id'])
+ # List ports filtered by router_id
+ resp, port_list = self.client.list_ports(
+ device_id=router['id'])
+ self.assertEqual('200', resp['status'])
+ ports = port_list['ports']
+ self.assertEqual(len(ports), 1)
+ self.assertEqual(ports[0]['id'], port['port']['id'])
+ self.assertEqual(ports[0]['device_id'], router['id'])
+
+ @test.attr(type='smoke')
+ def test_list_ports_fields(self):
+ # Verify specific fields of ports
+ fields = ['id', 'mac_address']
+ resp, body = self.client.list_ports(fields=fields)
+ self.assertEqual('200', resp['status'])
+ ports = body['ports']
+ self.assertNotEmpty(ports, "Port list returned is empty")
+ # Asserting the fields returned are correct
+ for port in ports:
+ self.assertEqual(sorted(fields), sorted(port.keys()))
+
+ @test.attr(type='smoke')
+ def test_update_port_with_second_ip(self):
+ # Create a network with two subnets
+ network = self.create_network()
+ subnet_1 = self.create_subnet(network)
+ subnet_2 = self.create_subnet(network)
+ fixed_ip_1 = [{'subnet_id': subnet_1['id']}]
+ fixed_ip_2 = [{'subnet_id': subnet_2['id']}]
+
+ # Create a port with a single IP address from first subnet
+ port = self.create_port(network,
+ fixed_ips=fixed_ip_1)
+ self.assertEqual(1, len(port['fixed_ips']))
+
+ # Update the port with a second IP address from second subnet
+ fixed_ips = fixed_ip_1 + fixed_ip_2
+ port = self.update_port(port, fixed_ips=fixed_ips)
+ self.assertEqual(2, len(port['fixed_ips']))
+
+ # Update the port to return to a single IP address
+ port = self.update_port(port, fixed_ips=fixed_ip_1)
+ self.assertEqual(1, len(port['fixed_ips']))
+
+
+class PortsTestXML(PortsTestJSON):
+ _interface = 'xml'
+
+
+class PortsAdminExtendedAttrsTestJSON(base.BaseAdminNetworkTest):
+ _interface = 'json'
+
+ @classmethod
+ @test.safe_setup
+ def setUpClass(cls):
+ super(PortsAdminExtendedAttrsTestJSON, cls).setUpClass()
+ cls.identity_client = cls._get_identity_admin_client()
+ cls.tenant = cls.identity_client.get_tenant_by_name(
+ CONF.identity.tenant_name)
+ cls.network = cls.create_network()
+ cls.host_id = socket.gethostname()
+
+ @test.attr(type='smoke')
+ def test_create_port_binding_ext_attr(self):
+ post_body = {"network_id": self.network['id'],
+ "binding:host_id": self.host_id}
+ resp, body = self.admin_client.create_port(**post_body)
+ self.assertEqual('201', resp['status'])
+ port = body['port']
+ self.addCleanup(self.admin_client.delete_port, port['id'])
+ host_id = port['binding:host_id']
+ self.assertIsNotNone(host_id)
+ self.assertEqual(self.host_id, host_id)
+
+ @test.attr(type='smoke')
+ def test_update_port_binding_ext_attr(self):
+ post_body = {"network_id": self.network['id']}
+ resp, body = self.admin_client.create_port(**post_body)
+ self.assertEqual('201', resp['status'])
+ port = body['port']
+ self.addCleanup(self.admin_client.delete_port, port['id'])
+ update_body = {"binding:host_id": self.host_id}
+ resp, body = self.admin_client.update_port(port['id'], **update_body)
+ self.assertEqual('200', resp['status'])
+ updated_port = body['port']
+ host_id = updated_port['binding:host_id']
+ self.assertIsNotNone(host_id)
+ self.assertEqual(self.host_id, host_id)
+
+ @test.attr(type='smoke')
+ def test_list_ports_binding_ext_attr(self):
+ # Create a new port
+ post_body = {"network_id": self.network['id']}
+ resp, body = self.admin_client.create_port(**post_body)
+ self.assertEqual('201', resp['status'])
+ port = body['port']
+ self.addCleanup(self.admin_client.delete_port, port['id'])
+
+ # Update the port's binding attributes so that it is now 'bound'
+ # to a host
+ update_body = {"binding:host_id": self.host_id}
+ resp, _ = self.admin_client.update_port(port['id'], **update_body)
+ self.assertEqual('200', resp['status'])
+
+ # List all ports, ensure new port is part of list and its binding
+ # attributes are set and accurate
+ resp, body = self.admin_client.list_ports()
+ self.assertEqual('200', resp['status'])
+ ports_list = body['ports']
+ pids_list = [p['id'] for p in ports_list]
+ self.assertIn(port['id'], pids_list)
+ listed_port = [p for p in ports_list if p['id'] == port['id']]
+ self.assertEqual(1, len(listed_port),
+ 'Multiple ports listed with id %s in ports listing: '
+ '%s' % (port['id'], ports_list))
+ self.assertEqual(self.host_id, listed_port[0]['binding:host_id'])
+
+ @test.attr(type='smoke')
+ def test_show_port_binding_ext_attr(self):
+ resp, body = self.admin_client.create_port(
+ network_id=self.network['id'])
+ self.assertEqual('201', resp['status'])
+ port = body['port']
+ self.addCleanup(self.admin_client.delete_port, port['id'])
+ resp, body = self.admin_client.show_port(port['id'])
+ self.assertEqual('200', resp['status'])
+ show_port = body['port']
+ self.assertEqual(port['binding:host_id'],
+ show_port['binding:host_id'])
+ self.assertEqual(port['binding:vif_type'],
+ show_port['binding:vif_type'])
+ self.assertEqual(port['binding:vif_details'],
+ show_port['binding:vif_details'])
+
+
+class PortsAdminExtendedAttrsTestXML(PortsAdminExtendedAttrsTestJSON):
+ _interface = 'xml'
+
+
+class PortsIpV6TestJSON(PortsTestJSON):
+ _ip_version = 6
+ _tenant_network_cidr = CONF.network.tenant_network_v6_cidr
+ _tenant_network_mask_bits = CONF.network.tenant_network_v6_mask_bits
+
+ @classmethod
+ def setUpClass(cls):
+ if not CONF.network_feature_enabled.ipv6:
+ skip_msg = "IPv6 Tests are disabled."
+ raise cls.skipException(skip_msg)
+ super(PortsIpV6TestJSON, cls).setUpClass()
+
+
+class PortsIpV6TestXML(PortsIpV6TestJSON):
+ _interface = 'xml'
+
+
+class PortsAdminExtendedAttrsIpV6TestJSON(PortsAdminExtendedAttrsTestJSON):
+ _ip_version = 6
+ _tenant_network_cidr = CONF.network.tenant_network_v6_cidr
+ _tenant_network_mask_bits = CONF.network.tenant_network_v6_mask_bits
+
+ @classmethod
+ def setUpClass(cls):
+ if not CONF.network_feature_enabled.ipv6:
+ skip_msg = "IPv6 Tests are disabled."
+ raise cls.skipException(skip_msg)
+ super(PortsAdminExtendedAttrsIpV6TestJSON, cls).setUpClass()
+
+
+class PortsAdminExtendedAttrsIpV6TestXML(
+ PortsAdminExtendedAttrsIpV6TestJSON):
+ _interface = 'xml'
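Note: the fields-filter tests above all share one check: a request with fields=... must return exactly the requested keys, each equal to the value on the full resource. A standalone sketch of that assertion (the helper is illustrative, not a Tempest API):

def assert_fields_only(full_resource, filtered_resource, fields):
    # The filtered response must contain exactly the requested keys ...
    assert sorted(filtered_resource.keys()) == sorted(fields)
    # ... and each value must match the unfiltered resource.
    for name in fields:
        assert filtered_resource[name] == full_resource[name]


# Example:
# full = {'id': 'p1', 'name': 'port-a', 'mac_address': 'fa:16:3e:00:00:01'}
# assert_fields_only(full, {'id': 'p1', 'mac_address': 'fa:16:3e:00:00:01'},
#                    ['id', 'mac_address'])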
diff --git a/tempest/api/network/test_routers.py b/tempest/api/network/test_routers.py
index 2657031..7605b8a 100644
--- a/tempest/api/network/test_routers.py
+++ b/tempest/api/network/test_routers.py
@@ -36,6 +36,18 @@
admin_manager = clients.AdminManager()
cls.identity_admin_client = admin_manager.identity_client
+ def _cleanup_router(self, router):
+ self.delete_router(router)
+ self.routers.remove(router)
+
+ def _create_router(self, name, admin_state_up=False,
+ external_network_id=None, enable_snat=None):
+ # associate a cleanup with created routers to avoid quota limits
+ router = self.create_router(name, admin_state_up,
+ external_network_id, enable_snat)
+ self.addCleanup(self._cleanup_router, router)
+ return router
+
@test.attr(type='smoke')
def test_create_show_list_update_delete_router(self):
# Create a router
@@ -102,7 +114,7 @@
def test_add_remove_router_interface_with_subnet_id(self):
network = self.create_network()
subnet = self.create_subnet(network)
- router = self.create_router(data_utils.rand_name('router-'))
+ router = self._create_router(data_utils.rand_name('router-'))
# Add router interface with subnet id
resp, interface = self.client.add_router_interface_with_subnet_id(
router['id'], subnet['id'])
@@ -121,7 +133,7 @@
def test_add_remove_router_interface_with_port_id(self):
network = self.create_network()
self.create_subnet(network)
- router = self.create_router(data_utils.rand_name('router-'))
+ router = self._create_router(data_utils.rand_name('router-'))
resp, port_body = self.client.create_port(
network_id=network['id'])
# add router interface to port created above
@@ -164,7 +176,7 @@
@test.attr(type='smoke')
def test_update_router_set_gateway(self):
- router = self.create_router(data_utils.rand_name('router-'))
+ router = self._create_router(data_utils.rand_name('router-'))
self.client.update_router(
router['id'],
external_gateway_info={
@@ -180,7 +192,7 @@
@test.requires_ext(extension='ext-gw-mode', service='network')
@test.attr(type='smoke')
def test_update_router_set_gateway_with_snat_explicit(self):
- router = self.create_router(data_utils.rand_name('router-'))
+ router = self._create_router(data_utils.rand_name('router-'))
self.admin_client.update_router_with_snat_gw_info(
router['id'],
external_gateway_info={
@@ -195,7 +207,7 @@
@test.requires_ext(extension='ext-gw-mode', service='network')
@test.attr(type='smoke')
def test_update_router_set_gateway_without_snat(self):
- router = self.create_router(data_utils.rand_name('router-'))
+ router = self._create_router(data_utils.rand_name('router-'))
self.admin_client.update_router_with_snat_gw_info(
router['id'],
external_gateway_info={
@@ -209,7 +221,7 @@
@test.attr(type='smoke')
def test_update_router_unset_gateway(self):
- router = self.create_router(
+ router = self._create_router(
data_utils.rand_name('router-'),
external_network_id=CONF.network.public_network_id)
self.client.update_router(router['id'], external_gateway_info={})
@@ -223,7 +235,7 @@
@test.requires_ext(extension='ext-gw-mode', service='network')
@test.attr(type='smoke')
def test_update_router_reset_gateway_without_snat(self):
- router = self.create_router(
+ router = self._create_router(
data_utils.rand_name('router-'),
external_network_id=CONF.network.public_network_id)
self.admin_client.update_router_with_snat_gw_info(
@@ -244,22 +256,38 @@
self.name = self.network['name']
self.subnet = self.create_subnet(self.network)
# Add router interface with subnet id
- self.router = self.create_router(data_utils.rand_name('router-'), True)
+ self.router = self._create_router(
+ data_utils.rand_name('router-'), True)
self.create_router_interface(self.router['id'], self.subnet['id'])
self.addCleanup(
self._delete_extra_routes,
self.router['id'])
- # Update router extra route
+ # Update router extra route; the second IP of the range is
+ # used as the next hop
cidr = netaddr.IPNetwork(self.subnet['cidr'])
+ next_hop = str(cidr[2])
+ destination = str(self.subnet['cidr'])
resp, extra_route = self.client.update_extra_routes(
- self.router['id'], str(cidr[0]), str(self.subnet['cidr']))
+ self.router['id'], next_hop, destination)
+ self.assertEqual('200', resp['status'])
+ self.assertEqual(1, len(extra_route['router']['routes']))
+ self.assertEqual(destination,
+ extra_route['router']['routes'][0]['destination'])
+ self.assertEqual(next_hop,
+ extra_route['router']['routes'][0]['nexthop'])
+ resp, show_body = self.client.show_router(self.router['id'])
+ self.assertEqual('200', resp['status'])
+ self.assertEqual(destination,
+ show_body['router']['routes'][0]['destination'])
+ self.assertEqual(next_hop,
+ show_body['router']['routes'][0]['nexthop'])
def _delete_extra_routes(self, router_id):
resp, _ = self.client.delete_extra_routes(router_id)
@test.attr(type='smoke')
def test_update_router_admin_state(self):
- self.router = self.create_router(data_utils.rand_name('router-'))
+ self.router = self._create_router(data_utils.rand_name('router-'))
self.assertFalse(self.router['admin_state_up'])
# Update router admin state
resp, update_body = self.client.update_router(self.router['id'],
@@ -275,7 +303,7 @@
network = self.create_network()
subnet01 = self.create_subnet(network)
subnet02 = self.create_subnet(network)
- router = self.create_router(data_utils.rand_name('router-'))
+ router = self._create_router(data_utils.rand_name('router-'))
interface01 = self._add_router_interface_with_subnet_id(router['id'],
subnet01['id'])
self._verify_router_interface(router['id'], subnet01['id'],
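Note: the extra-route test above derives its next hop by indexing into the subnet range with netaddr. A small sketch of that derivation, using assumed example values:

import netaddr

cidr = netaddr.IPNetwork('10.100.1.0/24')   # assumed tenant subnet
next_hop = str(cidr[2])                     # '10.100.1.2': skips the network
                                            # address (.0) and the gateway (.1)
destination = str(cidr)                     # '10.100.1.0/24'
route = {'destination': destination, 'nexthop': next_hop}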
diff --git a/tempest/api/network/test_routers_negative.py b/tempest/api/network/test_routers_negative.py
index e6ad4de..91ab9d6 100644
--- a/tempest/api/network/test_routers_negative.py
+++ b/tempest/api/network/test_routers_negative.py
@@ -23,6 +23,7 @@
_interface = 'json'
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(RoutersNegativeTest, cls).setUpClass()
if not test.is_extension_enabled('router', 'network'):
diff --git a/tempest/api/network/test_vpnaas_extensions.py b/tempest/api/network/test_vpnaas_extensions.py
index 78bc80a..a49e944 100644
--- a/tempest/api/network/test_vpnaas_extensions.py
+++ b/tempest/api/network/test_vpnaas_extensions.py
@@ -21,7 +21,7 @@
CONF = config.CONF
-class VPNaaSJSON(base.BaseNetworkTest):
+class VPNaaSTestJSON(base.BaseNetworkTest):
_interface = 'json'
"""
@@ -37,11 +37,12 @@
"""
@classmethod
+ @test.safe_setup
def setUpClass(cls):
if not test.is_extension_enabled('vpnaas', 'network'):
msg = "vpnaas extension not enabled."
raise cls.skipException(msg)
- super(VPNaaSJSON, cls).setUpClass()
+ super(VPNaaSTestJSON, cls).setUpClass()
cls.network = cls.create_network()
cls.subnet = cls.create_subnet(cls.network)
cls.router = cls.create_router(
@@ -81,8 +82,8 @@
def test_create_update_delete_vpn_service(self):
# Creates a VPN service
name = data_utils.rand_name('vpn-service-')
- resp, body = self.client.create_vpnservice(self.subnet['id'],
- self.router['id'],
+ resp, body = self.client.create_vpnservice(subnet_id=self.subnet['id'],
+ router_id=self.router['id'],
name=name,
admin_state_up=True)
self.assertEqual('201', resp['status'])
@@ -133,7 +134,7 @@
# Creates a IKE policy
name = data_utils.rand_name('ike-policy-')
resp, body = (self.client.create_ikepolicy(
- name,
+ name=name,
ike_version="v1",
encryption_algorithm="aes-128",
auth_algorithm="sha1"))
@@ -175,3 +176,7 @@
ikepolicy['phase1_negotiation_mode'])
self.assertEqual(self.ikepolicy['ike_version'],
ikepolicy['ike_version'])
+
+
+class VPNaaSTestXML(VPNaaSTestJSON):
+ _interface = 'xml'
diff --git a/tempest/api/object_storage/base.py b/tempest/api/object_storage/base.py
index 45c895b..6b18182 100644
--- a/tempest/api/object_storage/base.py
+++ b/tempest/api/object_storage/base.py
@@ -38,23 +38,12 @@
cls.__name__, network_resources=cls.network_resources)
if CONF.compute.allow_tenant_isolation:
# Get isolated creds for normal user
- creds = cls.isolated_creds.get_primary_creds()
- username, tenant_name, password = creds
- cls.os = clients.Manager(username=username,
- password=password,
- tenant_name=tenant_name)
+ cls.os = clients.Manager(cls.isolated_creds.get_primary_creds())
# Get isolated creds for admin user
- admin_creds = cls.isolated_creds.get_admin_creds()
- admin_username, admin_tenant_name, admin_password = admin_creds
- cls.os_admin = clients.Manager(username=admin_username,
- password=admin_password,
- tenant_name=admin_tenant_name)
+ cls.os_admin = clients.Manager(
+ cls.isolated_creds.get_admin_creds())
# Get isolated creds for alt user
- alt_creds = cls.isolated_creds.get_alt_creds()
- alt_username, alt_tenant, alt_password = alt_creds
- cls.os_alt = clients.Manager(username=alt_username,
- password=alt_password,
- tenant_name=alt_tenant)
+ cls.os_alt = clients.Manager(cls.isolated_creds.get_alt_creds())
# Add isolated users to operator role so that they can create a
# container in swift.
cls._assign_member_role()
@@ -92,8 +81,8 @@
@classmethod
def _assign_member_role(cls):
- primary_user = cls.isolated_creds.get_primary_user()
- alt_user = cls.isolated_creds.get_alt_user()
+ primary_creds = cls.isolated_creds.get_primary_creds()
+ alt_creds = cls.isolated_creds.get_alt_creds()
swift_role = CONF.object_storage.operator_role
try:
resp, roles = cls.os_admin.identity_client.list_roles()
@@ -101,9 +90,9 @@
except StopIteration:
msg = "No role named %s found" % swift_role
raise exceptions.NotFound(msg)
- for user in [primary_user, alt_user]:
- cls.os_admin.identity_client.assign_user_role(user['tenantId'],
- user['id'],
+ for creds in [primary_creds, alt_creds]:
+ cls.os_admin.identity_client.assign_user_role(creds.tenant_id,
+ creds.user_id,
role['id'])
@classmethod
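Note: the object storage changes above replace separate username/password/tenant_name arguments with a single credentials object that clients.Manager consumes directly. A hedged sketch of such an object, based only on the attributes the patch uses (user_id, tenant_id); the class itself is illustrative, not Tempest's actual implementation.

class Credentials(object):
    # Bundle everything a client manager needs about one test user.
    def __init__(self, username, password, tenant_name,
                 user_id=None, tenant_id=None):
        self.username = username
        self.password = password
        self.tenant_name = tenant_name
        self.user_id = user_id
        self.tenant_id = tenant_id


# Usage sketch: one object replaces three positional arguments.
# creds = Credentials('demo', 'secret', 'demo-tenant',
#                     user_id='u-123', tenant_id='t-456')
# manager = clients.Manager(creds)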
diff --git a/tempest/api/object_storage/test_account_quotas.py b/tempest/api/object_storage/test_account_quotas.py
index b14adc0..19e3068 100644
--- a/tempest/api/object_storage/test_account_quotas.py
+++ b/tempest/api/object_storage/test_account_quotas.py
@@ -27,6 +27,7 @@
class AccountQuotasTest(base.BaseObjectTest):
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(AccountQuotasTest, cls).setUpClass()
cls.container_name = data_utils.rand_name(name="TestContainer")
@@ -34,10 +35,7 @@
cls.data.setup_test_user()
- cls.os_reselleradmin = clients.Manager(
- cls.data.test_user,
- cls.data.test_password,
- cls.data.test_tenant)
+ cls.os_reselleradmin = clients.Manager(cls.data.test_credentials)
# Retrieve the ResellerAdmin role id
reseller_role_id = None
@@ -49,15 +47,11 @@
msg = "No ResellerAdmin role found"
raise exceptions.NotFound(msg)
- # Retrieve the ResellerAdmin tenant id
- _, users = cls.os_admin.identity_client.get_users()
- reseller_user_id = next(usr['id'] for usr in users if usr['name']
- == cls.data.test_user)
+ # Retrieve the ResellerAdmin user id
+ reseller_user_id = cls.data.test_credentials.user_id
# Retrieve the ResellerAdmin tenant id
- _, tenants = cls.os_admin.identity_client.list_tenants()
- reseller_tenant_id = next(tnt['id'] for tnt in tenants if tnt['name']
- == cls.data.test_tenant)
+ reseller_tenant_id = cls.data.test_credentials.tenant_id
# Assign the newly created user the appropriate ResellerAdmin role
cls.os_admin.identity_client.assign_user_role(
@@ -68,7 +62,7 @@
# Retrieve a ResellerAdmin auth data and use it to set a quota
# on the client's account
cls.reselleradmin_auth_data = \
- cls.os_reselleradmin.get_auth_provider().auth_data
+ cls.os_reselleradmin.auth_provider.auth_data
def setUp(self):
super(AccountQuotasTest, self).setUp()
@@ -82,7 +76,8 @@
# Set a quota of 20 bytes on the user's account before each test
headers = {"X-Account-Meta-Quota-Bytes": "20"}
- self.os.custom_account_client.request("POST", "", headers, "")
+ self.os.custom_account_client.request("POST", url="", headers=headers,
+ body="")
def tearDown(self):
# Set the reselleradmin auth in headers for next custom_account_client
@@ -94,12 +89,14 @@
# remove the quota from the container
headers = {"X-Remove-Account-Meta-Quota-Bytes": "x"}
- self.os.custom_account_client.request("POST", "", headers, "")
+ self.os.custom_account_client.request("POST", url="", headers=headers,
+ body="")
super(AccountQuotasTest, self).tearDown()
@classmethod
def tearDownClass(cls):
- cls.delete_containers([cls.container_name])
+ if hasattr(cls, "container_name"):
+ cls.delete_containers([cls.container_name])
cls.data.teardown_all()
super(AccountQuotasTest, cls).tearDownClass()
@@ -135,8 +132,9 @@
)
headers = {"X-Account-Meta-Quota-Bytes": quota}
- resp, _ = self.os.custom_account_client.request("POST", "",
- headers, "")
+ resp, _ = self.os.custom_account_client.request("POST", url="",
+ headers=headers,
+ body="")
self.assertEqual(resp["status"], "204")
self.assertHeaders(resp, 'Account', 'POST')
diff --git a/tempest/api/object_storage/test_account_quotas_negative.py b/tempest/api/object_storage/test_account_quotas_negative.py
index 402cd90..6afd381 100644
--- a/tempest/api/object_storage/test_account_quotas_negative.py
+++ b/tempest/api/object_storage/test_account_quotas_negative.py
@@ -27,6 +27,7 @@
class AccountQuotasNegativeTest(base.BaseObjectTest):
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(AccountQuotasNegativeTest, cls).setUpClass()
cls.container_name = data_utils.rand_name(name="TestContainer")
@@ -34,10 +35,7 @@
cls.data.setup_test_user()
- cls.os_reselleradmin = clients.Manager(
- cls.data.test_user,
- cls.data.test_password,
- cls.data.test_tenant)
+ cls.os_reselleradmin = clients.Manager(cls.data.test_credentials)
# Retrieve the ResellerAdmin role id
reseller_role_id = None
@@ -50,14 +48,10 @@
raise exceptions.NotFound(msg)
# Retrieve the ResellerAdmin tenant id
- _, users = cls.os_admin.identity_client.get_users()
- reseller_user_id = next(usr['id'] for usr in users if usr['name']
- == cls.data.test_user)
+ reseller_user_id = cls.data.test_credentials.user_id
# Retrieve the ResellerAdmin tenant id
- _, tenants = cls.os_admin.identity_client.list_tenants()
- reseller_tenant_id = next(tnt['id'] for tnt in tenants if tnt['name']
- == cls.data.test_tenant)
+ reseller_tenant_id = cls.data.test_credentials.tenant_id
# Assign the newly created user the appropriate ResellerAdmin role
cls.os_admin.identity_client.assign_user_role(
@@ -68,7 +62,7 @@
# Retrieve a ResellerAdmin auth data and use it to set a quota
# on the client's account
cls.reselleradmin_auth_data = \
- cls.os_reselleradmin.get_auth_provider().auth_data
+ cls.os_reselleradmin.auth_provider.auth_data
def setUp(self):
super(AccountQuotasNegativeTest, self).setUp()
@@ -81,7 +75,8 @@
# Set a quota of 20 bytes on the user's account before each test
headers = {"X-Account-Meta-Quota-Bytes": "20"}
- self.os.custom_account_client.request("POST", "", headers, "")
+ self.os.custom_account_client.request("POST", url="", headers=headers,
+ body="")
def tearDown(self):
# Set the reselleradmin auth in headers for next custom_account_client
@@ -93,12 +88,14 @@
# remove the quota from the container
headers = {"X-Remove-Account-Meta-Quota-Bytes": "x"}
- self.os.custom_account_client.request("POST", "", headers, "")
+ self.os.custom_account_client.request("POST", url="", headers=headers,
+ body="")
super(AccountQuotasNegativeTest, self).tearDown()
@classmethod
def tearDownClass(cls):
- cls.delete_containers([cls.container_name])
+ if hasattr(cls, "container_name"):
+ cls.delete_containers([cls.container_name])
cls.data.teardown_all()
super(AccountQuotasNegativeTest, cls).tearDownClass()
@@ -120,6 +117,7 @@
{"Quota-Bytes": "100"})
@test.attr(type=["negative", "smoke"])
+ @test.skip_because(bug="1310597")
@test.requires_ext(extension='account_quotas', service='object')
def test_upload_large_object(self):
object_name = data_utils.rand_name(name="TestObject")
diff --git a/tempest/api/object_storage/test_account_services.py b/tempest/api/object_storage/test_account_services.py
index 4b895d8..d615374 100644
--- a/tempest/api/object_storage/test_account_services.py
+++ b/tempest/api/object_storage/test_account_services.py
@@ -29,10 +29,13 @@
class AccountTest(base.BaseObjectTest):
+
+ containers = []
+
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(AccountTest, cls).setUpClass()
- cls.containers = []
for i in moves.xrange(ord('a'), ord('f') + 1):
name = data_utils.rand_name(name='%s-' % chr(i))
cls.container_client.create_container(name)
@@ -64,9 +67,7 @@
self.data.setup_test_user()
os_test_user = clients.Manager(
- self.data.test_user,
- self.data.test_password,
- self.data.test_tenant)
+ self.data.test_credentials)
# Retrieve the id of an operator role of object storage
test_role_id = None
diff --git a/tempest/api/object_storage/test_account_services_negative.py b/tempest/api/object_storage/test_account_services_negative.py
index ea93aa3..d5f8649 100644
--- a/tempest/api/object_storage/test_account_services_negative.py
+++ b/tempest/api/object_storage/test_account_services_negative.py
@@ -28,10 +28,8 @@
# create user
self.data.setup_test_user()
- test_os = clients.Manager(self.data.test_user,
- self.data.test_password,
- self.data.test_tenant)
- test_auth_provider = test_os.get_auth_provider()
+ test_os = clients.Manager(self.data.test_credentials)
+ test_auth_provider = test_os.auth_provider
# Get auth for the test user
test_auth_provider.auth_data
diff --git a/tempest/api/object_storage/test_container_acl.py b/tempest/api/object_storage/test_container_acl.py
index 085ef51..fc51504 100644
--- a/tempest/api/object_storage/test_container_acl.py
+++ b/tempest/api/object_storage/test_container_acl.py
@@ -24,10 +24,8 @@
def setUpClass(cls):
super(ObjectTestACLs, cls).setUpClass()
cls.data.setup_test_user()
- test_os = clients.Manager(cls.data.test_user,
- cls.data.test_password,
- cls.data.test_tenant)
- cls.test_auth_data = test_os.get_auth_provider().auth_data
+ test_os = clients.Manager(cls.data.test_credentials)
+ cls.test_auth_data = test_os.auth_provider.auth_data
@classmethod
def tearDownClass(cls):
diff --git a/tempest/api/object_storage/test_container_acl_negative.py b/tempest/api/object_storage/test_container_acl_negative.py
index a5a0950..ca53876 100644
--- a/tempest/api/object_storage/test_container_acl_negative.py
+++ b/tempest/api/object_storage/test_container_acl_negative.py
@@ -26,10 +26,8 @@
def setUpClass(cls):
super(ObjectACLsNegativeTest, cls).setUpClass()
cls.data.setup_test_user()
- test_os = clients.Manager(cls.data.test_user,
- cls.data.test_password,
- cls.data.test_tenant)
- cls.test_auth_data = test_os.get_auth_provider().auth_data
+ test_os = clients.Manager(cls.data.test_credentials)
+ cls.test_auth_data = test_os.auth_provider.auth_data
@classmethod
def tearDownClass(cls):
diff --git a/tempest/api/object_storage/test_container_staticweb.py b/tempest/api/object_storage/test_container_staticweb.py
index 6c71340..581c6d9 100644
--- a/tempest/api/object_storage/test_container_staticweb.py
+++ b/tempest/api/object_storage/test_container_staticweb.py
@@ -23,6 +23,7 @@
class StaticWebTest(base.BaseObjectTest):
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(StaticWebTest, cls).setUpClass()
cls.container_name = data_utils.rand_name(name="TestContainer")
@@ -45,7 +46,8 @@
@classmethod
def tearDownClass(cls):
- cls.delete_containers([cls.container_name])
+ if hasattr(cls, "container_name"):
+ cls.delete_containers([cls.container_name])
cls.data.teardown_all()
super(StaticWebTest, cls).tearDownClass()
diff --git a/tempest/api/object_storage/test_container_sync.py b/tempest/api/object_storage/test_container_sync.py
index 9bd986f..6bda83b 100644
--- a/tempest/api/object_storage/test_container_sync.py
+++ b/tempest/api/object_storage/test_container_sync.py
@@ -31,8 +31,10 @@
class ContainerSyncTest(base.BaseObjectTest):
+ clients = {}
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(ContainerSyncTest, cls).setUpClass()
cls.containers = []
@@ -50,7 +52,6 @@
int(container_sync_timeout / cls.container_sync_interval)
# define container and object clients
- cls.clients = {}
cls.clients[data_utils.rand_name(name='TestContainerSync')] = \
(cls.container_client, cls.object_client)
cls.clients[data_utils.rand_name(name='TestContainerSync')] = \
diff --git a/tempest/api/object_storage/test_crossdomain.py b/tempest/api/object_storage/test_crossdomain.py
index 4f399b4..d1541b9 100644
--- a/tempest/api/object_storage/test_crossdomain.py
+++ b/tempest/api/object_storage/test_crossdomain.py
@@ -29,10 +29,7 @@
# endpoint and test the healthcheck feature.
cls.data.setup_test_user()
- cls.os_test_user = clients.Manager(
- cls.data.test_user,
- cls.data.test_password,
- cls.data.test_tenant)
+ cls.os_test_user = clients.Manager(cls.data.test_credentials)
cls.xml_start = '<?xml version="1.0"?>\n' \
'<!DOCTYPE cross-domain-policy SYSTEM ' \
diff --git a/tempest/api/object_storage/test_object_formpost.py b/tempest/api/object_storage/test_object_formpost.py
index e0d15ac..dc5585e 100644
--- a/tempest/api/object_storage/test_object_formpost.py
+++ b/tempest/api/object_storage/test_object_formpost.py
@@ -26,7 +26,11 @@
class ObjectFormPostTest(base.BaseObjectTest):
+ metadata = {}
+ containers = []
+
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(ObjectFormPostTest, cls).setUpClass()
cls.container_name = data_utils.rand_name(name='TestContainer')
@@ -39,6 +43,18 @@
cls.metadata = {'Temp-URL-Key': cls.key}
cls.account_client.create_account_metadata(metadata=cls.metadata)
+ def setUp(self):
+ super(ObjectFormPostTest, self).setUp()
+
+ # make sure the metadata has been set
+ account_client_metadata, _ = \
+ self.account_client.list_account_metadata()
+ self.assertIn('x-account-meta-temp-url-key',
+ account_client_metadata)
+ self.assertEqual(
+ account_client_metadata['x-account-meta-temp-url-key'],
+ self.key)
+
@classmethod
def tearDownClass(cls):
cls.account_client.delete_account_metadata(metadata=cls.metadata)
@@ -100,13 +116,9 @@
headers = {'Content-Type': content_type,
'Content-Length': str(len(body))}
- url = "%s/%s/%s" % (self.container_client.base_url,
- self.container_name,
- self.object_name)
+ url = "%s/%s" % (self.container_name, self.object_name)
- # Use a raw request, otherwise authentication headers are used
- resp, body = self.object_client.http_obj.request(url, "POST",
- body, headers=headers)
+ resp, body = self.object_client.post(url, body, headers=headers)
self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
self.assertHeaders(resp, "Object", "POST")
diff --git a/tempest/api/object_storage/test_object_formpost_negative.py b/tempest/api/object_storage/test_object_formpost_negative.py
index a52c248..878bf6d 100644
--- a/tempest/api/object_storage/test_object_formpost_negative.py
+++ b/tempest/api/object_storage/test_object_formpost_negative.py
@@ -20,12 +20,17 @@
from tempest.api.object_storage import base
from tempest.common.utils import data_utils
+from tempest import exceptions
from tempest import test
class ObjectFormPostNegativeTest(base.BaseObjectTest):
+ metadata = {}
+ containers = []
+
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(ObjectFormPostNegativeTest, cls).setUpClass()
cls.container_name = data_utils.rand_name(name='TestContainer')
@@ -38,6 +43,18 @@
cls.metadata = {'Temp-URL-Key': cls.key}
cls.account_client.create_account_metadata(metadata=cls.metadata)
+ def setUp(self):
+ super(ObjectFormPostNegativeTest, self).setUp()
+
+ # make sure the metadata has been set
+ account_client_metadata, _ = \
+ self.account_client.list_account_metadata()
+ self.assertIn('x-account-meta-temp-url-key',
+ account_client_metadata)
+ self.assertEqual(
+ account_client_metadata['x-account-meta-temp-url-key'],
+ self.key)
+
@classmethod
def tearDownClass(cls):
cls.account_client.delete_account_metadata(metadata=cls.metadata)
@@ -100,12 +117,25 @@
headers = {'Content-Type': content_type,
'Content-Length': str(len(body))}
- url = "%s/%s/%s" % (self.container_client.base_url,
- self.container_name,
- self.object_name)
+ url = "%s/%s" % (self.container_name, self.object_name)
+ exc = self.assertRaises(
+ exceptions.Unauthorized,
+ self.object_client.post,
+ url, body, headers=headers)
+ self.assertIn('FormPost: Form Expired', str(exc))
- # Use a raw request, otherwise authentication headers are used
- resp, body = self.object_client.http_obj.request(url, "POST",
- body, headers=headers)
- self.assertEqual(int(resp['status']), 401)
- self.assertIn('FormPost: Form Expired', body)
+ @test.requires_ext(extension='formpost', service='object')
+ @test.attr(type='gate')
+ def test_post_object_using_form_invalid_signature(self):
+ self.key = "Wrong"
+ body, content_type = self.get_multipart_form()
+
+ headers = {'Content-Type': content_type,
+ 'Content-Length': str(len(body))}
+
+ url = "%s/%s" % (self.container_name, self.object_name)
+ exc = self.assertRaises(
+ exceptions.Unauthorized,
+ self.object_client.post,
+ url, body, headers=headers)
+ self.assertIn('FormPost: Invalid Signature', str(exc))
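The two negative cases above hinge on the signature carried by the multipart form. A minimal sketch of the signing scheme, assuming Swift's documented formpost algorithm (get_multipart_form itself is not part of this diff): the form is signed with HMAC-SHA1 keyed by the account's Temp-URL key, so an expires value in the past triggers 'FormPost: Form Expired' and signing with any other key triggers 'FormPost: Invalid Signature'.

import hmac
from hashlib import sha1

def formpost_signature(key, path, redirect, max_file_size, max_file_count,
                       expires):
    # HMAC-SHA1 over the newline-joined formpost fields, hex-encoded,
    # keyed with the X-Account-Meta-Temp-URL-Key value set in setUpClass.
    hmac_body = '%s\n%s\n%s\n%s\n%s' % (path, redirect, max_file_size,
                                        max_file_count, expires)
    return hmac.new(key, hmac_body, sha1).hexdigest()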
diff --git a/tempest/api/object_storage/test_object_services.py b/tempest/api/object_storage/test_object_services.py
index 91df292..06e63a4 100644
--- a/tempest/api/object_storage/test_object_services.py
+++ b/tempest/api/object_storage/test_object_services.py
@@ -14,7 +14,10 @@
# under the License.
import hashlib
+import random
+import re
from six import moves
+import time
from tempest.api.object_storage import base
from tempest.common import custom_matchers
@@ -35,6 +38,29 @@
cls.delete_containers(cls.containers)
super(ObjectTest, cls).tearDownClass()
+ def _create_object(self, metadata=None):
+ # setup object
+ object_name = data_utils.rand_name(name='TestObject')
+ data = data_utils.arbitrary_string()
+ self.object_client.create_object(self.container_name,
+ object_name, data, metadata=metadata)
+
+ return object_name, data
+
+ def _upload_segments(self):
+ # create object
+ object_name = data_utils.rand_name(name='LObject')
+ data = data_utils.arbitrary_string()
+ segments = 10
+ data_segments = [data + str(i) for i in moves.xrange(segments)]
+ # uploading segments
+ for i in moves.xrange(segments):
+ resp, _ = self.object_client.create_object_segments(
+ self.container_name, object_name, i, data_segments[i])
+ self.assertEqual(resp['status'], '201')
+
+ return object_name, data_segments
+
@test.attr(type='smoke')
def test_create_object(self):
# create object
@@ -64,42 +90,227 @@
self.assertHeaders(resp, 'Object', 'DELETE')
@test.attr(type='smoke')
- def test_object_metadata(self):
- # add metadata to storage object, test if metadata is retrievable
+ def test_update_object_metadata(self):
+ # update object metadata
+ object_name, data = self._create_object()
- # create Object
- object_name = data_utils.rand_name(name='TestObject')
- data = data_utils.arbitrary_string()
- resp, _ = self.object_client.create_object(self.container_name,
- object_name, data)
- # set object metadata
- meta_key = data_utils.rand_name(name='test-')
- meta_value = data_utils.rand_name(name='MetaValue-')
- orig_metadata = {meta_key: meta_value}
+ metadata = {'X-Object-Meta-test-meta': 'Meta'}
resp, _ = self.object_client.update_object_metadata(
- self.container_name, object_name, orig_metadata)
+ self.container_name,
+ object_name,
+ metadata,
+ metadata_prefix='')
self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
self.assertHeaders(resp, 'Object', 'POST')
+ resp, _ = self.object_client.list_object_metadata(
+ self.container_name,
+ object_name)
+ self.assertIn('x-object-meta-test-meta', resp)
+ self.assertEqual(resp['x-object-meta-test-meta'], 'Meta')
+
+ def test_update_object_metadata_with_remove_metadata(self):
+ # update object metadata with remove metadata
+ object_name = data_utils.rand_name(name='TestObject')
+ data = data_utils.arbitrary_string()
+ create_metadata = {'X-Object-Meta-test-meta1': 'Meta1'}
+ self.object_client.create_object(self.container_name,
+ object_name,
+ data,
+ metadata=create_metadata)
+
+ update_metadata = {'X-Remove-Object-Meta-test-meta1': 'Meta1'}
+ resp, _ = self.object_client.update_object_metadata(
+ self.container_name,
+ object_name,
+ update_metadata,
+ metadata_prefix='')
+ self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+ self.assertHeaders(resp, 'Object', 'POST')
+
+ resp, _ = self.object_client.list_object_metadata(
+ self.container_name,
+ object_name)
+ self.assertNotIn('x-object-meta-test-meta1', resp)
+
+ @test.attr(type='smoke')
+ def test_update_object_metadata_with_create_and_remove_metadata(self):
+ # creation and deletion of metadata with one request
+ object_name = data_utils.rand_name(name='TestObject')
+ data = data_utils.arbitrary_string()
+ create_metadata = {'X-Object-Meta-test-meta1': 'Meta1'}
+ self.object_client.create_object(self.container_name,
+ object_name,
+ data,
+ metadata=create_metadata)
+
+ update_metadata = {'X-Object-Meta-test-meta2': 'Meta2',
+ 'X-Remove-Object-Meta-test-meta1': 'Meta1'}
+ resp, _ = self.object_client.update_object_metadata(
+ self.container_name,
+ object_name,
+ update_metadata,
+ metadata_prefix='')
+ self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+ self.assertHeaders(resp, 'Object', 'POST')
+
+ resp, _ = self.object_client.list_object_metadata(
+ self.container_name,
+ object_name)
+ self.assertNotIn('x-object-meta-test-meta1', resp)
+ self.assertIn('x-object-meta-test-meta2', resp)
+ self.assertEqual(resp['x-object-meta-test-meta2'], 'Meta2')
+
+ @test.attr(type='smoke')
+ def test_update_object_metadata_with_x_object_manifest(self):
+ # update object metadata with x_object_manifest
+
+ # uploading segments
+ object_name, data_segments = self._upload_segments()
+ # creating a manifest file
+ data_empty = ''
+ self.object_client.create_object(self.container_name,
+ object_name,
+ data_empty,
+ metadata=None)
+ object_prefix = '%s/%s' % (self.container_name, object_name)
+ update_metadata = {'X-Object-Manifest': object_prefix}
+ resp, _ = self.object_client.update_object_metadata(
+ self.container_name,
+ object_name,
+ update_metadata,
+ metadata_prefix='')
+ self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+ self.assertHeaders(resp, 'Object', 'POST')
+
+ resp, _ = self.object_client.list_object_metadata(
+ self.container_name,
+ object_name)
+ self.assertIn('x-object-manifest', resp)
+ self.assertNotEqual(len(resp['x-object-manifest']), 0)
+
+ def test_update_object_metadata_with_x_object_metakey(self):
+ # update object metadata with a blank metadata value
+ object_name, data = self._create_object()
+
+ update_metadata = {'X-Object-Meta-test-meta': ''}
+ resp, _ = self.object_client.update_object_metadata(
+ self.container_name,
+ object_name,
+ update_metadata,
+ metadata_prefix='')
+ self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+ self.assertHeaders(resp, 'Object', 'POST')
+
+ resp, _ = self.object_client.list_object_metadata(
+ self.container_name,
+ object_name)
+ self.assertIn('x-object-meta-test-meta', resp)
+ self.assertEqual(resp['x-object-meta-test-meta'], '')
+
+ @test.attr(type='smoke')
+ def test_update_object_metadata_with_x_remove_object_metakey(self):
+ # update object metadata with a blank remove-metadata value
+ object_name = data_utils.rand_name(name='TestObject')
+ data = data_utils.arbitrary_string()
+ create_metadata = {'X-Object-Meta-test-meta': 'Meta'}
+ self.object_client.create_object(self.container_name,
+ object_name,
+ data,
+ metadata=create_metadata)
+
+ update_metadata = {'X-Remove-Object-Meta-test-meta': ''}
+ resp, _ = self.object_client.update_object_metadata(
+ self.container_name,
+ object_name,
+ update_metadata,
+ metadata_prefix='')
+ self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+ self.assertHeaders(resp, 'Object', 'POST')
+
+ resp, _ = self.object_client.list_object_metadata(
+ self.container_name,
+ object_name)
+ self.assertNotIn('x-object-meta-test-meta', resp)
+
+ @test.attr(type='smoke')
+ def test_list_object_metadata(self):
# get object metadata
- resp, resp_metadata = self.object_client.list_object_metadata(
- self.container_name, object_name)
+ object_name = data_utils.rand_name(name='TestObject')
+ data = data_utils.arbitrary_string()
+ metadata = {'X-Object-Meta-test-meta': 'Meta'}
+ self.object_client.create_object(self.container_name,
+ object_name,
+ data,
+ metadata=metadata)
+
+ resp, _ = self.object_client.list_object_metadata(
+ self.container_name,
+ object_name)
self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
self.assertHeaders(resp, 'Object', 'HEAD')
+ self.assertIn('x-object-meta-test-meta', resp)
+ self.assertEqual(resp['x-object-meta-test-meta'], 'Meta')
- actual_meta_key = 'x-object-meta-' + meta_key
- self.assertIn(actual_meta_key, resp)
- self.assertEqual(resp[actual_meta_key], meta_value)
+ @test.attr(type='smoke')
+ def test_list_no_object_metadata(self):
+ # get empty list of object metadata
+ object_name, data = self._create_object()
+
+ resp, _ = self.object_client.list_object_metadata(
+ self.container_name,
+ object_name)
+ self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+ self.assertHeaders(resp, 'Object', 'HEAD')
+ self.assertNotIn('x-object-meta-', str(resp))
+
+ @test.attr(type='smoke')
+ def test_list_object_metadata_with_x_object_manifest(self):
+ # get object metadata with x_object_manifest
+
+ # uploading segments
+ object_name, data_segments = self._upload_segments()
+ # creating a manifest file
+ object_prefix = '%s/%s' % (self.container_name, object_name)
+ metadata = {'X-Object-Manifest': object_prefix}
+ data_empty = ''
+ resp, _ = self.object_client.create_object(
+ self.container_name,
+ object_name,
+ data_empty,
+ metadata=metadata)
+
+ resp, _ = self.object_client.list_object_metadata(
+ self.container_name,
+ object_name)
+ self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+
+ # Check only the existence of common headers with custom matcher
+ self.assertThat(resp, custom_matchers.ExistsAllResponseHeaders(
+ 'Object', 'HEAD'))
+ self.assertIn('x-object-manifest', resp)
+
+ # The Etag value of a large object is enclosed in double quotes.
+ # This is a special case, so the response header formats are checked
+ # without a custom matcher.
+ self.assertTrue(resp['etag'].startswith('\"'))
+ self.assertTrue(resp['etag'].endswith('\"'))
+ self.assertTrue(resp['etag'].strip('\"').isalnum())
+ self.assertTrue(re.match("^\d+\.?\d*\Z", resp['x-timestamp']))
+ self.assertNotEqual(len(resp['content-type']), 0)
+ self.assertTrue(re.match("^tx[0-9a-f]*-[0-9a-f]*$",
+ resp['x-trans-id']))
+ self.assertNotEqual(len(resp['date']), 0)
+ self.assertEqual(resp['accept-ranges'], 'bytes')
+ self.assertEqual(resp['x-object-manifest'],
+ '%s/%s' % (self.container_name, object_name))
@test.attr(type='smoke')
def test_get_object(self):
# retrieve object's data (in response body)
# create object
- object_name = data_utils.rand_name(name='TestObject')
- data = data_utils.arbitrary_string()
- resp, _ = self.object_client.create_object(self.container_name,
- object_name, data)
+ object_name, data = self._create_object()
# get object
resp, body = self.object_client.get_object(self.container_name,
object_name)
@@ -109,6 +320,183 @@
self.assertEqual(body, data)
@test.attr(type='smoke')
+ def test_get_object_with_metadata(self):
+ # get object with metadata
+ object_name = data_utils.rand_name(name='TestObject')
+ data = data_utils.arbitrary_string()
+ metadata = {'X-Object-Meta-test-meta': 'Meta'}
+ self.object_client.create_object(self.container_name,
+ object_name,
+ data,
+ metadata=metadata)
+ resp, body = self.object_client.get_object(
+ self.container_name,
+ object_name,
+ metadata=None)
+ self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+ self.assertHeaders(resp, 'Object', 'GET')
+ self.assertIn('x-object-meta-test-meta', resp)
+ self.assertEqual(resp['x-object-meta-test-meta'], 'Meta')
+ self.assertEqual(body, data)
+
+ @test.attr(type='smoke')
+ def test_get_object_with_range(self):
+ # get object with range
+ object_name = data_utils.rand_name(name='TestObject')
+ data = data_utils.arbitrary_string(100)
+ self.object_client.create_object(self.container_name,
+ object_name,
+ data,
+ metadata=None)
+ rand_num = random.randint(3, len(data) - 1)
+ metadata = {'Range': 'bytes=%s-%s' % (rand_num - 3, rand_num - 1)}
+ resp, body = self.object_client.get_object(
+ self.container_name,
+ object_name,
+ metadata=metadata)
+ self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+ self.assertHeaders(resp, 'Object', 'GET')
+ self.assertEqual(body, data[rand_num - 3: rand_num])
+
+ @test.attr(type='smoke')
+ def test_get_object_with_x_object_manifest(self):
+ # get object with x_object_manifest
+
+ # uploading segments
+ object_name, data_segments = self._upload_segments()
+ # creating a manifest file
+ object_prefix = '%s/%s' % (self.container_name, object_name)
+ metadata = {'X-Object-Manifest': object_prefix}
+ data_empty = ''
+ resp, body = self.object_client.create_object(
+ self.container_name,
+ object_name,
+ data_empty,
+ metadata=metadata)
+
+ resp, body = self.object_client.get_object(
+ self.container_name,
+ object_name,
+ metadata=None)
+ self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+
+ # Check only the existence of common headers with custom matcher
+ self.assertThat(resp, custom_matchers.ExistsAllResponseHeaders(
+ 'Object', 'GET'))
+ self.assertIn('x-object-manifest', resp)
+
+ # The Etag value of a large object is enclosed in double quotes.
+ # This is a special case, so the response header formats are checked
+ # without a custom matcher.
+ self.assertTrue(resp['etag'].startswith('\"'))
+ self.assertTrue(resp['etag'].endswith('\"'))
+ self.assertTrue(resp['etag'].strip('\"').isalnum())
+ self.assertTrue(re.match("^\d+\.?\d*\Z", resp['x-timestamp']))
+ self.assertNotEqual(len(resp['content-type']), 0)
+ self.assertTrue(re.match("^tx[0-9a-f]*-[0-9a-f]*$",
+ resp['x-trans-id']))
+ self.assertNotEqual(len(resp['date']), 0)
+ self.assertEqual(resp['accept-ranges'], 'bytes')
+ self.assertEqual(resp['x-object-manifest'],
+ '%s/%s' % (self.container_name, object_name))
+
+ self.assertEqual(''.join(data_segments), body)
+
+ @test.attr(type='smoke')
+ def test_get_object_with_if_match(self):
+ # get object with if_match
+ object_name = data_utils.rand_name(name='TestObject')
+ data = data_utils.arbitrary_string(10)
+ create_md5 = hashlib.md5(data).hexdigest()
+ create_metadata = {'Etag': create_md5}
+ self.object_client.create_object(self.container_name,
+ object_name,
+ data,
+ metadata=create_metadata)
+
+ list_metadata = {'If-Match': create_md5}
+ resp, body = self.object_client.get_object(
+ self.container_name,
+ object_name,
+ metadata=list_metadata)
+ self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+ self.assertHeaders(resp, 'Object', 'GET')
+ self.assertEqual(body, data)
+
+ @test.attr(type='smoke')
+ def test_get_object_with_if_modified_since(self):
+ # get object with if_modified_since
+ object_name = data_utils.rand_name(name='TestObject')
+ data = data_utils.arbitrary_string()
+ time_now = time.time()
+ self.object_client.create_object(self.container_name,
+ object_name,
+ data,
+ metadata=None)
+
+ http_date = time.ctime(time_now - 86400)
+ list_metadata = {'If-Modified-Since': http_date}
+ resp, body = self.object_client.get_object(
+ self.container_name,
+ object_name,
+ metadata=list_metadata)
+ self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+ self.assertHeaders(resp, 'Object', 'GET')
+ self.assertEqual(body, data)
+
+ def test_get_object_with_if_none_match(self):
+ # get object with if_none_match
+ object_name = data_utils.rand_name(name='TestObject')
+ data = data_utils.arbitrary_string(10)
+ create_md5 = hashlib.md5(data).hexdigest()
+ create_metadata = {'Etag': create_md5}
+ self.object_client.create_object(self.container_name,
+ object_name,
+ data,
+ metadata=create_metadata)
+
+ list_data = data_utils.arbitrary_string(15)
+ list_md5 = hashlib.md5(list_data).hexdigest()
+ list_metadata = {'If-None-Match': list_md5}
+ resp, body = self.object_client.get_object(
+ self.container_name,
+ object_name,
+ metadata=list_metadata)
+ self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+ self.assertHeaders(resp, 'Object', 'GET')
+ self.assertEqual(body, data)
+
+ @test.attr(type='smoke')
+ def test_get_object_with_if_unmodified_since(self):
+ # get object with if_unmodified_since
+ object_name, data = self._create_object()
+
+ time_now = time.time()
+ http_date = time.ctime(time_now + 86400)
+ list_metadata = {'If-Unmodified-Since': http_date}
+ resp, body = self.object_client.get_object(
+ self.container_name,
+ object_name,
+ metadata=list_metadata)
+ self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+ self.assertHeaders(resp, 'Object', 'GET')
+ self.assertEqual(body, data)
+
+ @test.attr(type='smoke')
+ def test_get_object_with_x_newest(self):
+ # get object with x_newest
+ object_name, data = self._create_object()
+
+ list_metadata = {'X-Newest': 'true'}
+ resp, body = self.object_client.get_object(
+ self.container_name,
+ object_name,
+ metadata=list_metadata)
+ self.assertIn(int(resp['status']), test.HTTP_SUCCESS)
+ self.assertHeaders(resp, 'Object', 'GET')
+ self.assertEqual(body, data)
+
+ @test.attr(type='smoke')
def test_copy_object_in_same_container(self):
# create source object
src_object_name = data_utils.rand_name(name='SrcObject')
@@ -286,10 +674,7 @@
# Make a conditional request for an object using the If-None-Match
# header, it should get downloaded only if the local file is different,
# otherwise the response code should be 304 Not Modified
- object_name = data_utils.rand_name(name='TestObject')
- data = data_utils.arbitrary_string()
- self.object_client.create_object(self.container_name,
- object_name, data)
+ object_name, data = self._create_object()
# local copy is identical, no download
md5 = hashlib.md5(data).hexdigest()
headers = {'If-None-Match': md5}
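One note on the metadata_prefix='' argument that the new metadata tests pass: the object client is assumed to prepend a prefix (X-Object-Meta- by default) to every metadata key before sending it, so tests that already spell out full X-Object-Meta- or X-Remove-Object-Meta- header names use an empty prefix to avoid double-prefixing. A rough sketch of that expansion, not the client's actual code:

def object_metadata_headers(metadata, metadata_prefix='X-Object-Meta-'):
    # Expand a metadata dict into request headers by prepending the prefix.
    return dict(('%s%s' % (metadata_prefix, key), value)
                for key, value in metadata.items())

# object_metadata_headers({'test-meta': 'Meta'})
#     -> {'X-Object-Meta-test-meta': 'Meta'}
# object_metadata_headers({'X-Remove-Object-Meta-test-meta1': 'Meta1'},
#                         metadata_prefix='')
#     -> {'X-Remove-Object-Meta-test-meta1': 'Meta1'}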
diff --git a/tempest/api/object_storage/test_object_temp_url_negative.py b/tempest/api/object_storage/test_object_temp_url_negative.py
index cf24f66..7d26433 100644
--- a/tempest/api/object_storage/test_object_temp_url_negative.py
+++ b/tempest/api/object_storage/test_object_temp_url_negative.py
@@ -27,7 +27,11 @@
class ObjectTempUrlNegativeTest(base.BaseObjectTest):
+ metadata = {}
+ containers = []
+
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(ObjectTempUrlNegativeTest, cls).setUpClass()
diff --git a/tempest/api/orchestration/base.py b/tempest/api/orchestration/base.py
index 3424082..4e40de9 100644
--- a/tempest/api/orchestration/base.py
+++ b/tempest/api/orchestration/base.py
@@ -10,9 +10,12 @@
# License for the specific language governing permissions and limitations
# under the License.
+import os.path
+
from tempest import clients
from tempest.common.utils import data_utils
from tempest import config
+from tempest import exceptions
from tempest.openstack.common import log as logging
import tempest.test
@@ -27,17 +30,18 @@
@classmethod
def setUpClass(cls):
super(BaseOrchestrationTest, cls).setUpClass()
- os = clients.OrchestrationManager()
+ cls.os = clients.Manager()
if not CONF.service_available.heat:
raise cls.skipException("Heat support is required")
cls.build_timeout = CONF.orchestration.build_timeout
cls.build_interval = CONF.orchestration.build_interval
- cls.os = os
- cls.orchestration_client = os.orchestration_client
- cls.servers_client = os.servers_client
- cls.keypairs_client = os.keypairs_client
- cls.network_client = os.network_client
+ cls.orchestration_client = cls.os.orchestration_client
+ cls.client = cls.orchestration_client
+ cls.servers_client = cls.os.servers_client
+ cls.keypairs_client = cls.os.keypairs_client
+ cls.network_client = cls.os.network_client
+ cls.volumes_client = cls.os.volumes_client
cls.stacks = []
cls.keypairs = []
@@ -50,11 +54,9 @@
@classmethod
def _get_identity_admin_client(cls):
- """
- Returns an instance of the Identity Admin API client
- """
- os = clients.AdminManager(interface=cls._interface)
- admin_client = os.identity_client
+ """Returns an instance of the Identity Admin API client."""
+ manager = clients.AdminManager(interface=cls._interface)
+ admin_client = manager.identity_client
return admin_client
@classmethod
@@ -69,18 +71,18 @@
return stack_identifier
@classmethod
- def clear_stacks(cls):
+ def _clear_stacks(cls):
for stack_identifier in cls.stacks:
try:
- cls.orchestration_client.delete_stack(stack_identifier)
- except Exception:
+ cls.client.delete_stack(stack_identifier)
+ except exceptions.NotFound:
pass
for stack_identifier in cls.stacks:
try:
- cls.orchestration_client.wait_for_stack_status(
+ cls.client.wait_for_stack_status(
stack_identifier, 'DELETE_COMPLETE')
- except Exception:
+ except exceptions.NotFound:
pass
@classmethod
@@ -91,7 +93,7 @@
return body
@classmethod
- def clear_keypairs(cls):
+ def _clear_keypairs(cls):
for kp_name in cls.keypairs:
try:
cls.keypairs_client.delete_keypair(kp_name)
@@ -99,13 +101,44 @@
pass
@classmethod
+ def load_template(cls, name, ext='yaml'):
+ loc = ["stacks", "templates", "%s.%s" % (name, ext)]
+ fullpath = os.path.join(os.path.dirname(__file__), *loc)
+
+ with open(fullpath, "r") as f:
+ content = f.read()
+ return content
+
+ @classmethod
def tearDownClass(cls):
- cls.clear_stacks()
- cls.clear_keypairs()
+ cls._clear_stacks()
+ cls._clear_keypairs()
super(BaseOrchestrationTest, cls).tearDownClass()
@staticmethod
def stack_output(stack, output_key):
- """Return a stack output value for a give key."""
+ """Return a stack output value for a given key."""
return next((o['output_value'] for o in stack['outputs']
if o['output_key'] == output_key), None)
+
+ def assert_fields_in_dict(self, obj, *fields):
+ for field in fields:
+ self.assertIn(field, obj)
+
+ def list_resources(self, stack_identifier):
+ """Get a dict mapping of resource names to types."""
+ resp, resources = self.client.list_resources(stack_identifier)
+ self.assertEqual('200', resp['status'])
+ self.assertIsInstance(resources, list)
+ for res in resources:
+ self.assert_fields_in_dict(res, 'logical_resource_id',
+ 'resource_type', 'resource_status',
+ 'updated_time')
+
+ return dict((r['resource_name'], r['resource_type'])
+ for r in resources)
+
+ def get_stack_output(self, stack_identifier, output_key):
+ resp, body = self.client.get_stack(stack_identifier)
+ self.assertEqual('200', resp['status'])
+ return self.stack_output(body, output_key)
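A minimal usage sketch of the new load_template helper, assuming the tempest/api/orchestration/stacks/templates/ layout added by this change; it resolves the template file relative to base.py and returns its contents, which the stack tests below feed straight into create_stack:

import os.path

def load_template(base_dir, name, ext='yaml'):
    # Mirrors the classmethod above: read
    # <base_dir>/stacks/templates/<name>.<ext> and return its contents.
    fullpath = os.path.join(base_dir, 'stacks', 'templates',
                            '%s.%s' % (name, ext))
    with open(fullpath, 'r') as f:
        return f.read()

# e.g. template = load_template(os.path.dirname(__file__), 'nova_keypair',
#                               ext='json')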
diff --git a/tempest/api/orchestration/stacks/templates/cfn_init_signal.yaml b/tempest/api/orchestration/stacks/templates/cfn_init_signal.yaml
new file mode 100644
index 0000000..fa5345e
--- /dev/null
+++ b/tempest/api/orchestration/stacks/templates/cfn_init_signal.yaml
@@ -0,0 +1,82 @@
+HeatTemplateFormatVersion: '2012-12-12'
+Description: |
+ Template which uses a wait condition to confirm that a minimal
+ cfn-init and cfn-signal have worked
+Parameters:
+ key_name:
+ Type: String
+ flavor:
+ Type: String
+ image:
+ Type: String
+ network:
+ Type: String
+ timeout:
+ Type: Number
+Resources:
+ CfnUser:
+ Type: AWS::IAM::User
+ SmokeSecurityGroup:
+ Type: AWS::EC2::SecurityGroup
+ Properties:
+ GroupDescription: Enable only ping and SSH access
+ SecurityGroupIngress:
+ - {CidrIp: 0.0.0.0/0, FromPort: '-1', IpProtocol: icmp, ToPort: '-1'}
+ - {CidrIp: 0.0.0.0/0, FromPort: '22', IpProtocol: tcp, ToPort: '22'}
+ SmokeKeys:
+ Type: AWS::IAM::AccessKey
+ Properties:
+ UserName: {Ref: CfnUser}
+ SmokeServer:
+ Type: OS::Nova::Server
+ Metadata:
+ AWS::CloudFormation::Init:
+ config:
+ files:
+ /tmp/smoke-status:
+ content: smoke test complete
+ /etc/cfn/cfn-credentials:
+ content:
+ Fn::Replace:
+ - SmokeKeys: {Ref: SmokeKeys}
+ SecretAccessKey:
+ 'Fn::GetAtt': [SmokeKeys, SecretAccessKey]
+ - |
+ AWSAccessKeyId=SmokeKeys
+ AWSSecretKey=SecretAccessKey
+ mode: '000400'
+ owner: root
+ group: root
+ Properties:
+ image: {Ref: image}
+ flavor: {Ref: flavor}
+ key_name: {Ref: key_name}
+ security_groups:
+ - {Ref: SmokeSecurityGroup}
+ networks:
+ - uuid: {Ref: network}
+ user_data:
+ Fn::Replace:
+ - WaitHandle: {Ref: WaitHandle}
+ - |
+ #!/bin/bash -v
+ /opt/aws/bin/cfn-init
+ /opt/aws/bin/cfn-signal -e 0 --data "`cat /tmp/smoke-status`" \
+ "WaitHandle"
+ WaitHandle:
+ Type: AWS::CloudFormation::WaitConditionHandle
+ WaitCondition:
+ Type: AWS::CloudFormation::WaitCondition
+ DependsOn: SmokeServer
+ Properties:
+ Handle: {Ref: WaitHandle}
+ Timeout: {Ref: timeout}
+Outputs:
+ WaitConditionStatus:
+ Description: Contents of /tmp/smoke-status on SmokeServer
+ Value:
+ Fn::GetAtt: [WaitCondition, Data]
+ SmokeServerIp:
+ Description: IP address of server
+ Value:
+ Fn::GetAtt: [SmokeServer, first_address]
diff --git a/tempest/api/orchestration/stacks/templates/cinder_basic.yaml b/tempest/api/orchestration/stacks/templates/cinder_basic.yaml
new file mode 100644
index 0000000..3e03a30
--- /dev/null
+++ b/tempest/api/orchestration/stacks/templates/cinder_basic.yaml
@@ -0,0 +1,24 @@
+heat_template_version: 2013-05-23
+
+resources:
+ volume:
+ type: OS::Cinder::Volume
+ properties:
+ size: 1
+ description: a descriptive description
+
+outputs:
+ status:
+ description: status
+ value: { get_attr: ['volume', 'status'] }
+
+ size:
+ description: size
+ value: { get_attr: ['volume', 'size'] }
+
+ display_description:
+ description: display_description
+ value: { get_attr: ['volume', 'display_description'] }
+
+ volume_id:
+ value: { get_resource: volume }
diff --git a/tempest/api/orchestration/stacks/templates/cinder_basic_delete_retain.yaml b/tempest/api/orchestration/stacks/templates/cinder_basic_delete_retain.yaml
new file mode 100644
index 0000000..08e3da4
--- /dev/null
+++ b/tempest/api/orchestration/stacks/templates/cinder_basic_delete_retain.yaml
@@ -0,0 +1,25 @@
+heat_template_version: 2013-05-23
+
+resources:
+ volume:
+ deletion_policy: 'Retain'
+ type: OS::Cinder::Volume
+ properties:
+ size: 1
+ description: a descriptive description
+
+outputs:
+ status:
+ description: status
+ value: { get_attr: ['volume', 'status'] }
+
+ size:
+ description: size
+ value: { get_attr: ['volume', 'size'] }
+
+ display_description:
+ description: display_description
+ value: { get_attr: ['volume', 'display_description'] }
+
+ volume_id:
+ value: { get_resource: volume }
diff --git a/tempest/api/orchestration/stacks/templates/neutron_basic.yaml b/tempest/api/orchestration/stacks/templates/neutron_basic.yaml
new file mode 100644
index 0000000..63b03f4
--- /dev/null
+++ b/tempest/api/orchestration/stacks/templates/neutron_basic.yaml
@@ -0,0 +1,70 @@
+heat_template_version: '2013-05-23'
+description: |
+ Template which creates a single Nova server
+parameters:
+ KeyName:
+ type: string
+ InstanceType:
+ type: string
+ ImageId:
+ type: string
+ ExternalRouterId:
+ type: string
+ ExternalNetworkId:
+ type: string
+ timeout:
+ type: number
+resources:
+ Network:
+ type: OS::Neutron::Net
+ properties:
+ name: NewNetwork
+ Subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ network_id: {Ref: Network}
+ name: NewSubnet
+ ip_version: 4
+ cidr: 10.0.3.0/24
+ dns_nameservers: ["8.8.8.8"]
+ allocation_pools:
+ - {end: 10.0.3.150, start: 10.0.3.20}
+ Router:
+ type: OS::Neutron::Router
+ properties:
+ name: NewRouter
+ admin_state_up: false
+ external_gateway_info:
+ network: {get_param: ExternalNetworkId}
+ RouterInterface:
+ type: OS::Neutron::RouterInterface
+ properties:
+ router_id: {get_param: ExternalRouterId}
+ subnet_id: {get_resource: Subnet}
+ Server:
+ type: OS::Nova::Server
+ metadata:
+ Name: SmokeServerNeutron
+ properties:
+ image: {get_param: ImageId}
+ flavor: {get_param: InstanceType}
+ key_name: {get_param: KeyName}
+ networks:
+ - network: {get_resource: Network}
+ user_data:
+ str_replace:
+ template: |
+ #!/bin/bash -v
+
+ /opt/aws/bin/cfn-signal -e 0 -r "SmokeServerNeutron created" \
+ 'wait_handle'
+ params:
+ wait_handle: {get_resource: WaitHandleNeutron}
+ WaitHandleNeutron:
+ type: AWS::CloudFormation::WaitConditionHandle
+ WaitCondition:
+ type: AWS::CloudFormation::WaitCondition
+ depends_on: Server
+ properties:
+ Handle: {get_resource: WaitHandleNeutron}
+ Timeout: {get_param: timeout}
diff --git a/tempest/api/orchestration/stacks/templates/non_empty_stack.yaml b/tempest/api/orchestration/stacks/templates/non_empty_stack.yaml
new file mode 100644
index 0000000..58a934e
--- /dev/null
+++ b/tempest/api/orchestration/stacks/templates/non_empty_stack.yaml
@@ -0,0 +1,32 @@
+HeatTemplateFormatVersion: '2012-12-12'
+Description: |
+ Template which creates some simple resources
+Parameters:
+ trigger:
+ Type: String
+ Default: not_yet
+Resources:
+ fluffy:
+ Type: AWS::AutoScaling::LaunchConfiguration
+ Metadata:
+ kittens:
+ - Tom
+ - Stinky
+ Properties:
+ ImageId: not_used
+ InstanceType: not_used
+ UserData:
+ Fn::Replace:
+ - variable_a: {Ref: trigger}
+ variable_b: bee
+ - |
+ A == variable_a
+ B == variable_b
+Outputs:
+ fluffy:
+ Description: "fluffies irc nick"
+ Value:
+ Fn::Replace:
+ - nick: {Ref: fluffy}
+ - |
+ #nick
diff --git a/tempest/api/orchestration/stacks/templates/nova_keypair.json b/tempest/api/orchestration/stacks/templates/nova_keypair.json
new file mode 100644
index 0000000..63d3817
--- /dev/null
+++ b/tempest/api/orchestration/stacks/templates/nova_keypair.json
@@ -0,0 +1,48 @@
+{
+ "AWSTemplateFormatVersion" : "2010-09-09",
+ "Description" : "Template which create two key pairs.",
+ "Parameters" : {
+ "KeyPairName1": {
+ "Type": "String",
+ "Default": "testkey1"
+ },
+ "KeyPairName2": {
+ "Type": "String",
+ "Default": "testkey2"
+ }
+ },
+ "Resources" : {
+ "KeyPairSavePrivate": {
+ "Type": "OS::Nova::KeyPair",
+ "Properties": {
+ "name" : { "Ref" : "KeyPairName1" },
+ "save_private_key": true
+ }
+ },
+ "KeyPairDontSavePrivate": {
+ "Type": "OS::Nova::KeyPair",
+ "Properties": {
+ "name" : { "Ref" : "KeyPairName2" },
+ "save_private_key": false
+ }
+ }
+ },
+ "Outputs": {
+ "KeyPair_PublicKey": {
+ "Description": "Public Key of generated keypair.",
+ "Value": { "Fn::GetAtt" : ["KeyPairSavePrivate", "public_key"] }
+ },
+ "KeyPair_PrivateKey": {
+ "Description": "Private Key of generated keypair.",
+ "Value": { "Fn::GetAtt" : ["KeyPairSavePrivate", "private_key"] }
+ },
+ "KeyPairDontSavePrivate_PublicKey": {
+ "Description": "Public Key of generated keypair.",
+ "Value": { "Fn::GetAtt" : ["KeyPairDontSavePrivate", "public_key"] }
+ },
+ "KeyPairDontSavePrivate_PrivateKey": {
+ "Description": "Private Key of generated keypair.",
+ "Value": { "Fn::GetAtt" : ["KeyPairDontSavePrivate", "private_key"] }
+ }
+ }
+}
diff --git a/tempest/api/orchestration/stacks/templates/nova_keypair.yaml b/tempest/api/orchestration/stacks/templates/nova_keypair.yaml
new file mode 100644
index 0000000..81ad99c
--- /dev/null
+++ b/tempest/api/orchestration/stacks/templates/nova_keypair.yaml
@@ -0,0 +1,43 @@
+heat_template_version: 2013-05-23
+
+description: >
+ Template which creates two key pairs.
+
+parameters:
+ KeyPairName1:
+ type: string
+ default: testkey
+
+ KeyPairName2:
+ type: string
+ default: testkey2
+
+resources:
+ KeyPairSavePrivate:
+ type: OS::Nova::KeyPair
+ properties:
+ name: { get_param: KeyPairName1 }
+ save_private_key: true
+
+ KeyPairDontSavePrivate:
+ type: OS::Nova::KeyPair
+ properties:
+ name: { get_param: KeyPairName2 }
+ save_private_key: false
+
+outputs:
+ KeyPair_PublicKey:
+ description: Public Key of generated keypair
+ value: { get_attr: [KeyPairSavePrivate, public_key] }
+
+ KeyPair_PrivateKey:
+ description: Private Key of generated keypair
+ value: { get_attr: [KeyPairSavePrivate, private_key] }
+
+ KeyPairDontSavePrivate_PublicKey:
+ description: Public Key of generated keypair
+ value: { get_attr: [KeyPairDontSavePrivate, public_key] }
+
+ KeyPairDontSavePrivate_PrivateKey:
+ description: Private Key of generated keypair
+ value: { get_attr: [KeyPairDontSavePrivate, private_key] }
diff --git a/tempest/api/orchestration/stacks/templates/swift_basic.yaml b/tempest/api/orchestration/stacks/templates/swift_basic.yaml
new file mode 100644
index 0000000..713f8bc
--- /dev/null
+++ b/tempest/api/orchestration/stacks/templates/swift_basic.yaml
@@ -0,0 +1,23 @@
+heat_template_version: 2013-05-23
+description: Template which creates a Swift container resource
+
+resources:
+ SwiftContainerWebsite:
+ deletion_policy: "Delete"
+ type: OS::Swift::Container
+ properties:
+ X-Container-Read: ".r:*"
+ X-Container-Meta:
+ web-index: "index.html"
+ web-error: "error.html"
+
+ SwiftContainer:
+ type: OS::Swift::Container
+
+outputs:
+ WebsiteURL:
+ description: "URL for website hosted on S3"
+ value: { get_attr: [SwiftContainer, WebsiteURL] }
+ DomainName:
+ description: "Domain of Swift host"
+ value: { get_attr: [SwiftContainer, DomainName] }
diff --git a/tempest/api/orchestration/stacks/test_limits.py b/tempest/api/orchestration/stacks/test_limits.py
index 22f544d..283ab2b 100644
--- a/tempest/api/orchestration/stacks/test_limits.py
+++ b/tempest/api/orchestration/stacks/test_limits.py
@@ -24,16 +24,10 @@
class TestServerStackLimits(base.BaseOrchestrationTest):
- _interface = 'json'
-
- @classmethod
- def setUpClass(cls):
- super(TestServerStackLimits, cls).setUpClass()
- cls.client = cls.orchestration_client
- cls.stack_name = data_utils.rand_name('heat')
@attr(type='gate')
def test_exceed_max_template_size_fails(self):
+ stack_name = data_utils.rand_name('heat')
fill = 'A' * CONF.orchestration.max_template_size
template = '''
HeatTemplateFormatVersion: '2012-12-12'
@@ -41,5 +35,19 @@
Outputs:
Foo: bar''' % fill
ex = self.assertRaises(exceptions.BadRequest, self.create_stack,
- self.stack_name, template)
+ stack_name, template)
self.assertIn('Template exceeds maximum allowed size', str(ex))
+
+ @attr(type='gate')
+ def test_exceed_max_resources_per_stack(self):
+ stack_name = data_utils.rand_name('heat')
+ # Create a big template, one resource more than the limit
+ template = 'heat_template_version: \'2013-05-23\'\nresources:\n'
+ rsrc_snippet = ' random%s:\n type: \'OS::Heat::RandomString\'\n'
+ num_resources = CONF.orchestration.max_resources_per_stack + 1
+ for i in range(num_resources):
+ template += rsrc_snippet % i
+
+ ex = self.assertRaises(exceptions.BadRequest, self.create_stack,
+ stack_name, template)
+ self.assertIn('Maximum resources per stack exceeded', str(ex))
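For illustration, the template the new resource-limit test assembles, with a hypothetical max_resources_per_stack of 2 (the YAML indentation shown here is assumed, since the diff rendering collapses whitespace):

template = "heat_template_version: '2013-05-23'\nresources:\n"
for i in range(2 + 1):
    template += "  random%s:\n    type: 'OS::Heat::RandomString'\n" % i
# template now declares random0, random1 and random2, one resource more
# than the hypothetical limit, so create_stack raises BadRequest with
# 'Maximum resources per stack exceeded'.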
diff --git a/tempest/api/orchestration/stacks/test_neutron_resources.py b/tempest/api/orchestration/stacks/test_neutron_resources.py
index 18ba37b..3086d78 100644
--- a/tempest/api/orchestration/stacks/test_neutron_resources.py
+++ b/tempest/api/orchestration/stacks/test_neutron_resources.py
@@ -18,7 +18,7 @@
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
-from tempest.test import attr
+from tempest import test
CONF = config.CONF
@@ -26,90 +26,19 @@
class NeutronResourcesTestJSON(base.BaseOrchestrationTest):
- _interface = 'json'
-
- template = """
-heat_template_version: '2013-05-23'
-description: |
- Template which creates single EC2 instance
-parameters:
- KeyName:
- type: string
- InstanceType:
- type: string
- ImageId:
- type: string
- ExternalRouterId:
- type: string
- ExternalNetworkId:
- type: string
-resources:
- Network:
- type: OS::Neutron::Net
- properties:
- name: NewNetwork
- Subnet:
- type: OS::Neutron::Subnet
- properties:
- network_id: {Ref: Network}
- name: NewSubnet
- ip_version: 4
- cidr: 10.0.3.0/24
- dns_nameservers: ["8.8.8.8"]
- allocation_pools:
- - {end: 10.0.3.150, start: 10.0.3.20}
- Router:
- type: OS::Neutron::Router
- properties:
- name: NewRouter
- admin_state_up: false
- external_gateway_info:
- network: {get_param: ExternalNetworkId}
- enable_snat: false
- RouterInterface:
- type: OS::Neutron::RouterInterface
- properties:
- router_id: {get_param: ExternalRouterId}
- subnet_id: {get_resource: Subnet}
- Server:
- type: AWS::EC2::Instance
- metadata:
- Name: SmokeServerNeutron
- properties:
- ImageId: {get_param: ImageId}
- InstanceType: {get_param: InstanceType}
- KeyName: {get_param: KeyName}
- SubnetId: {get_resource: Subnet}
- UserData:
- str_replace:
- template: |
- #!/bin/bash -v
-
- /opt/aws/bin/cfn-signal -e 0 -r "SmokeServerNeutron created" \
- 'wait_handle'
- params:
- wait_handle: {get_resource: WaitHandleNeutron}
- WaitHandleNeutron:
- type: AWS::CloudFormation::WaitConditionHandle
- WaitCondition:
- type: AWS::CloudFormation::WaitCondition
- depends_on: Server
- properties:
- Handle: {get_resource: WaitHandleNeutron}
- Timeout: '600'
-"""
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(NeutronResourcesTestJSON, cls).setUpClass()
if not CONF.orchestration.image_ref:
raise cls.skipException("No image available to test")
- cls.client = cls.orchestration_client
os = clients.Manager()
if not CONF.service_available.neutron:
raise cls.skipException("Neutron support is required")
cls.network_client = os.network_client
cls.stack_name = data_utils.rand_name('heat')
+ template = cls.load_template('neutron_basic')
cls.keypair_name = (CONF.orchestration.keypair_name or
cls._create_keypair()['name'])
cls.external_router_id = cls._get_external_router_id()
@@ -118,13 +47,14 @@
# create the stack
cls.stack_identifier = cls.create_stack(
cls.stack_name,
- cls.template,
+ template,
parameters={
'KeyName': cls.keypair_name,
'InstanceType': CONF.orchestration.instance_type,
'ImageId': CONF.orchestration.image_ref,
'ExternalRouterId': cls.external_router_id,
- 'ExternalNetworkId': cls.external_network_id
+ 'ExternalNetworkId': cls.external_network_id,
+ 'timeout': CONF.orchestration.build_timeout
})
cls.stack_id = cls.stack_identifier.split('/')[1]
try:
@@ -155,13 +85,13 @@
'network:router_interface', ports)
return router_ports[0]['device_id']
- @attr(type='slow')
+ @test.attr(type='slow')
def test_created_resources(self):
"""Verifies created neutron resources."""
resources = [('Network', 'OS::Neutron::Net'),
('Subnet', 'OS::Neutron::Subnet'),
('RouterInterface', 'OS::Neutron::RouterInterface'),
- ('Server', 'AWS::EC2::Instance')]
+ ('Server', 'OS::Nova::Server')]
for resource_name, resource_type in resources:
resource = self.test_resources.get(resource_name, None)
self.assertIsInstance(resource, dict)
@@ -169,7 +99,7 @@
self.assertEqual(resource_type, resource['resource_type'])
self.assertEqual('CREATE_COMPLETE', resource['resource_status'])
- @attr(type='slow')
+ @test.attr(type='slow')
def test_created_network(self):
"""Verifies created network."""
network_id = self.test_resources.get('Network')['physical_resource_id']
@@ -180,7 +110,7 @@
self.assertEqual(network_id, network['id'])
self.assertEqual('NewNetwork', network['name'])
- @attr(type='slow')
+ @test.attr(type='slow')
def test_created_subnet(self):
"""Verifies created subnet."""
subnet_id = self.test_resources.get('Subnet')['physical_resource_id']
@@ -197,7 +127,7 @@
self.assertEqual(4, subnet['ip_version'])
self.assertEqual('10.0.3.0/24', subnet['cidr'])
- @attr(type='slow')
+ @test.attr(type='slow')
def test_created_router(self):
"""Verifies created router."""
router_id = self.test_resources.get('Router')['physical_resource_id']
@@ -207,11 +137,9 @@
self.assertEqual('NewRouter', router['name'])
self.assertEqual(self.external_network_id,
router['external_gateway_info']['network_id'])
- self.assertEqual(False,
- router['external_gateway_info']['enable_snat'])
self.assertEqual(False, router['admin_state_up'])
- @attr(type='slow')
+ @test.attr(type='slow')
def test_created_router_interface(self):
"""Verifies created router interface."""
network_id = self.test_resources.get('Network')['physical_resource_id']
@@ -232,7 +160,7 @@
router_interface_ip = subnet_fixed_ips[0]['ip_address']
self.assertEqual('10.0.3.1', router_interface_ip)
- @attr(type='slow')
+ @test.attr(type='slow')
def test_created_server(self):
"""Verifies created sever."""
server_id = self.test_resources.get('Server')['physical_resource_id']
diff --git a/tempest/api/orchestration/stacks/test_non_empty_stack.py b/tempest/api/orchestration/stacks/test_non_empty_stack.py
index 11d01f7..9ef95a1 100644
--- a/tempest/api/orchestration/stacks/test_non_empty_stack.py
+++ b/tempest/api/orchestration/stacks/test_non_empty_stack.py
@@ -21,53 +21,17 @@
class StacksTestJSON(base.BaseOrchestrationTest):
- _interface = 'json'
-
- template = """
-HeatTemplateFormatVersion: '2012-12-12'
-Description: |
- Template which creates some simple resources
-Parameters:
- trigger:
- Type: String
- Default: not_yet
-Resources:
- fluffy:
- Type: AWS::AutoScaling::LaunchConfiguration
- Metadata:
- kittens:
- - Tom
- - Stinky
- Properties:
- ImageId: not_used
- InstanceType: not_used
- UserData:
- Fn::Replace:
- - variable_a: {Ref: trigger}
- variable_b: bee
- - |
- A == variable_a
- B == variable_b
-Outputs:
- fluffy:
- Description: "fluffies irc nick"
- Value:
- Fn::Replace:
- - nick: {Ref: fluffy}
- - |
- #nick
-"""
@classmethod
def setUpClass(cls):
super(StacksTestJSON, cls).setUpClass()
- cls.client = cls.orchestration_client
cls.stack_name = data_utils.rand_name('heat')
+ template = cls.load_template('non_empty_stack')
# create the stack
cls.stack_identifier = cls.create_stack(
cls.stack_name,
- cls.template,
+ template,
parameters={
'trigger': 'start'
})
@@ -76,16 +40,18 @@
cls.resource_type = 'AWS::AutoScaling::LaunchConfiguration'
cls.client.wait_for_stack_status(cls.stack_id, 'CREATE_COMPLETE')
- def assert_fields_in_dict(self, obj, *fields):
- for field in fields:
- self.assertIn(field, obj)
+ def _list_stacks(self, expected_num=None, **filter_kwargs):
+ resp, stacks = self.client.list_stacks(params=filter_kwargs)
+ self.assertEqual('200', resp['status'])
+ self.assertIsInstance(stacks, list)
+ if expected_num is not None:
+ self.assertEqual(expected_num, len(stacks))
+ return stacks
@attr(type='gate')
def test_stack_list(self):
- """Created stack should be on the list of existing stacks."""
- resp, stacks = self.client.list_stacks()
- self.assertEqual('200', resp['status'])
- self.assertIsInstance(stacks, list)
+ """Created stack should be in the list of existing stacks."""
+ stacks = self._list_stacks()
stacks_names = map(lambda stack: stack['stack_name'], stacks)
self.assertIn(self.stack_name, stacks_names)
@@ -111,7 +77,7 @@
@attr(type='gate')
def test_suspend_resume_stack(self):
- """suspend and resume a stack."""
+ """Suspend and resume a stack."""
resp, suspend_stack = self.client.suspend_stack(self.stack_identifier)
self.assertEqual('200', resp['status'])
self.client.wait_for_stack_status(self.stack_identifier,
@@ -125,20 +91,8 @@
def test_list_resources(self):
"""Getting list of created resources for the stack should be possible.
"""
- resp, resources = self.client.list_resources(self.stack_identifier)
- self.assertEqual('200', resp['status'])
- self.assertIsInstance(resources, list)
- for res in resources:
- self.assert_fields_in_dict(res, 'logical_resource_id',
- 'resource_type', 'resource_status',
- 'updated_time')
-
- resources_names = map(lambda resource: resource['logical_resource_id'],
- resources)
- self.assertIn(self.resource_name, resources_names)
- resources_types = map(lambda resource: resource['resource_type'],
- resources)
- self.assertIn(self.resource_type, resources_types)
+ resources = self.list_resources(self.stack_identifier)
+ self.assertEqual({self.resource_name: self.resource_type}, resources)
@attr(type='gate')
def test_show_resource(self):
@@ -156,7 +110,7 @@
@attr(type='gate')
def test_resource_metadata(self):
- """Getting metadata for created resource should be possible."""
+ """Getting metadata for created resources should be possible."""
resp, metadata = self.client.show_resource_metadata(
self.stack_identifier,
self.resource_name)
@@ -182,7 +136,7 @@
@attr(type='gate')
def test_show_event(self):
- """Getting details about existing event should be possible."""
+ """Getting details about an event should be possible."""
resp, events = self.client.list_resource_events(self.stack_identifier,
self.resource_name)
self.assertNotEqual([], events)
diff --git a/tempest/api/orchestration/stacks/test_nova_keypair_resources.py b/tempest/api/orchestration/stacks/test_nova_keypair_resources.py
index 9d3bf13..7f088de 100644
--- a/tempest/api/orchestration/stacks/test_nova_keypair_resources.py
+++ b/tempest/api/orchestration/stacks/test_nova_keypair_resources.py
@@ -22,63 +22,18 @@
class NovaKeyPairResourcesYAMLTest(base.BaseOrchestrationTest):
- _interface = 'json'
- template = """
-heat_template_version: 2013-05-23
-
-description: >
- Template which creates two key pairs.
-
-parameters:
- KeyPairName1:
- type: string
- default: testkey
-
- KeyPairName2:
- type: string
- default: testkey2
-
-resources:
- KeyPairSavePrivate:
- type: OS::Nova::KeyPair
- properties:
- name: { get_param: KeyPairName1 }
- save_private_key: true
-
- KeyPairDontSavePrivate:
- type: OS::Nova::KeyPair
- properties:
- name: { get_param: KeyPairName2 }
- save_private_key: false
-
-outputs:
- KeyPair_PublicKey:
- description: Public Key of generated keypair
- value: { get_attr: [KeyPairSavePrivate, public_key] }
-
- KeyPair_PrivateKey:
- description: Private Key of generated keypair
- value: { get_attr: [KeyPairSavePrivate, private_key] }
-
- KeyPairDontSavePrivate_PublicKey:
- description: Public Key of generated keypair
- value: { get_attr: [KeyPairDontSavePrivate, public_key] }
-
- KeyPairDontSavePrivate_PrivateKey:
- description: Private Key of generated keypair
- value: { get_attr: [KeyPairDontSavePrivate, private_key] }
-"""
+ _tpl_type = 'yaml'
@classmethod
def setUpClass(cls):
super(NovaKeyPairResourcesYAMLTest, cls).setUpClass()
- cls.client = cls.orchestration_client
cls.stack_name = data_utils.rand_name('heat')
+ template = cls.load_template('nova_keypair', ext=cls._tpl_type)
# create the stack, avoid any duplicated key.
cls.stack_identifier = cls.create_stack(
cls.stack_name,
- cls.template,
+ template,
parameters={
'KeyPairName1': cls.stack_name + '_1',
'KeyPairName2': cls.stack_name + '_2'
@@ -115,13 +70,13 @@
output_map[outputs['output_key']] = outputs['output_value']
#Test that first key generated public and private keys
self.assertTrue('KeyPair_PublicKey' in output_map)
- self.assertTrue("Generated by" in output_map['KeyPair_PublicKey'])
+ self.assertTrue("Generated" in output_map['KeyPair_PublicKey'])
self.assertTrue('KeyPair_PrivateKey' in output_map)
self.assertTrue('-----BEGIN' in output_map['KeyPair_PrivateKey'])
#Test that second key generated public key, and private key is not
#in the output due to save_private_key = false
self.assertTrue('KeyPairDontSavePrivate_PublicKey' in output_map)
- self.assertTrue('Generated by' in
+ self.assertTrue('Generated' in
output_map['KeyPairDontSavePrivate_PublicKey'])
self.assertTrue(u'KeyPairDontSavePrivate_PrivateKey' in output_map)
private_key = output_map['KeyPairDontSavePrivate_PrivateKey']
@@ -129,53 +84,4 @@
class NovaKeyPairResourcesAWSTest(NovaKeyPairResourcesYAMLTest):
- template = """
-{
- "AWSTemplateFormatVersion" : "2010-09-09",
- "Description" : "Template which create two key pairs.",
- "Parameters" : {
- "KeyPairName1": {
- "Type": "String",
- "Default": "testkey1"
- },
- "KeyPairName2": {
- "Type": "String",
- "Default": "testkey2"
- }
- },
- "Resources" : {
- "KeyPairSavePrivate": {
- "Type": "OS::Nova::KeyPair",
- "Properties": {
- "name" : { "Ref" : "KeyPairName1" },
- "save_private_key": true
- }
- },
- "KeyPairDontSavePrivate": {
- "Type": "OS::Nova::KeyPair",
- "Properties": {
- "name" : { "Ref" : "KeyPairName2" },
- "save_private_key": false
- }
- }
- },
- "Outputs": {
- "KeyPair_PublicKey": {
- "Description": "Public Key of generated keypair.",
- "Value": { "Fn::GetAtt" : ["KeyPairSavePrivate", "public_key"] }
- },
- "KeyPair_PrivateKey": {
- "Description": "Private Key of generated keypair.",
- "Value": { "Fn::GetAtt" : ["KeyPairSavePrivate", "private_key"] }
- },
- "KeyPairDontSavePrivate_PublicKey": {
- "Description": "Public Key of generated keypair.",
- "Value": { "Fn::GetAtt" : ["KeyPairDontSavePrivate", "public_key"] }
- },
- "KeyPairDontSavePrivate_PrivateKey": {
- "Description": "Private Key of generated keypair.",
- "Value": { "Fn::GetAtt" : ["KeyPairDontSavePrivate", "private_key"] }
- }
- }
-}
-"""
+ _tpl_type = 'json'
diff --git a/tempest/api/orchestration/stacks/test_server_cfn_init.py b/tempest/api/orchestration/stacks/test_server_cfn_init.py
index 95deaf5..4b845b1 100644
--- a/tempest/api/orchestration/stacks/test_server_cfn_init.py
+++ b/tempest/api/orchestration/stacks/test_server_cfn_init.py
@@ -26,99 +26,15 @@
class ServerCfnInitTestJSON(base.BaseOrchestrationTest):
- _interface = 'json'
existing_keypair = CONF.orchestration.keypair_name is not None
- template = """
-HeatTemplateFormatVersion: '2012-12-12'
-Description: |
- Template which uses a wait condition to confirm that a minimal
- cfn-init and cfn-signal has worked
-Parameters:
- key_name:
- Type: String
- flavor:
- Type: String
- image:
- Type: String
- network:
- Type: String
-Resources:
- CfnUser:
- Type: AWS::IAM::User
- SmokeSecurityGroup:
- Type: AWS::EC2::SecurityGroup
- Properties:
- GroupDescription: Enable only ping and SSH access
- SecurityGroupIngress:
- - {CidrIp: 0.0.0.0/0, FromPort: '-1', IpProtocol: icmp, ToPort: '-1'}
- - {CidrIp: 0.0.0.0/0, FromPort: '22', IpProtocol: tcp, ToPort: '22'}
- SmokeKeys:
- Type: AWS::IAM::AccessKey
- Properties:
- UserName: {Ref: CfnUser}
- SmokeServer:
- Type: OS::Nova::Server
- Metadata:
- AWS::CloudFormation::Init:
- config:
- files:
- /tmp/smoke-status:
- content: smoke test complete
- /etc/cfn/cfn-credentials:
- content:
- Fn::Replace:
- - SmokeKeys: {Ref: SmokeKeys}
- SecretAccessKey:
- 'Fn::GetAtt': [SmokeKeys, SecretAccessKey]
- - |
- AWSAccessKeyId=SmokeKeys
- AWSSecretKey=SecretAccessKey
- mode: '000400'
- owner: root
- group: root
- Properties:
- image: {Ref: image}
- flavor: {Ref: flavor}
- key_name: {Ref: key_name}
- security_groups:
- - {Ref: SmokeSecurityGroup}
- networks:
- - uuid: {Ref: network}
- user_data:
- Fn::Replace:
- - WaitHandle: {Ref: WaitHandle}
- - |
- #!/bin/bash -v
- /opt/aws/bin/cfn-init
- /opt/aws/bin/cfn-signal -e 0 --data "`cat /tmp/smoke-status`" \
- "WaitHandle"
- WaitHandle:
- Type: AWS::CloudFormation::WaitConditionHandle
- WaitCondition:
- Type: AWS::CloudFormation::WaitCondition
- DependsOn: SmokeServer
- Properties:
- Handle: {Ref: WaitHandle}
- Timeout: '600'
-Outputs:
- WaitConditionStatus:
- Description: Contents of /tmp/smoke-status on SmokeServer
- Value:
- Fn::GetAtt: [WaitCondition, Data]
- SmokeServerIp:
- Description: IP address of server
- Value:
- Fn::GetAtt: [SmokeServer, first_address]
-"""
-
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(ServerCfnInitTestJSON, cls).setUpClass()
if not CONF.orchestration.image_ref:
raise cls.skipException("No image available to test")
- cls.client = cls.orchestration_client
-
+ template = cls.load_template('cfn_init_signal')
stack_name = data_utils.rand_name('heat')
if CONF.orchestration.keypair_name:
keypair_name = CONF.orchestration.keypair_name
@@ -129,12 +45,13 @@
# create the stack
cls.stack_identifier = cls.create_stack(
stack_name,
- cls.template,
+ template,
parameters={
'key_name': keypair_name,
'flavor': CONF.orchestration.instance_type,
'image': CONF.orchestration.image_ref,
- 'network': cls._get_default_network()['id']
+ 'network': cls._get_default_network()['id'],
+ 'timeout': CONF.orchestration.build_timeout
})
@test.attr(type='slow')
@@ -177,7 +94,8 @@
try:
self.client.wait_for_resource_status(
sid, 'WaitCondition', 'CREATE_COMPLETE')
- except exceptions.TimeoutException as e:
+ except (exceptions.StackResourceBuildErrorException,
+ exceptions.TimeoutException) as e:
# attempt to log the server console to help with debugging
# the cause of the server not signalling the waitcondition
# to heat.
@@ -192,14 +110,6 @@
# wait for create to complete.
self.client.wait_for_stack_status(sid, 'CREATE_COMPLETE')
- # fetch the stack
- resp, body = self.client.get_stack(sid)
- self.assertEqual('CREATE_COMPLETE', body['stack_status'])
-
- # fetch the stack
- resp, body = self.client.get_stack(sid)
- self.assertEqual('CREATE_COMPLETE', body['stack_status'])
-
# This is an assert of great significance, as it means the following
# has happened:
# - cfn-init read the provided metadata and wrote out a file
@@ -207,5 +117,5 @@
# - a cfn-signal was built which was signed with provided credentials
# - the wait condition was fulfilled and the stack has changed state
wait_status = json.loads(
- self.stack_output(body, 'WaitConditionStatus'))
+ self.get_stack_output(sid, 'WaitConditionStatus'))
self.assertEqual('smoke test complete', wait_status['00000'])
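For reference, a small sketch of what the final assertion consumes, assuming a single cfn-signal: get_stack_output returns the WaitCondition Data attribute as a string, a JSON map from signal id to the data each signal carried.

import json

raw_output = '{"00000": "smoke test complete"}'  # example Data attribute
wait_status = json.loads(raw_output)
assert wait_status['00000'] == 'smoke test complete'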
diff --git a/tempest/api/orchestration/stacks/test_stacks.py b/tempest/api/orchestration/stacks/test_stacks.py
index fc2dda8..867995c 100644
--- a/tempest/api/orchestration/stacks/test_stacks.py
+++ b/tempest/api/orchestration/stacks/test_stacks.py
@@ -20,14 +20,11 @@
class StacksTestJSON(base.BaseOrchestrationTest):
- _interface = 'json'
-
empty_template = "HeatTemplateFormatVersion: '2012-12-12'\n"
@classmethod
def setUpClass(cls):
super(StacksTestJSON, cls).setUpClass()
- cls.client = cls.orchestration_client
@attr(type='smoke')
def test_stack_list_responds(self):
diff --git a/tempest/api/orchestration/stacks/test_swift_resources.py b/tempest/api/orchestration/stacks/test_swift_resources.py
index 713cfd4..6d53fb2 100644
--- a/tempest/api/orchestration/stacks/test_swift_resources.py
+++ b/tempest/api/orchestration/stacks/test_swift_resources.py
@@ -18,45 +18,19 @@
from tempest import clients
from tempest.common.utils import data_utils
from tempest import config
+from tempest import test
CONF = config.CONF
class SwiftResourcesTestJSON(base.BaseOrchestrationTest):
- _interface = 'json'
- template = """
-heat_template_version: 2013-05-23
-description: Template which creates a Swift container resource
-
-resources:
- SwiftContainerWebsite:
- deletion_policy: "Delete"
- type: OS::Swift::Container
- properties:
- X-Container-Read: ".r:*"
- X-Container-Meta:
- web-index: "index.html"
- web-error: "error.html"
-
- SwiftContainer:
- type: OS::Swift::Container
-
-outputs:
- WebsiteURL:
- description: "URL for website hosted on S3"
- value: { get_attr: [SwiftContainer, WebsiteURL] }
- DomainName:
- description: "Domain of Swift host"
- value: { get_attr: [SwiftContainer, DomainName] }
-
-"""
-
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(SwiftResourcesTestJSON, cls).setUpClass()
- cls.client = cls.orchestration_client
cls.stack_name = data_utils.rand_name('heat')
+ template = cls.load_template('swift_basic')
os = clients.Manager()
if not CONF.service_available.swift:
raise cls.skipException("Swift support is required")
@@ -65,7 +39,7 @@
# create the stack
cls.stack_identifier = cls.create_stack(
cls.stack_name,
- cls.template)
+ template)
cls.stack_id = cls.stack_identifier.split('/')[1]
cls.client.wait_for_stack_status(cls.stack_id, 'CREATE_COMPLETE')
cls.test_resources = {}
@@ -74,7 +48,7 @@
cls.test_resources[resource['logical_resource_id']] = resource
def test_created_resources(self):
- """Created stack should be on the list of existing stacks."""
+ """Created stack should be in the list of existing stacks."""
resources = [('SwiftContainer', 'OS::Swift::Container'),
('SwiftContainerWebsite', 'OS::Swift::Container')]
for resource_name, resource_type in resources:
diff --git a/tempest/api/orchestration/stacks/test_templates.py b/tempest/api/orchestration/stacks/test_templates.py
index 2da819d..74950a9 100644
--- a/tempest/api/orchestration/stacks/test_templates.py
+++ b/tempest/api/orchestration/stacks/test_templates.py
@@ -12,12 +12,10 @@
from tempest.api.orchestration import base
from tempest.common.utils import data_utils
-from tempest.test import attr
+from tempest import test
class TemplateYAMLTestJSON(base.BaseOrchestrationTest):
- _interface = 'json'
-
template = """
HeatTemplateFormatVersion: '2012-12-12'
Description: |
@@ -27,12 +25,10 @@
Type: AWS::IAM::User
"""
- invalid_template_url = 'http://www.example.com/template.yaml'
-
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(TemplateYAMLTestJSON, cls).setUpClass()
- cls.client = cls.orchestration_client
cls.stack_name = data_utils.rand_name('heat')
cls.stack_identifier = cls.create_stack(cls.stack_name, cls.template)
cls.client.wait_for_stack_status(cls.stack_identifier,
@@ -40,13 +36,13 @@
cls.stack_id = cls.stack_identifier.split('/')[1]
cls.parameters = {}
- @attr(type='gate')
+ @test.attr(type='gate')
def test_show_template(self):
"""Getting template used to create the stack."""
resp, template = self.client.show_template(self.stack_identifier)
self.assertEqual('200', resp['status'])
- @attr(type='gate')
+ @test.attr(type='gate')
def test_validate_template(self):
"""Validating template passing it content."""
resp, parameters = self.client.validate_template(self.template,
@@ -66,5 +62,3 @@
}
}
"""
-
- invalid_template_url = 'http://www.example.com/template.template'
diff --git a/tempest/api/orchestration/stacks/test_templates_negative.py b/tempest/api/orchestration/stacks/test_templates_negative.py
index c55f6ee..b325104 100644
--- a/tempest/api/orchestration/stacks/test_templates_negative.py
+++ b/tempest/api/orchestration/stacks/test_templates_negative.py
@@ -18,8 +18,6 @@
class TemplateYAMLNegativeTestJSON(base.BaseOrchestrationTest):
- _interface = 'json'
-
template = """
HeatTemplateFormatVersion: '2012-12-12'
Description: |
@@ -34,7 +32,6 @@
@classmethod
def setUpClass(cls):
super(TemplateYAMLNegativeTestJSON, cls).setUpClass()
- cls.client = cls.orchestration_client
cls.parameters = {}
@test.attr(type=['gate', 'negative'])
diff --git a/tempest/api/orchestration/stacks/test_update.py b/tempest/api/orchestration/stacks/test_update.py
new file mode 100644
index 0000000..a9a43b6
--- /dev/null
+++ b/tempest/api/orchestration/stacks/test_update.py
@@ -0,0 +1,84 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+
+from tempest.api.orchestration import base
+from tempest.common.utils import data_utils
+from tempest import test
+
+
+LOG = logging.getLogger(__name__)
+
+
+class UpdateStackTestJSON(base.BaseOrchestrationTest):
+ _interface = 'json'
+
+ template = '''
+heat_template_version: 2013-05-23
+resources:
+ random1:
+ type: OS::Heat::RandomString
+'''
+ update_template = '''
+heat_template_version: 2013-05-23
+resources:
+ random1:
+ type: OS::Heat::RandomString
+ random2:
+ type: OS::Heat::RandomString
+'''
+
+ def update_stack(self, stack_identifier, template):
+ stack_name = stack_identifier.split('/')[0]
+ resp = self.client.update_stack(
+ stack_identifier=stack_identifier,
+ name=stack_name,
+ template=template)
+ self.assertEqual('202', resp[0]['status'])
+ self.client.wait_for_stack_status(stack_identifier, 'UPDATE_COMPLETE')
+
+ @test.attr(type='gate')
+ def test_stack_update_nochange(self):
+ stack_name = data_utils.rand_name('heat')
+ stack_identifier = self.create_stack(stack_name, self.template)
+ self.client.wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
+ expected_resources = {'random1': 'OS::Heat::RandomString'}
+ self.assertEqual(expected_resources,
+ self.list_resources(stack_identifier))
+
+ # Update with no changes, resources should be unchanged
+ self.update_stack(stack_identifier, self.template)
+ self.assertEqual(expected_resources,
+ self.list_resources(stack_identifier))
+
+ @test.attr(type='gate')
+ @test.skip_because(bug='1308682')
+ def test_stack_update_add_remove(self):
+ stack_name = data_utils.rand_name('heat')
+ stack_identifier = self.create_stack(stack_name, self.template)
+ self.client.wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
+ initial_resources = {'random1': 'OS::Heat::RandomString'}
+ self.assertEqual(initial_resources,
+ self.list_resources(stack_identifier))
+
+ # Add one resource via a stack update
+ self.update_stack(stack_identifier, self.update_template)
+ updated_resources = {'random1': 'OS::Heat::RandomString',
+ 'random2': 'OS::Heat::RandomString'}
+ self.assertEqual(updated_resources,
+ self.list_resources(stack_identifier))
+
+ # Then remove it by updating with the original template
+ self.update_stack(stack_identifier, self.template)
+ self.assertEqual(initial_resources,
+ self.list_resources(stack_identifier))
diff --git a/tempest/api/orchestration/stacks/test_volumes.py b/tempest/api/orchestration/stacks/test_volumes.py
new file mode 100644
index 0000000..2544c41
--- /dev/null
+++ b/tempest/api/orchestration/stacks/test_volumes.py
@@ -0,0 +1,101 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+
+from tempest.api.orchestration import base
+from tempest.common.utils import data_utils
+from tempest import config
+from tempest import exceptions
+from tempest import test
+
+
+CONF = config.CONF
+LOG = logging.getLogger(__name__)
+
+
+class CinderResourcesTest(base.BaseOrchestrationTest):
+
+ @classmethod
+ def setUpClass(cls):
+ super(CinderResourcesTest, cls).setUpClass()
+ if not CONF.service_available.cinder:
+ raise cls.skipException('Cinder support is required')
+
+ def _cinder_verify(self, volume_id):
+ self.assertIsNotNone(volume_id)
+ resp, volume = self.volumes_client.get_volume(volume_id)
+ self.assertEqual(200, resp.status)
+ self.assertEqual('available', volume.get('status'))
+ self.assertEqual(1, volume.get('size'))
+ self.assertEqual('a descriptive description',
+ volume.get('display_description'))
+
+ def _outputs_verify(self, stack_identifier):
+ self.assertEqual('available',
+ self.get_stack_output(stack_identifier, 'status'))
+ self.assertEqual('1',
+ self.get_stack_output(stack_identifier, 'size'))
+ self.assertEqual('a descriptive description',
+ self.get_stack_output(stack_identifier,
+ 'display_description'))
+
+ @test.attr(type='gate')
+ def test_cinder_volume_create_delete(self):
+ """Create and delete a volume via OS::Cinder::Volume."""
+ stack_name = data_utils.rand_name('heat')
+ template = self.load_template('cinder_basic')
+ stack_identifier = self.create_stack(stack_name, template)
+ self.client.wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
+
+ # Verify with cinder that the volume exists, with matching details
+ volume_id = self.get_stack_output(stack_identifier, 'volume_id')
+ self._cinder_verify(volume_id)
+
+ # Verify the stack outputs are as expected
+ self._outputs_verify(stack_identifier)
+
+ # Delete the stack and ensure the volume is gone
+ self.client.delete_stack(stack_identifier)
+ self.client.wait_for_stack_status(stack_identifier, 'DELETE_COMPLETE')
+ self.assertRaises(exceptions.NotFound,
+ self.volumes_client.get_volume,
+ volume_id)
+
+ def _cleanup_volume(self, volume_id):
+ """Cleanup the volume direct with cinder."""
+ resp = self.volumes_client.delete_volume(volume_id)
+ self.assertEqual(202, resp[0].status)
+ self.volumes_client.wait_for_resource_deletion(volume_id)
+
+ @test.attr(type='gate')
+ def test_cinder_volume_create_delete_retain(self):
+ """Ensure the 'Retain' deletion policy is respected."""
+ stack_name = data_utils.rand_name('heat')
+ template = self.load_template('cinder_basic_delete_retain')
+ stack_identifier = self.create_stack(stack_name, template)
+ self.client.wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
+
+ # Verify with cinder that the volume exists, with matching details
+ volume_id = self.get_stack_output(stack_identifier, 'volume_id')
+ self.addCleanup(self._cleanup_volume, volume_id)
+ self._cinder_verify(volume_id)
+
+ # Verify the stack outputs are as expected
+ self._outputs_verify(stack_identifier)
+
+ # Delete the stack and ensure the volume is *not* gone
+ self.client.delete_stack(stack_identifier)
+ self.client.wait_for_stack_status(stack_identifier, 'DELETE_COMPLETE')
+ self._cinder_verify(volume_id)
+
+ # Volume cleanup happens via addCleanup calling _cleanup_volume
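
The 'cinder_basic' and 'cinder_basic_delete_retain' templates these tests load are not part of this diff. Inferring from the outputs asserted above, the retain variant is presumably along these lines (resource and output names are assumptions; the real template files may differ), written inline in the same style the other orchestration tests use:

    # Hypothetical template contents for 'cinder_basic_delete_retain'.
    cinder_basic_delete_retain = '''
    heat_template_version: 2013-05-23
    resources:
      volume:
        deletion_policy: Retain
        type: OS::Cinder::Volume
        properties:
          size: 1
          description: a descriptive description
    outputs:
      status:
        value: { get_attr: ['volume', 'status'] }
      size:
        value: { get_attr: ['volume', 'size'] }
      display_description:
        value: { get_attr: ['volume', 'display_description'] }
      volume_id:
        value: { get_resource: volume }
    '''
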
diff --git a/tempest/api/queuing/base.py b/tempest/api/queuing/base.py
index 5656850..6c22719 100644
--- a/tempest/api/queuing/base.py
+++ b/tempest/api/queuing/base.py
@@ -47,3 +47,9 @@
"""Wrapper utility that returns a test queue."""
resp, body = cls.client.create_queue(queue_name)
return resp, body
+
+ @classmethod
+ def delete_queue(cls, queue_name):
+ """Wrapper utility that returns a test queue."""
+ resp, body = cls.client.delete_queue(queue_name)
+ return resp, body
diff --git a/tempest/api/queuing/test_queues.py b/tempest/api/queuing/test_queues.py
index 6934b46..4d03f7e 100644
--- a/tempest/api/queuing/test_queues.py
+++ b/tempest/api/queuing/test_queues.py
@@ -35,3 +35,26 @@
self.assertEqual('201', resp['status'])
self.assertEqual('', body)
+
+
+class TestManageQueue(base.BaseQueuingTest):
+ _interface = 'json'
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestManageQueue, cls).setUpClass()
+ cls.queue_name = data_utils.rand_name('Queues-Test')
+ # Create Queue
+ cls.client.create_queue(cls.queue_name)
+
+ @test.attr(type='smoke')
+ def test_delete_queue(self):
+ # Delete Queue
+ resp, body = self.delete_queue(self.queue_name)
+ self.assertEqual('204', resp['status'])
+ self.assertEqual('', body)
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.client.delete_queue(cls.queue_name)
+ super(TestManageQueue, cls).tearDownClass()
diff --git a/tempest/api/telemetry/test_telemetry_alarming_api.py b/tempest/api/telemetry/test_telemetry_alarming_api.py
index 907d3d0..3472b31 100644
--- a/tempest/api/telemetry/test_telemetry_alarming_api.py
+++ b/tempest/api/telemetry/test_telemetry_alarming_api.py
@@ -11,6 +11,7 @@
# under the License.
from tempest.api.telemetry import base
+from tempest.common.utils import data_utils
from tempest import exceptions
from tempest.test import attr
@@ -18,26 +19,96 @@
class TelemetryAlarmingAPITestJSON(base.BaseTelemetryTest):
_interface = 'json'
- @attr(type="gate")
- def test_alarm_list(self):
- resp, _ = self.telemetry_client.list_alarms()
- self.assertEqual(int(resp['status']), 200)
+ @classmethod
+ def setUpClass(cls):
+ super(TelemetryAlarmingAPITestJSON, cls).setUpClass()
+ cls.rule = {'meter_name': 'cpu_util',
+ 'comparison_operator': 'gt',
+ 'threshold': 80.0,
+ 'period': 70}
+ for i in range(2):
+ cls.create_alarm(threshold_rule=cls.rule)
@attr(type="gate")
- def test_create_alarm(self):
- rules = {'meter_name': 'cpu_util',
- 'comparison_operator': 'gt',
- 'threshold': 80.0,
- 'period': 70}
- resp, body = self.create_alarm(threshold_rule=rules)
- self.alarm_id = body['alarm_id']
- self.assertEqual(int(resp['status']), 201)
- self.assertDictContainsSubset(rules, body['threshold_rule'])
- resp, body = self.telemetry_client.get_alarm(self.alarm_id)
- self.assertEqual(int(resp['status']), 200)
- self.assertDictContainsSubset(rules, body['threshold_rule'])
- resp, _ = self.telemetry_client.delete_alarm(self.alarm_id)
- self.assertEqual(int(resp['status']), 204)
+ def test_alarm_list(self):
+ # List alarms
+ resp, alarm_list = self.telemetry_client.list_alarms()
+ self.assertEqual(200, resp.status)
+
+ # Verify created alarm in the list
+ fetched_ids = [a['alarm_id'] for a in alarm_list]
+ missing_alarms = [a for a in self.alarm_ids if a not in fetched_ids]
+ self.assertEqual(0, len(missing_alarms),
+ "Failed to find the following created alarm(s)"
+ " in a fetched list: %s" %
+ ', '.join(str(a) for a in missing_alarms))
+
+ @attr(type="gate")
+ def test_create_update_get_delete_alarm(self):
+ # Create an alarm
+ alarm_name = data_utils.rand_name('telemetry_alarm')
+ resp, body = self.telemetry_client.create_alarm(
+ name=alarm_name, type='threshold', threshold_rule=self.rule)
+ self.assertEqual(201, resp.status)
+ self.assertEqual(alarm_name, body['name'])
+ alarm_id = body['alarm_id']
+ self.assertDictContainsSubset(self.rule, body['threshold_rule'])
+ # Update alarm with new rule and new name
+ new_rule = {'meter_name': 'cpu',
+ 'comparison_operator': 'eq',
+ 'threshold': 70.0,
+ 'period': 60}
+ alarm_name = data_utils.rand_name('telemetry-alarm-update')
+ resp, body = self.telemetry_client.update_alarm(
+ alarm_id,
+ threshold_rule=new_rule,
+ name=alarm_name,
+ type='threshold')
+ self.assertEqual(200, resp.status)
+ self.assertEqual(alarm_name, body['name'])
+ self.assertDictContainsSubset(new_rule, body['threshold_rule'])
+ # Get and verify details of an alarm after update
+ resp, body = self.telemetry_client.get_alarm(alarm_id)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(alarm_name, body['name'])
+ self.assertDictContainsSubset(new_rule, body['threshold_rule'])
+ # Delete alarm and verify if deleted
+ resp, _ = self.telemetry_client.delete_alarm(alarm_id)
+ self.assertEqual(204, resp.status)
self.assertRaises(exceptions.NotFound,
- self.telemetry_client.get_alarm,
- self.alarm_id)
+ self.telemetry_client.get_alarm, alarm_id)
+
+ @attr(type="gate")
+ def test_set_get_alarm_state(self):
+ alarm_states = ['ok', 'alarm', 'insufficient data']
+ _, alarm = self.create_alarm(threshold_rule=self.rule)
+ # Set alarm state and verify
+ new_state =\
+ [elem for elem in alarm_states if elem != alarm['state']][0]
+ resp, state = self.telemetry_client.alarm_set_state(alarm['alarm_id'],
+ new_state)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(new_state, state)
+ # Get alarm state and verify
+ resp, state = self.telemetry_client.alarm_get_state(alarm['alarm_id'])
+ self.assertEqual(200, resp.status)
+ self.assertEqual(new_state, state)
+
+ @attr(type="gate")
+ def test_create_delete_alarm_with_combination_rule(self):
+ rule = {"alarm_ids": self.alarm_ids,
+ "operator": "or"}
+ # Verifies alarm create
+ alarm_name = data_utils.rand_name('combination_alarm')
+ resp, body = self.telemetry_client.create_alarm(name=alarm_name,
+ combination_rule=rule,
+ type='combination')
+ self.assertEqual(201, resp.status)
+ self.assertEqual(alarm_name, body['name'])
+ alarm_id = body['alarm_id']
+ self.assertDictContainsSubset(rule, body['combination_rule'])
+ # Verify alarm delete
+ resp, _ = self.telemetry_client.delete_alarm(alarm_id)
+ self.assertEqual(204, resp.status)
+ self.assertRaises(exceptions.NotFound,
+ self.telemetry_client.get_alarm, alarm_id)
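
setUpClass() above creates two alarms through cls.create_alarm() and the list test then relies on self.alarm_ids, so the base-class wrapper is expected to record the ids of the alarms it creates for later cleanup. A minimal sketch of such a wrapper, assuming it lives in the telemetry base class, which is not part of this hunk:

    # Hypothetical sketch of the create_alarm() wrapper the tests above
    # rely on; alarm_ids is assumed to be a class-level list used for
    # cleanup in tearDownClass.
    @classmethod
    def create_alarm(cls, **kwargs):
        resp, body = cls.telemetry_client.create_alarm(
            name=data_utils.rand_name('telemetry_alarm'),
            type='threshold', **kwargs)
        cls.alarm_ids.append(body['alarm_id'])
        return resp, body
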
diff --git a/tempest/api/volume/admin/test_multi_backend.py b/tempest/api/volume/admin/test_multi_backend.py
index 6178a1c..e79d23c 100644
--- a/tempest/api/volume/admin/test_multi_backend.py
+++ b/tempest/api/volume/admin/test_multi_backend.py
@@ -25,10 +25,10 @@
_interface = "json"
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(VolumeMultiBackendTest, cls).setUpClass()
if not CONF.volume_feature_enabled.multi_backend:
- cls.tearDownClass()
raise cls.skipException("Cinder multi-backend feature disabled")
cls.backend1_name = CONF.volume.backend1_name
@@ -37,40 +37,36 @@
cls.volume_client = cls.os_adm.volumes_client
cls.volume_type_id_list = []
cls.volume_id_list = []
- try:
- # Volume/Type creation (uses backend1_name)
- type1_name = data_utils.rand_name('Type-')
- vol1_name = data_utils.rand_name('Volume-')
- extra_specs1 = {"volume_backend_name": cls.backend1_name}
- resp, cls.type1 = cls.client.create_volume_type(
- type1_name, extra_specs=extra_specs1)
- cls.volume_type_id_list.append(cls.type1['id'])
- resp, cls.volume1 = cls.volume_client.create_volume(
- size=1, display_name=vol1_name, volume_type=type1_name)
- cls.volume_id_list.append(cls.volume1['id'])
- cls.volume_client.wait_for_volume_status(cls.volume1['id'],
+ # Volume/Type creation (uses backend1_name)
+ type1_name = data_utils.rand_name('Type-')
+ vol1_name = data_utils.rand_name('Volume-')
+ extra_specs1 = {"volume_backend_name": cls.backend1_name}
+ resp, cls.type1 = cls.client.create_volume_type(
+ type1_name, extra_specs=extra_specs1)
+ cls.volume_type_id_list.append(cls.type1['id'])
+
+ resp, cls.volume1 = cls.volume_client.create_volume(
+ size=1, display_name=vol1_name, volume_type=type1_name)
+ cls.volume_id_list.append(cls.volume1['id'])
+ cls.volume_client.wait_for_volume_status(cls.volume1['id'],
+ 'available')
+
+ if cls.backend1_name != cls.backend2_name:
+ # Volume/Type creation (uses backend2_name)
+ type2_name = data_utils.rand_name('Type-')
+ vol2_name = data_utils.rand_name('Volume-')
+ extra_specs2 = {"volume_backend_name": cls.backend2_name}
+ resp, cls.type2 = cls.client.create_volume_type(
+ type2_name, extra_specs=extra_specs2)
+ cls.volume_type_id_list.append(cls.type2['id'])
+
+ resp, cls.volume2 = cls.volume_client.create_volume(
+ size=1, display_name=vol2_name, volume_type=type2_name)
+ cls.volume_id_list.append(cls.volume2['id'])
+ cls.volume_client.wait_for_volume_status(cls.volume2['id'],
'available')
- if cls.backend1_name != cls.backend2_name:
- # Volume/Type creation (uses backend2_name)
- type2_name = data_utils.rand_name('Type-')
- vol2_name = data_utils.rand_name('Volume-')
- extra_specs2 = {"volume_backend_name": cls.backend2_name}
- resp, cls.type2 = cls.client.create_volume_type(
- type2_name, extra_specs=extra_specs2)
- cls.volume_type_id_list.append(cls.type2['id'])
-
- resp, cls.volume2 = cls.volume_client.create_volume(
- size=1, display_name=vol2_name, volume_type=type2_name)
- cls.volume_id_list.append(cls.volume2['id'])
- cls.volume_client.wait_for_volume_status(cls.volume2['id'],
- 'available')
- except Exception as e:
- LOG.exception("setup failed: %s" % e)
- cls.tearDownClass()
- raise
-
@classmethod
def tearDownClass(cls):
# volumes deletion
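
The @test.safe_setup decorator added above replaces the hand-rolled try/except blocks that called cls.tearDownClass() on failure, here and in several other hunks in this series. A minimal sketch of what such a decorator does, assuming the semantics implied by the removed code (the real tempest.test.safe_setup may differ in detail):

    # Hypothetical sketch of a safe_setup decorator: if setUpClass fails,
    # run tearDownClass so partially created resources are released, then
    # re-raise the original error.
    import functools

    def safe_setup(f):
        @functools.wraps(f)
        def decorator(cls):
            try:
                f(cls)
            except Exception:
                cls.tearDownClass()
                raise
        return decorator
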
diff --git a/tempest/api/volume/admin/test_snapshots_actions.py b/tempest/api/volume/admin/test_snapshots_actions.py
index e140ad0..594c703 100644
--- a/tempest/api/volume/admin/test_snapshots_actions.py
+++ b/tempest/api/volume/admin/test_snapshots_actions.py
@@ -22,6 +22,7 @@
_interface = "json"
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(SnapshotsActionsTest, cls).setUpClass()
cls.client = cls.snapshots_client
diff --git a/tempest/api/volume/admin/test_volume_quotas.py b/tempest/api/volume/admin/test_volume_quotas.py
index 31f6730..531e145 100644
--- a/tempest/api/volume/admin/test_volume_quotas.py
+++ b/tempest/api/volume/admin/test_volume_quotas.py
@@ -23,13 +23,13 @@
class VolumeQuotasAdminTestJSON(base.BaseVolumeV1AdminTest):
_interface = "json"
+ force_tenant_isolation = True
@classmethod
def setUpClass(cls):
super(VolumeQuotasAdminTestJSON, cls).setUpClass()
cls.admin_volume_client = cls.os_adm.volumes_client
- cls.demo_tenant_id = cls.isolated_creds.get_primary_user().get(
- 'tenantId')
+ cls.demo_tenant_id = cls.isolated_creds.get_primary_creds().tenant_id
@test.attr(type='gate')
def test_list_quotas(self):
@@ -60,11 +60,16 @@
self.demo_tenant_id,
**new_quota_set)
- default_quota_set.pop('id')
+ cleanup_quota_set = dict(
+ (k, v) for k, v in default_quota_set.iteritems()
+ if k in QUOTA_KEYS)
self.addCleanup(self.quotas_client.update_quota_set,
- self.demo_tenant_id, **default_quota_set)
+ self.demo_tenant_id, **cleanup_quota_set)
self.assertEqual(200, resp.status)
- self.assertEqual(new_quota_set, quota_set)
+        # Test that the specific values we set are actually present in
+        # the final result; nothing here guarantees that no other values
+        # are present as well.
+ self.assertDictContainsSubset(new_quota_set, quota_set)
@test.attr(type='gate')
def test_show_quota_usage(self):
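
The switch from assertEqual to assertDictContainsSubset above is deliberate: the quota response may carry keys beyond the ones the test sets, so only the requested values are checked. A small illustration with made-up values:

    # Illustrative values only; the real keys and numbers come from the test.
    requested = {'gigabytes': 1009, 'volumes': 11}
    returned = {'gigabytes': 1009, 'volumes': 11, 'snapshots': 10}
    # assertDictContainsSubset(requested, returned) passes, because every
    # requested key/value pair is present in the response.
    # assertEqual(requested, returned) would fail on the extra 'snapshots' key.
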
diff --git a/tempest/api/volume/admin/test_volume_quotas_negative.py b/tempest/api/volume/admin/test_volume_quotas_negative.py
new file mode 100644
index 0000000..ab88b90
--- /dev/null
+++ b/tempest/api/volume/admin/test_volume_quotas_negative.py
@@ -0,0 +1,83 @@
+# Copyright 2014 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.volume import base
+from tempest import exceptions
+from tempest import test
+
+
+class VolumeQuotasNegativeTestJSON(base.BaseVolumeV1AdminTest):
+ _interface = "json"
+ force_tenant_isolation = True
+
+ @classmethod
+ @test.safe_setup
+ def setUpClass(cls):
+ super(VolumeQuotasNegativeTestJSON, cls).setUpClass()
+ demo_user = cls.isolated_creds.get_primary_creds()
+ cls.demo_tenant_id = demo_user.tenant_id
+ cls.shared_quota_set = {'gigabytes': 3, 'volumes': 1, 'snapshots': 1}
+
+ # NOTE(gfidente): no need to restore original quota set
+ # after the tests as they only work with tenant isolation.
+ resp, quota_set = cls.quotas_client.update_quota_set(
+ cls.demo_tenant_id,
+ **cls.shared_quota_set)
+
+ # NOTE(gfidente): no need to delete in tearDown as
+ # they are created using utility wrapper methods.
+ cls.volume = cls.create_volume()
+ cls.snapshot = cls.create_snapshot(cls.volume['id'])
+
+ @test.attr(type='negative')
+ def test_quota_volumes(self):
+ self.assertRaises(exceptions.OverLimit,
+ self.volumes_client.create_volume,
+ size=1)
+
+ @test.attr(type='negative')
+ def test_quota_volume_snapshots(self):
+ self.assertRaises(exceptions.OverLimit,
+ self.snapshots_client.create_snapshot,
+ self.volume['id'])
+
+ @test.attr(type='negative')
+ def test_quota_volume_gigabytes(self):
+ # NOTE(gfidente): quota set needs to be changed for this test
+ # or we may be limited by the volumes or snaps quota number, not by
+ # actual gigs usage; next line ensures shared set is restored.
+ self.addCleanup(self.quotas_client.update_quota_set,
+ self.demo_tenant_id,
+ **self.shared_quota_set)
+
+ new_quota_set = {'gigabytes': 2, 'volumes': 2, 'snapshots': 1}
+ resp, quota_set = self.quotas_client.update_quota_set(
+ self.demo_tenant_id,
+ **new_quota_set)
+ self.assertRaises(exceptions.OverLimit,
+ self.volumes_client.create_volume,
+ size=1)
+
+ new_quota_set = {'gigabytes': 2, 'volumes': 1, 'snapshots': 2}
+ resp, quota_set = self.quotas_client.update_quota_set(
+ self.demo_tenant_id,
+            **new_quota_set)
+ self.assertRaises(exceptions.OverLimit,
+ self.snapshots_client.create_snapshot,
+ self.volume['id'])
+
+
+class VolumeQuotasNegativeTestXML(VolumeQuotasNegativeTestJSON):
+ _interface = "xml"
diff --git a/tempest/api/volume/admin/test_volume_services.py b/tempest/api/volume/admin/test_volume_services.py
new file mode 100644
index 0000000..012c231
--- /dev/null
+++ b/tempest/api/volume/admin/test_volume_services.py
@@ -0,0 +1,74 @@
+# Copyright 2014 NEC Corporation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.volume import base
+from tempest import test
+
+
+class VolumesServicesTestJSON(base.BaseVolumeV1AdminTest):
+ """
+ Tests Volume Services API.
+    Volume service list requires admin privileges.
+ """
+ _interface = "json"
+
+ @classmethod
+ def setUpClass(cls):
+ super(VolumesServicesTestJSON, cls).setUpClass()
+ cls.client = cls.os_adm.volume_services_client
+ resp, cls.services = cls.client.list_services()
+ cls.host_name = cls.services[0]['host']
+ cls.binary_name = cls.services[0]['binary']
+
+ @test.attr(type='gate')
+ def test_list_services(self):
+ resp, services = self.client.list_services()
+ self.assertEqual(200, resp.status)
+ self.assertNotEqual(0, len(services))
+
+ @test.attr(type='gate')
+ def test_get_service_by_service_binary_name(self):
+ params = {'binary': self.binary_name}
+ resp, services = self.client.list_services(params)
+ self.assertEqual(200, resp.status)
+ self.assertNotEqual(0, len(services))
+ for service in services:
+ self.assertEqual(self.binary_name, service['binary'])
+
+ @test.attr(type='gate')
+ def test_get_service_by_host_name(self):
+ services_on_host = [service for service in self.services if
+ service['host'] == self.host_name]
+ params = {'host': self.host_name}
+
+ resp, services = self.client.list_services(params)
+
+        # A periodic job could check in between the two service
+        # lookups, so only compare the binary lists.
+ s1 = map(lambda x: x['binary'], services)
+ s2 = map(lambda x: x['binary'], services_on_host)
+ # sort the lists before comparing, to take out dependency
+ # on order.
+ self.assertEqual(sorted(s1), sorted(s2))
+
+ @test.attr(type='gate')
+ def test_get_service_by_service_and_host_name(self):
+ params = {'host': self.host_name, 'binary': self.binary_name}
+
+ resp, services = self.client.list_services(params)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(1, len(services))
+ self.assertEqual(self.host_name, services[0]['host'])
+ self.assertEqual(self.binary_name, services[0]['binary'])
diff --git a/tempest/api/volume/admin/test_volume_types.py b/tempest/api/volume/admin/test_volume_types.py
index 8183999..ee1d09a 100644
--- a/tempest/api/volume/admin/test_volume_types.py
+++ b/tempest/api/volume/admin/test_volume_types.py
@@ -116,3 +116,35 @@
self.assertEqual(extra_specs, fetched_volume_type['extra_specs'],
'The fetched Volume_type is different '
'from the created Volume_type')
+
+ @test.attr(type='smoke')
+ def test_volume_type_encryption_create_get(self):
+ # Create/get encryption type.
+ provider = "LuksEncryptor"
+ control_location = "front-end"
+ name = data_utils.rand_name("volume-type-")
+ resp, body = self.client.create_volume_type(name)
+ self.assertEqual(200, resp.status)
+ self.addCleanup(self._delete_volume_type, body['id'])
+ resp, encryption_type = self.client.create_encryption_type(
+ body['id'], provider=provider,
+ control_location=control_location)
+ self.assertEqual(200, resp.status)
+ self.assertIn('volume_type_id', encryption_type)
+ self.assertEqual(provider, encryption_type['provider'],
+ "The created encryption_type provider is not equal "
+ "to the requested provider")
+ self.assertEqual(control_location, encryption_type['control_location'],
+ "The created encryption_type control_location is not "
+ "equal to the requested control_location")
+ resp, fetched_encryption_type = self.client.get_encryption_type(
+ encryption_type['volume_type_id'])
+ self.assertEqual(200, resp.status)
+ self.assertEqual(provider,
+ fetched_encryption_type['provider'],
+ 'The fetched encryption_type provider is different '
+ 'from the created encryption_type')
+ self.assertEqual(control_location,
+ fetched_encryption_type['control_location'],
+ 'The fetched encryption_type control_location is '
+ 'different from the created encryption_type')
diff --git a/tempest/api/volume/admin/test_volumes_actions.py b/tempest/api/volume/admin/test_volumes_actions.py
index aa00700..008f739 100644
--- a/tempest/api/volume/admin/test_volumes_actions.py
+++ b/tempest/api/volume/admin/test_volumes_actions.py
@@ -22,6 +22,7 @@
_interface = "json"
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(VolumesActionsTest, cls).setUpClass()
cls.client = cls.volumes_client
@@ -84,24 +85,6 @@
self.volume['id'])
self.assertEqual('error', volume_get['status'])
- @test.attr(type='gate')
- def test_volume_begin_detaching(self):
- # test volume begin detaching : available -> detaching -> available
- resp, body = self.client.volume_begin_detaching(self.volume['id'])
- self.assertEqual(202, resp.status)
- resp_get, volume_get = self.client.get_volume(self.volume['id'])
- self.assertEqual('detaching', volume_get['status'])
-
- @test.attr(type='gate')
- def test_volume_roll_detaching(self):
- # test volume roll detaching : detaching -> in-use -> available
- resp, body = self.client.volume_begin_detaching(self.volume['id'])
- self.assertEqual(202, resp.status)
- resp, body = self.client.volume_roll_detaching(self.volume['id'])
- self.assertEqual(202, resp.status)
- resp_get, volume_get = self.client.get_volume(self.volume['id'])
- self.assertEqual('in-use', volume_get['status'])
-
def test_volume_force_delete_when_volume_is_creating(self):
# test force delete when status of volume is creating
self._create_reset_and_force_delete_temp_volume('creating')
diff --git a/tempest/api/volume/admin/test_volumes_backup.py b/tempest/api/volume/admin/test_volumes_backup.py
index cd6d7a8..f9fbe18 100644
--- a/tempest/api/volume/admin/test_volumes_backup.py
+++ b/tempest/api/volume/admin/test_volumes_backup.py
@@ -27,6 +27,7 @@
_interface = "json"
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(VolumesBackupsTest, cls).setUpClass()
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index 2c6050c..4d11d24 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -107,6 +107,7 @@
cls.snapshots_client = cls.os.snapshots_client
cls.volumes_client = cls.os.volumes_client
cls.backups_client = cls.os.backups_client
+ cls.volume_services_client = cls.os.volume_services_client
cls.volumes_extension_client = cls.os.volumes_extension_client
@classmethod
@@ -135,11 +136,7 @@
"in configuration.")
raise cls.skipException(msg)
if CONF.compute.allow_tenant_isolation:
- creds = cls.isolated_creds.get_admin_creds()
- admin_username, admin_tenant_name, admin_password = creds
- cls.os_adm = clients.Manager(username=admin_username,
- password=admin_password,
- tenant_name=admin_tenant_name,
+ cls.os_adm = clients.Manager(cls.isolated_creds.get_admin_creds(),
interface=cls._interface)
else:
cls.os_adm = clients.AdminManager(interface=cls._interface)
diff --git a/tempest/api/volume/test_snapshot_metadata.py b/tempest/api/volume/test_snapshot_metadata.py
index 1493b37..d2c4ab7 100644
--- a/tempest/api/volume/test_snapshot_metadata.py
+++ b/tempest/api/volume/test_snapshot_metadata.py
@@ -21,6 +21,7 @@
_interface = "json"
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(SnapshotMetadataTest, cls).setUpClass()
cls.client = cls.snapshots_client
diff --git a/tempest/api/volume/test_volume_metadata.py b/tempest/api/volume/test_volume_metadata.py
index e94c700..0d57d47 100644
--- a/tempest/api/volume/test_volume_metadata.py
+++ b/tempest/api/volume/test_volume_metadata.py
@@ -23,16 +23,13 @@
_interface = "json"
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(VolumeMetadataTest, cls).setUpClass()
# Create a volume
cls.volume = cls.create_volume()
cls.volume_id = cls.volume['id']
- @classmethod
- def tearDownClass(cls):
- super(VolumeMetadataTest, cls).tearDownClass()
-
def tearDown(self):
# Update the metadata to {}
self.volumes_client.update_volume_metadata(self.volume_id, {})
diff --git a/tempest/api/volume/test_volume_transfers.py b/tempest/api/volume/test_volume_transfers.py
index 55a72c1..82d1364 100644
--- a/tempest/api/volume/test_volume_transfers.py
+++ b/tempest/api/volume/test_volume_transfers.py
@@ -32,32 +32,18 @@
# Add another tenant to test volume-transfer
if CONF.compute.allow_tenant_isolation:
- creds = cls.isolated_creds.get_alt_creds()
- username, tenant_name, password = creds
- cls.os_alt = clients.Manager(username=username,
- password=password,
- tenant_name=tenant_name,
+ cls.os_alt = clients.Manager(cls.isolated_creds.get_alt_creds(),
interface=cls._interface)
- cls.alt_tenant_id = cls.isolated_creds.get_alt_tenant()['id']
-
# Add admin tenant to cleanup resources
- adm_creds = cls.isolated_creds.get_admin_creds()
- admin_username, admin_tenant_name, admin_password = adm_creds
- cls.os_adm = clients.Manager(username=admin_username,
- password=admin_password,
- tenant_name=admin_tenant_name,
+ cls.os_adm = clients.Manager(cls.isolated_creds.get_admin_creds(),
interface=cls._interface)
else:
cls.os_alt = clients.AltManager()
- alt_tenant_name = cls.os_alt.credentials['tenant_name']
- identity_client = cls._get_identity_admin_client()
- _, tenants = identity_client.list_tenants()
- cls.alt_tenant_id = [tnt['id'] for tnt in tenants
- if tnt['name'] == alt_tenant_name][0]
cls.os_adm = clients.ComputeAdminManager(interface=cls._interface)
cls.client = cls.volumes_client
cls.alt_client = cls.os_alt.volumes_client
+ cls.alt_tenant_id = cls.alt_client.tenant_id
cls.adm_client = cls.os_adm.volumes_client
def _delete_volume(self, volume_id):
diff --git a/tempest/api/volume/test_volumes_actions.py b/tempest/api/volume/test_volumes_actions.py
index a22ad32..cfab0bd 100644
--- a/tempest/api/volume/test_volumes_actions.py
+++ b/tempest/api/volume/test_volumes_actions.py
@@ -25,6 +25,7 @@
_interface = "json"
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(VolumesActionsTest, cls).setUpClass()
cls.client = cls.volumes_client
@@ -44,7 +45,7 @@
def tearDownClass(cls):
# Delete the test instance
cls.servers_client.delete_server(cls.server['id'])
- cls.client.wait_for_resource_deletion(cls.server['id'])
+ cls.servers_client.wait_for_server_termination(cls.server['id'])
super(VolumesActionsTest, cls).tearDownClass()
diff --git a/tempest/api/volume/test_volumes_get.py b/tempest/api/volume/test_volumes_get.py
index 175da01..58da440 100644
--- a/tempest/api/volume/test_volumes_get.py
+++ b/tempest/api/volume/test_volumes_get.py
@@ -51,8 +51,7 @@
v_name = data_utils.rand_name('Volume')
metadata = {'Type': 'Test'}
# Create a volume
- resp, volume = self.client.create_volume(size=1,
- display_name=v_name,
+ resp, volume = self.client.create_volume(display_name=v_name,
metadata=metadata,
**kwargs)
self.assertEqual(200, resp.status)
@@ -124,8 +123,8 @@
resp, new_volume = \
self.client.create_volume(size=1,
display_description=new_v_desc,
- availability_zone=volume[
- 'availability_zone'])
+ availability_zone=
+ volume['availability_zone'])
self.assertEqual(200, resp.status)
self.assertIn('id', new_volume)
self.addCleanup(self._delete_volume, new_volume['id'])
@@ -133,8 +132,8 @@
resp, update_volume = \
self.client.update_volume(new_volume['id'],
display_name=volume['display_name'],
- display_description=volume[
- 'display_description'])
+ display_description=
+ volume['display_description'])
self.assertEqual(200, resp.status)
# NOTE(jdg): Revert back to strict true/false checking
diff --git a/tempest/api/volume/test_volumes_list.py b/tempest/api/volume/test_volumes_list.py
index c356342..e2f7a38 100644
--- a/tempest/api/volume/test_volumes_list.py
+++ b/tempest/api/volume/test_volumes_list.py
@@ -56,6 +56,7 @@
[str_vol(v) for v in fetched_list]))
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(VolumesListTest, cls).setUpClass()
cls.client = cls.volumes_client
@@ -65,24 +66,10 @@
cls.volume_id_list = []
cls.metadata = {'Type': 'work'}
for i in range(3):
- try:
- volume = cls.create_volume(metadata=cls.metadata)
-
- resp, volume = cls.client.get_volume(volume['id'])
- cls.volume_list.append(volume)
- cls.volume_id_list.append(volume['id'])
- except Exception:
- LOG.exception('Failed to create volume. %d volumes were '
- 'created' % len(cls.volume_id_list))
- if cls.volume_list:
- # We could not create all the volumes, though we were able
- # to create *some* of the volumes. This is typically
- # because the backing file size of the volume group is
- # too small.
- for volid in cls.volume_id_list:
- cls.client.delete_volume(volid)
- cls.client.wait_for_resource_deletion(volid)
- raise
+ volume = cls.create_volume(metadata=cls.metadata)
+ resp, volume = cls.client.get_volume(volume['id'])
+ cls.volume_list.append(volume)
+ cls.volume_id_list.append(volume['id'])
@classmethod
def tearDownClass(cls):
diff --git a/tempest/api/volume/test_volumes_negative.py b/tempest/api/volume/test_volumes_negative.py
index 82924a5..a8b0a8d 100644
--- a/tempest/api/volume/test_volumes_negative.py
+++ b/tempest/api/volume/test_volumes_negative.py
@@ -25,6 +25,7 @@
_interface = 'json'
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(VolumesNegativeTest, cls).setUpClass()
cls.client = cls.volumes_client
diff --git a/tempest/api/volume/test_volumes_snapshots.py b/tempest/api/volume/test_volumes_snapshots.py
index 2701e84..6294cd9 100644
--- a/tempest/api/volume/test_volumes_snapshots.py
+++ b/tempest/api/volume/test_volumes_snapshots.py
@@ -24,14 +24,13 @@
_interface = "json"
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(VolumesSnapshotTest, cls).setUpClass()
- try:
- cls.volume_origin = cls.create_volume()
- except Exception:
- LOG.exception("setup failed")
- cls.tearDownClass()
- raise
+        if not CONF.volume_feature_enabled.snapshot:
+            raise cls.skipException("Cinder volume snapshots are disabled")
+
+        cls.volume_origin = cls.create_volume()
@classmethod
def tearDownClass(cls):
@@ -71,8 +70,8 @@
resp, server = self.servers_client.create_server(server_name,
self.image_ref,
self.flavor_ref)
- self.servers_client.wait_for_server_status(server['id'], 'ACTIVE')
self.addCleanup(self.servers_client.delete_server, server['id'])
+ self.servers_client.wait_for_server_status(server['id'], 'ACTIVE')
mountpoint = '/dev/%s' % CONF.compute.volume_device_name
resp, body = self.volumes_client.attach_volume(
self.volume_origin['id'], server['id'], mountpoint)
diff --git a/tempest/api/volume/test_volumes_snapshots_negative.py b/tempest/api/volume/test_volumes_snapshots_negative.py
index 9e47c03..61aa307 100644
--- a/tempest/api/volume/test_volumes_snapshots_negative.py
+++ b/tempest/api/volume/test_volumes_snapshots_negative.py
@@ -14,13 +14,23 @@
from tempest.api.volume import base
from tempest.common.utils import data_utils
+from tempest import config
from tempest import exceptions
from tempest import test
+CONF = config.CONF
+
class VolumesSnapshotNegativeTest(base.BaseVolumeV1Test):
_interface = "json"
+ @classmethod
+ def setUpClass(cls):
+ super(VolumesSnapshotNegativeTest, cls).setUpClass()
+
+ if not CONF.volume_feature_enabled.snapshot:
+ raise cls.skipException("Cinder volume snapshots are disabled")
+
@test.attr(type=['negative', 'gate'])
def test_create_snapshot_with_nonexistent_volume_id(self):
# Create a snapshot with nonexistent volume id
diff --git a/tempest/api/volume/v2/test_volumes_list.py b/tempest/api/volume/v2/test_volumes_list.py
index 0e91371..41445d7 100644
--- a/tempest/api/volume/v2/test_volumes_list.py
+++ b/tempest/api/volume/v2/test_volumes_list.py
@@ -56,6 +56,7 @@
[str_vol(v) for v in fetched_list]))
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(VolumesV2ListTestJSON, cls).setUpClass()
cls.client = cls.volumes_client
@@ -65,23 +66,10 @@
cls.volume_id_list = []
cls.metadata = {'Type': 'work'}
for i in range(3):
- try:
- volume = cls.create_volume(metadata=cls.metadata)
- resp, volume = cls.client.get_volume(volume['id'])
- cls.volume_list.append(volume)
- cls.volume_id_list.append(volume['id'])
- except Exception:
- LOG.exception('Failed to create volume. %d volumes were '
- 'created' % len(cls.volume_id_list))
- if cls.volume_list:
- # We could not create all the volumes, though we were able
- # to create *some* of the volumes. This is typically
- # because the backing file size of the volume group is
- # too small.
- for volid in cls.volume_id_list:
- cls.client.delete_volume(volid)
- cls.client.wait_for_resource_deletion(volid)
- raise
+ volume = cls.create_volume(metadata=cls.metadata)
+ resp, volume = cls.client.get_volume(volume['id'])
+ cls.volume_list.append(volume)
+ cls.volume_id_list.append(volume['id'])
@classmethod
def tearDownClass(cls):
@@ -116,8 +104,8 @@
('details' if with_detail else '', key)
if key == 'metadata':
self.assertThat(volume[key].items(),
- matchers.ContainsAll(params[key]
- .items()), msg)
+ matchers.ContainsAll(
+ params[key].items()), msg)
else:
self.assertEqual(params[key], volume[key], msg)
@@ -187,7 +175,7 @@
self._list_by_param_value_and_assert(params)
@test.attr(type='gate')
- def test_volume_list_with_detail_param_metadata(self):
+ def test_volume_list_with_details_param_metadata(self):
# Test to list volumes details when metadata param is given
params = {'metadata': self.metadata}
self._list_by_param_value_and_assert(params, with_detail=True)
@@ -201,7 +189,7 @@
self._list_by_param_value_and_assert(params, expected_list=[volume])
@test.attr(type='gate')
- def test_volume_list_with_detail_param_display_name_and_status(self):
+ def test_volume_list_with_details_param_display_name_and_status(self):
# Test to list volume when name and status param is given
volume = self.volume_list[data_utils.rand_int_id(0, 2)]
params = {'name': volume['name'],
@@ -209,6 +197,37 @@
self._list_by_param_value_and_assert(params, expected_list=[volume],
with_detail=True)
+ @test.attr(type='gate')
+ def test_volume_list_details_with_multiple_params(self):
+ # List volumes detail using combined condition
+ def _list_details_with_multiple_params(limit=2,
+ status='available',
+ sort_dir='asc',
+ sort_key='created_at'):
+ params = {'limit': limit,
+ 'status': status,
+ 'sort_dir': sort_dir,
+ 'sort_key': sort_key
+ }
+ resp, fetched_volume = self.client.list_volumes_with_detail(params)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(limit, len(fetched_volume),
+ "The count of volumes is %s, expected:%s " %
+ (len(fetched_volume), limit))
+ self.assertEqual(status, fetched_volume[0]['status'])
+ self.assertEqual(status, fetched_volume[1]['status'])
+ val0 = fetched_volume[0][sort_key]
+ val1 = fetched_volume[1][sort_key]
+ if sort_dir == 'asc':
+ self.assertTrue(val0 < val1,
+ "%s < %s" % (val0, val1))
+ elif sort_dir == 'desc':
+ self.assertTrue(val0 > val1,
+ "%s > %s" % (val0, val1))
+
+ _list_details_with_multiple_params()
+ _list_details_with_multiple_params(sort_dir='desc')
+
class VolumesV2ListTestXML(VolumesV2ListTestJSON):
_interface = 'xml'
diff --git a/tempest/api/compute/api_schema/__init__.py b/tempest/api_schema/__init__.py
similarity index 100%
rename from tempest/api/compute/api_schema/__init__.py
rename to tempest/api_schema/__init__.py
diff --git a/tempest/api/compute/api_schema/__init__.py b/tempest/api_schema/compute/__init__.py
similarity index 100%
copy from tempest/api/compute/api_schema/__init__.py
copy to tempest/api_schema/compute/__init__.py
diff --git a/tempest/api_schema/compute/agents.py b/tempest/api_schema/compute/agents.py
new file mode 100644
index 0000000..b04cf64
--- /dev/null
+++ b/tempest/api_schema/compute/agents.py
@@ -0,0 +1,40 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+list_agents = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'agents': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'agent_id': {'type': ['integer', 'string']},
+ 'hypervisor': {'type': 'string'},
+ 'os': {'type': 'string'},
+ 'architecture': {'type': 'string'},
+ 'version': {'type': 'string'},
+ 'url': {'type': 'string', 'format': 'uri'},
+ 'md5hash': {'type': 'string'}
+ },
+ 'required': ['agent_id', 'hypervisor', 'os',
+ 'architecture', 'version', 'url', 'md5hash']
+ }
+ }
+ },
+ 'required': ['agents']
+ }
+}
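
The new tempest/api_schema/compute modules are plain dicts: a 'status_code' list plus a JSON Schema under 'response_body'. A minimal sketch of how a client could check a response against one of them using the jsonschema library (tempest's own validation helper may behave differently):

    # Hypothetical validation helper; illustrates the schema layout only.
    import jsonschema

    def validate_response(schema, resp, body):
        if int(resp['status']) not in schema['status_code']:
            raise AssertionError('unexpected status: %s' % resp['status'])
        if 'response_body' in schema:
            jsonschema.validate(body, schema['response_body'])
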
diff --git a/tempest/api_schema/compute/aggregates.py b/tempest/api_schema/compute/aggregates.py
new file mode 100644
index 0000000..9393a16
--- /dev/null
+++ b/tempest/api_schema/compute/aggregates.py
@@ -0,0 +1,86 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+aggregate = {
+ 'type': 'object',
+ 'properties': {
+ 'availability_zone': {'type': ['string', 'null']},
+ 'created_at': {'type': 'string'},
+ 'deleted': {'type': 'boolean'},
+ 'deleted_at': {'type': ['string', 'null']},
+ 'hosts': {'type': 'array'},
+ 'id': {'type': 'integer'},
+ 'metadata': {'type': 'object'},
+ 'name': {'type': 'string'},
+ 'updated_at': {'type': ['string', 'null']}
+ },
+ 'required': ['availability_zone', 'created_at', 'deleted',
+ 'deleted_at', 'hosts', 'id', 'metadata',
+ 'name', 'updated_at']
+}
+
+list_aggregates = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'aggregates': {
+ 'type': 'array',
+ 'items': aggregate
+ }
+ },
+ 'required': ['aggregates']
+ }
+}
+
+get_aggregate = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'aggregate': aggregate
+ },
+ 'required': ['aggregate']
+ }
+}
+
+aggregate_set_metadata = get_aggregate
+# The 'updated_at' attribute of 'update_aggregate' can't be null.
+update_aggregate = copy.deepcopy(get_aggregate)
+update_aggregate['response_body']['properties']['aggregate']['properties'][
+ 'updated_at'] = {
+ 'type': 'string'
+ }
+
+common_create_aggregate = {
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'aggregate': aggregate
+ },
+ 'required': ['aggregate']
+ }
+}
+# create-aggregate api doesn't have 'hosts' and 'metadata' attributes.
+del common_create_aggregate['response_body']['properties']['aggregate'][
+ 'properties']['hosts']
+del common_create_aggregate['response_body']['properties']['aggregate'][
+ 'properties']['metadata']
+common_create_aggregate['response_body']['properties']['aggregate'][
+ 'required'] = ['availability_zone', 'created_at', 'deleted', 'deleted_at',
+ 'id', 'name', 'updated_at']
+
+aggregate_add_remove_host = get_aggregate
diff --git a/tempest/api_schema/compute/availability_zone.py b/tempest/api_schema/compute/availability_zone.py
new file mode 100644
index 0000000..c1abc64
--- /dev/null
+++ b/tempest/api_schema/compute/availability_zone.py
@@ -0,0 +1,37 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# NOTE: This is the detail information for "get az detail" API.
+# The information is the same between v2 and v3 APIs.
+detail = {
+ 'type': 'object',
+ 'patternProperties': {
+ # NOTE: Here is for a hostname
+ '^[a-zA-Z0-9-_.]+$': {
+ 'type': 'object',
+ 'patternProperties': {
+ # NOTE: Here is for a service name
+ '^.*$': {
+ 'type': 'object',
+ 'properties': {
+ 'available': {'type': 'boolean'},
+ 'active': {'type': 'boolean'},
+ 'updated_at': {'type': 'string'}
+ },
+ 'required': ['available', 'active', 'updated_at']
+ }
+ }
+ }
+ }
+}
diff --git a/tempest/api_schema/compute/certificates.py b/tempest/api_schema/compute/certificates.py
new file mode 100644
index 0000000..caac2ab
--- /dev/null
+++ b/tempest/api_schema/compute/certificates.py
@@ -0,0 +1,37 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+_common_schema = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'certificate': {
+ 'type': 'object',
+ 'properties': {
+ 'data': {'type': 'string'},
+ 'private_key': {'type': 'string'},
+ },
+ 'required': ['data', 'private_key'],
+ }
+ },
+ 'required': ['certificate'],
+ }
+}
+
+get_certificate = copy.deepcopy(_common_schema)
+get_certificate['response_body']['properties']['certificate'][
+ 'properties']['private_key'].update({'type': 'null'})
diff --git a/tempest/api_schema/compute/flavors.py b/tempest/api_schema/compute/flavors.py
new file mode 100644
index 0000000..aa019e4
--- /dev/null
+++ b/tempest/api_schema/compute/flavors.py
@@ -0,0 +1,77 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api_schema.compute import parameter_types
+
+list_flavors = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'flavors': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'name': {'type': 'string'},
+ 'links': parameter_types.links,
+ 'id': {'type': 'string'}
+ },
+ 'required': ['name', 'links', 'id']
+ }
+ }
+ },
+ 'required': ['flavors']
+ }
+}
+
+common_flavor_info = {
+ 'type': 'object',
+ 'properties': {
+ 'name': {'type': 'string'},
+ 'links': parameter_types.links,
+ 'ram': {'type': 'integer'},
+ 'vcpus': {'type': 'integer'},
+ 'swap': {'type': 'integer'},
+ 'disk': {'type': 'integer'},
+ 'id': {'type': 'string'}
+ },
+ 'required': ['name', 'links', 'ram', 'vcpus',
+ 'swap', 'disk', 'id']
+}
+
+common_flavor_list_details = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'flavors': {
+ 'type': 'array',
+ 'items': common_flavor_info
+ }
+ },
+ 'required': ['flavors']
+ }
+}
+
+common_flavor_details = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'flavor': common_flavor_info
+ },
+ 'required': ['flavor']
+ }
+}
diff --git a/tempest/api_schema/compute/flavors_access.py b/tempest/api_schema/compute/flavors_access.py
new file mode 100644
index 0000000..cd31b0a
--- /dev/null
+++ b/tempest/api_schema/compute/flavors_access.py
@@ -0,0 +1,34 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+add_remove_list_flavor_access = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'flavor_access': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'flavor_id': {'type': 'string'},
+ 'tenant_id': {'type': 'string'},
+ },
+ 'required': ['flavor_id', 'tenant_id'],
+ }
+ }
+ },
+ 'required': ['flavor_access']
+ }
+}
diff --git a/tempest/api_schema/compute/flavors_extra_specs.py b/tempest/api_schema/compute/flavors_extra_specs.py
new file mode 100644
index 0000000..4003d36
--- /dev/null
+++ b/tempest/api_schema/compute/flavors_extra_specs.py
@@ -0,0 +1,39 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+flavor_extra_specs = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'extra_specs': {
+ 'type': 'object',
+ 'patternProperties': {
+ '^[a-zA-Z0-9_\-\. :]+$': {'type': 'string'}
+ }
+ }
+ },
+ 'required': ['extra_specs']
+ }
+}
+
+flavor_extra_specs_key = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'patternProperties': {
+ '^[a-zA-Z0-9_\-\. :]+$': {'type': 'string'}
+ }
+ }
+}
diff --git a/tempest/api_schema/compute/hosts.py b/tempest/api_schema/compute/hosts.py
new file mode 100644
index 0000000..2596c27
--- /dev/null
+++ b/tempest/api_schema/compute/hosts.py
@@ -0,0 +1,85 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+common_start_up_body = {
+ 'type': 'object',
+ 'properties': {
+ 'host': {'type': 'string'},
+ 'power_action': {'enum': ['startup']}
+ },
+ 'required': ['host', 'power_action']
+}
+
+list_hosts = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'hosts': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'host_name': {'type': 'string'},
+ 'service': {'type': 'string'},
+ 'zone': {'type': 'string'}
+ },
+ 'required': ['host_name', 'service', 'zone']
+ }
+ }
+ },
+ 'required': ['hosts']
+ }
+}
+
+show_host_detail = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'host': {
+ 'type': 'array',
+                'items': {
+ 'type': 'object',
+ 'properties': {
+ 'resource': {
+ 'type': 'object',
+ 'properties': {
+ 'cpu': {'type': 'integer'},
+ 'disk_gb': {'type': 'integer'},
+ 'host': {'type': 'string'},
+ 'memory_mb': {'type': 'integer'},
+ 'project': {'type': 'string'}
+ },
+ 'required': ['cpu', 'disk_gb', 'host',
+ 'memory_mb', 'project']
+ }
+ },
+ 'required': ['resource']
+ }
+ }
+ },
+ 'required': ['host']
+ }
+}
+
+update_host_common = {
+ 'type': 'object',
+ 'properties': {
+ 'host': {'type': 'string'},
+ 'maintenance_mode': {'enum': ['on_maintenance', 'off_maintenance']},
+ 'status': {'enum': ['enabled', 'disabled']}
+ },
+ 'required': ['host', 'maintenance_mode', 'status']
+}
diff --git a/tempest/api_schema/compute/hypervisors.py b/tempest/api_schema/compute/hypervisors.py
new file mode 100644
index 0000000..630901e
--- /dev/null
+++ b/tempest/api_schema/compute/hypervisors.py
@@ -0,0 +1,197 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+hypervisor_statistics = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'hypervisor_statistics': {
+ 'type': 'object',
+ 'properties': {
+ 'count': {'type': 'integer'},
+ 'current_workload': {'type': 'integer'},
+ 'disk_available_least': {'type': 'integer'},
+ 'free_disk_gb': {'type': 'integer'},
+ 'free_ram_mb': {'type': 'integer'},
+ 'local_gb': {'type': 'integer'},
+ 'local_gb_used': {'type': 'integer'},
+ 'memory_mb': {'type': 'integer'},
+ 'memory_mb_used': {'type': 'integer'},
+ 'running_vms': {'type': 'integer'},
+ 'vcpus': {'type': 'integer'},
+ 'vcpus_used': {'type': 'integer'}
+ },
+ 'required': ['count', 'current_workload',
+ 'disk_available_least', 'free_disk_gb',
+ 'free_ram_mb', 'local_gb', 'local_gb_used',
+ 'memory_mb', 'memory_mb_used', 'running_vms',
+ 'vcpus', 'vcpus_used']
+ }
+ },
+ 'required': ['hypervisor_statistics']
+ }
+}
+
+common_list_hypervisors_detail = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'hypervisors': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'cpu_info': {'type': 'string'},
+ 'current_workload': {'type': 'integer'},
+ 'disk_available_least': {'type': ['integer', 'null']},
+ 'host_ip': {
+ 'type': 'string',
+ 'format': 'ip-address'
+ },
+ 'free_disk_gb': {'type': 'integer'},
+ 'free_ram_mb': {'type': 'integer'},
+ 'hypervisor_hostname': {'type': 'string'},
+ 'hypervisor_type': {'type': 'string'},
+ 'hypervisor_version': {'type': 'integer'},
+ 'id': {'type': ['integer', 'string']},
+ 'local_gb': {'type': 'integer'},
+ 'local_gb_used': {'type': 'integer'},
+ 'memory_mb': {'type': 'integer'},
+ 'memory_mb_used': {'type': 'integer'},
+ 'running_vms': {'type': 'integer'},
+ 'service': {
+ 'type': 'object',
+ 'properties': {
+ 'host': {'type': 'string'},
+ 'id': {'type': ['integer', 'string']}
+ },
+ 'required': ['host', 'id']
+ },
+ 'vcpus': {'type': 'integer'},
+ 'vcpus_used': {'type': 'integer'}
+ },
+ 'required': ['cpu_info', 'current_workload',
+ 'disk_available_least', 'host_ip',
+ 'free_disk_gb', 'free_ram_mb',
+ 'hypervisor_hostname', 'hypervisor_type',
+ 'hypervisor_version', 'id', 'local_gb',
+ 'local_gb_used', 'memory_mb',
+ 'memory_mb_used', 'running_vms', 'service',
+ 'vcpus', 'vcpus_used']
+ }
+ }
+ },
+ 'required': ['hypervisors']
+ }
+}
+
+common_show_hypervisor = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'hypervisor': {
+ 'type': 'object',
+ 'properties': {
+ 'cpu_info': {'type': 'string'},
+ 'current_workload': {'type': 'integer'},
+ 'disk_available_least': {'type': 'integer'},
+ 'host_ip': {
+ 'type': 'string',
+ 'format': 'ip-address'
+ },
+ 'free_disk_gb': {'type': 'integer'},
+ 'free_ram_mb': {'type': 'integer'},
+ 'hypervisor_hostname': {'type': 'string'},
+ 'hypervisor_type': {'type': 'string'},
+ 'hypervisor_version': {'type': 'integer'},
+ 'id': {'type': ['integer', 'string']},
+ 'local_gb': {'type': 'integer'},
+ 'local_gb_used': {'type': 'integer'},
+ 'memory_mb': {'type': 'integer'},
+ 'memory_mb_used': {'type': 'integer'},
+ 'running_vms': {'type': 'integer'},
+ 'service': {
+ 'type': 'object',
+ 'properties': {
+ 'host': {'type': 'string'},
+ 'id': {'type': ['integer', 'string']}
+ },
+ 'required': ['host', 'id']
+ },
+ 'vcpus': {'type': 'integer'},
+ 'vcpus_used': {'type': 'integer'}
+ },
+ 'required': ['cpu_info', 'current_workload',
+ 'disk_available_least', 'host_ip',
+ 'free_disk_gb', 'free_ram_mb',
+ 'hypervisor_hostname', 'hypervisor_type',
+ 'hypervisor_version', 'id', 'local_gb',
+ 'local_gb_used', 'memory_mb', 'memory_mb_used',
+ 'running_vms', 'service', 'vcpus', 'vcpus_used']
+ }
+ },
+ 'required': ['hypervisor']
+ }
+}
+
+common_hypervisors_detail = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'hypervisors': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'id': {'type': ['integer', 'string']},
+ 'hypervisor_hostname': {'type': 'string'}
+ },
+ 'required': ['id', 'hypervisor_hostname']
+ }
+ }
+ },
+ 'required': ['hypervisors']
+ }
+}
+
+common_hypervisors_info = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'hypervisor': {
+ 'type': 'object',
+ 'properties': {
+ 'id': {'type': ['integer', 'string']},
+ 'hypervisor_hostname': {'type': 'string'},
+ },
+ 'required': ['id', 'hypervisor_hostname']
+ }
+ },
+ 'required': ['hypervisor']
+ }
+}
+
+
+hypervisor_uptime = copy.deepcopy(common_hypervisors_info)
+hypervisor_uptime['response_body']['properties']['hypervisor'][
+ 'properties']['uptime'] = {'type': 'string'}
+hypervisor_uptime['response_body']['properties']['hypervisor'][
+ 'required'] = ['id', 'hypervisor_hostname', 'uptime']
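+
+# NOTE: The pattern above (copy.deepcopy() a shared schema, then add or
+# tighten attributes) is how the version-specific modules build on these
+# common definitions. A sketch of the resulting schema, for illustration
+# only:
+#
+#     _hv = hypervisor_uptime['response_body']['properties']['hypervisor']
+#     assert 'uptime' in _hv['properties']
+#     assert 'uptime' in _hv['required']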
diff --git a/tempest/api_schema/compute/interfaces.py b/tempest/api_schema/compute/interfaces.py
new file mode 100644
index 0000000..79a8f42
--- /dev/null
+++ b/tempest/api_schema/compute/interfaces.py
@@ -0,0 +1,47 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api_schema.compute import parameter_types
+
+delete_interface = {
+ 'status_code': [202]
+}
+
+interface_common_info = {
+ 'type': 'object',
+ 'properties': {
+ 'port_state': {'type': 'string'},
+ 'fixed_ips': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'subnet_id': {
+ 'type': 'string',
+ 'format': 'uuid'
+ },
+ 'ip_address': {
+ 'type': 'string',
+ 'format': 'ipv4'
+ }
+ },
+ 'required': ['subnet_id', 'ip_address']
+ }
+ },
+ 'port_id': {'type': 'string', 'format': 'uuid'},
+ 'net_id': {'type': 'string', 'format': 'uuid'},
+ 'mac_addr': parameter_types.mac_address
+ },
+ 'required': ['port_state', 'fixed_ips', 'port_id', 'net_id', 'mac_addr']
+}
diff --git a/tempest/api_schema/compute/keypairs.py b/tempest/api_schema/compute/keypairs.py
new file mode 100644
index 0000000..b8f905f
--- /dev/null
+++ b/tempest/api_schema/compute/keypairs.py
@@ -0,0 +1,65 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+list_keypairs = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'keypairs': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'keypair': {
+ 'type': 'object',
+ 'properties': {
+ 'public_key': {'type': 'string'},
+ 'name': {'type': 'string'},
+ 'fingerprint': {'type': 'string'}
+ },
+ 'required': ['public_key', 'name', 'fingerprint']
+ }
+ },
+ 'required': ['keypair']
+ }
+ }
+ },
+ 'required': ['keypairs']
+ }
+}
+
+create_keypair = {
+ 'type': 'object',
+ 'properties': {
+ 'keypair': {
+ 'type': 'object',
+ 'properties': {
+ 'fingerprint': {'type': 'string'},
+ 'name': {'type': 'string'},
+ 'public_key': {'type': 'string'},
+ # NOTE: Now the type of 'user_id' is integer, but here
+ # allows 'string' also because we will be able to change
+ # it to 'uuid' in the future.
+ 'user_id': {'type': ['integer', 'string']},
+ 'private_key': {'type': 'string'}
+ },
+            # When the create keypair API is called with a 'public_key'
+            # (importing a keypair), the response body does not contain
+            # 'private_key', so it is not defined as 'required'.
+ 'required': ['fingerprint', 'name', 'public_key', 'user_id']
+ }
+ },
+ 'required': ['keypair']
+}
diff --git a/tempest/api_schema/compute/migrations.py b/tempest/api_schema/compute/migrations.py
new file mode 100644
index 0000000..6723869
--- /dev/null
+++ b/tempest/api_schema/compute/migrations.py
@@ -0,0 +1,56 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+list_migrations = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'migrations': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ # NOTE: Now the type of 'id' is integer, but here
+ # allows 'string' also because we will be able to
+ # change it to 'uuid' in the future.
+ 'id': {'type': ['integer', 'string']},
+ 'status': {'type': 'string'},
+ 'instance_uuid': {'type': 'string'},
+ 'source_node': {'type': 'string'},
+ 'source_compute': {'type': 'string'},
+ 'dest_node': {'type': 'string'},
+ 'dest_compute': {'type': 'string'},
+ 'dest_host': {'type': 'string'},
+ 'old_instance_type_id': {
+ 'type': ['integer', 'string']
+ },
+ 'new_instance_type_id': {
+ 'type': ['integer', 'string']
+ },
+ 'created_at': {'type': 'string'},
+ 'updated_at': {'type': ['string', 'null']}
+ },
+ 'required': [
+ 'id', 'status', 'instance_uuid', 'source_node',
+ 'source_compute', 'dest_node', 'dest_compute',
+ 'dest_host', 'old_instance_type_id',
+ 'new_instance_type_id', 'created_at', 'updated_at'
+ ]
+ }
+ }
+ },
+ 'required': ['migrations']
+ }
+}
diff --git a/tempest/api_schema/compute/parameter_types.py b/tempest/api_schema/compute/parameter_types.py
new file mode 100644
index 0000000..4a1dfdd
--- /dev/null
+++ b/tempest/api_schema/compute/parameter_types.py
@@ -0,0 +1,67 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+links = {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'href': {
+ 'type': 'string',
+ 'format': 'uri'
+ },
+ 'rel': {'type': 'string'}
+ },
+ 'required': ['href', 'rel']
+ }
+}
+
+mac_address = {
+ 'type': 'string',
+ 'pattern': '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
+}
+
+access_ip_v4 = {
+ 'type': 'string',
+ 'anyOf': [{'format': 'ipv4'}, {'enum': ['']}]
+}
+
+access_ip_v6 = {
+ 'type': 'string',
+ 'anyOf': [{'format': 'ipv6'}, {'enum': ['']}]
+}
+
+addresses = {
+ 'type': 'object',
+ 'patternProperties': {
+        # NOTE: The pattern matches network labels such as 'private'.
+ '^[a-zA-Z0-9-_.]+$': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'version': {'type': 'integer'},
+ 'addr': {
+ 'type': 'string',
+ 'anyOf': [
+ {'format': 'ipv4'},
+ {'format': 'ipv6'}
+ ]
+ }
+ },
+ 'required': ['version', 'addr']
+ }
+ }
+ }
+}
diff --git a/tempest/api_schema/compute/quotas.py b/tempest/api_schema/compute/quotas.py
new file mode 100644
index 0000000..f49771e
--- /dev/null
+++ b/tempest/api_schema/compute/quotas.py
@@ -0,0 +1,41 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+common_quota_set = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'quota_set': {
+ 'type': 'object',
+ 'properties': {
+ 'instances': {'type': 'integer'},
+ 'cores': {'type': 'integer'},
+ 'ram': {'type': 'integer'},
+ 'floating_ips': {'type': 'integer'},
+ 'fixed_ips': {'type': 'integer'},
+ 'metadata_items': {'type': 'integer'},
+ 'key_pairs': {'type': 'integer'},
+ 'security_groups': {'type': 'integer'},
+ 'security_group_rules': {'type': 'integer'}
+ },
+ 'required': ['instances', 'cores', 'ram',
+ 'floating_ips', 'fixed_ips',
+ 'metadata_items', 'key_pairs',
+ 'security_groups', 'security_group_rules']
+ }
+ },
+ 'required': ['quota_set']
+ }
+}
diff --git a/tempest/api_schema/compute/servers.py b/tempest/api_schema/compute/servers.py
new file mode 100644
index 0000000..e11f047
--- /dev/null
+++ b/tempest/api_schema/compute/servers.py
@@ -0,0 +1,141 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from tempest.api_schema.compute import parameter_types
+
+get_password = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'password': {'type': 'string'}
+ },
+ 'required': ['password']
+ }
+}
+
+get_vnc_console = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'console': {
+ 'type': 'object',
+ 'properties': {
+ 'type': {'type': 'string'},
+ 'url': {
+ 'type': 'string',
+ 'format': 'uri'
+ }
+ },
+ 'required': ['type', 'url']
+ }
+ },
+ 'required': ['console']
+ }
+}
+
+base_update_server = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'server': {
+ 'type': 'object',
+ 'properties': {
+ 'id': {'type': ['integer', 'string']},
+ 'name': {'type': 'string'},
+ 'status': {'type': 'string'},
+ 'image': {
+ 'type': 'object',
+ 'properties': {
+ 'id': {'type': ['integer', 'string']},
+ 'links': parameter_types.links
+ },
+ 'required': ['id', 'links']
+ },
+ 'flavor': {
+ 'type': 'object',
+ 'properties': {
+ 'id': {'type': ['integer', 'string']},
+ 'links': parameter_types.links
+ },
+ 'required': ['id', 'links']
+ },
+ 'user_id': {'type': 'string'},
+ 'tenant_id': {'type': 'string'},
+ 'created': {'type': 'string'},
+ 'updated': {'type': 'string'},
+ 'progress': {'type': 'integer'},
+ 'metadata': {'type': 'object'},
+ 'links': parameter_types.links,
+ 'addresses': parameter_types.addresses,
+ },
+ 'required': ['id', 'name', 'status', 'image', 'flavor',
+ 'user_id', 'tenant_id', 'created', 'updated',
+ 'progress', 'metadata', 'links', 'addresses']
+ }
+ }
+ }
+}
+
+delete_server = {
+ 'status_code': [204],
+}
+
+set_server_metadata = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'metadata': {
+ 'type': 'object',
+ 'patternProperties': {
+ '^.+$': {'type': 'string'}
+ }
+ }
+ },
+ 'required': ['metadata']
+ }
+}
+
+list_server_metadata = copy.deepcopy(set_server_metadata)
+
+delete_server_metadata_item = {
+ 'status_code': [204]
+}
+
+list_servers = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'servers': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'id': {'type': 'string'},
+ 'links': parameter_types.links,
+ 'name': {'type': 'string'}
+ },
+ 'required': ['id', 'links', 'name']
+ }
+ }
+ },
+ 'required': ['servers']
+ }
+}
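+
+# NOTE: Each schema above pairs the expected status codes with a JSON schema
+# for the response body. A minimal sketch of how a client-side check could
+# consume one (illustrative only; assumes the jsonschema library and an
+# already decoded body):
+#
+#     import jsonschema
+#     assert resp.status in list_servers['status_code']
+#     jsonschema.validate(body, list_servers['response_body'])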
diff --git a/tempest/api_schema/compute/services.py b/tempest/api_schema/compute/services.py
new file mode 100644
index 0000000..4c58013
--- /dev/null
+++ b/tempest/api_schema/compute/services.py
@@ -0,0 +1,63 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+list_services = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'services': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ # NOTE: Now the type of 'id' is integer, but here
+ # allows 'string' also because we will be able to
+ # change it to 'uuid' in the future.
+ 'id': {'type': ['integer', 'string']},
+ 'zone': {'type': 'string'},
+ 'host': {'type': 'string'},
+ 'state': {'type': 'string'},
+ 'binary': {'type': 'string'},
+ 'status': {'type': 'string'},
+ 'updated_at': {'type': 'string'},
+ 'disabled_reason': {'type': ['string', 'null']}
+ },
+ 'required': ['id', 'zone', 'host', 'state', 'binary',
+ 'status', 'updated_at', 'disabled_reason']
+ }
+ }
+ },
+ 'required': ['services']
+ }
+}
+
+enable_service = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'service': {
+ 'type': 'object',
+ 'properties': {
+ 'status': {'type': 'string'},
+ 'binary': {'type': 'string'},
+ 'host': {'type': 'string'}
+ },
+ 'required': ['status', 'binary', 'host']
+ }
+ },
+ 'required': ['service']
+ }
+}
diff --git a/tempest/api/compute/api_schema/__init__.py b/tempest/api_schema/compute/v2/__init__.py
similarity index 100%
copy from tempest/api/compute/api_schema/__init__.py
copy to tempest/api_schema/compute/v2/__init__.py
diff --git a/tempest/api_schema/compute/v2/agents.py b/tempest/api_schema/compute/v2/agents.py
new file mode 100644
index 0000000..837731f
--- /dev/null
+++ b/tempest/api_schema/compute/v2/agents.py
@@ -0,0 +1,17 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+delete_agent = {
+ 'status_code': [200]
+}
diff --git a/tempest/api_schema/compute/v2/aggregates.py b/tempest/api_schema/compute/v2/aggregates.py
new file mode 100644
index 0000000..bc36044
--- /dev/null
+++ b/tempest/api_schema/compute/v2/aggregates.py
@@ -0,0 +1,25 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from tempest.api_schema.compute import aggregates
+
+delete_aggregate = {
+ 'status_code': [200]
+}
+
+create_aggregate = copy.deepcopy(aggregates.common_create_aggregate)
+# V2 API's response status_code is 200
+create_aggregate['status_code'] = [200]
diff --git a/tempest/api_schema/compute/v2/availability_zone.py b/tempest/api_schema/compute/v2/availability_zone.py
new file mode 100644
index 0000000..d3d2787
--- /dev/null
+++ b/tempest/api_schema/compute/v2/availability_zone.py
@@ -0,0 +1,54 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from tempest.api_schema.compute import availability_zone as common
+
+
+base = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'availabilityZoneInfo': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'zoneName': {'type': 'string'},
+ 'zoneState': {
+ 'type': 'object',
+ 'properties': {
+ 'available': {'type': 'boolean'}
+ },
+ 'required': ['available']
+ },
+                        # NOTE: 'hosts' is the difference between the
+                        # detail and non-detail responses; it is null here
+                        # and is overridden below for the detail schema.
+ 'hosts': {'type': 'null'}
+ },
+ 'required': ['zoneName', 'zoneState', 'hosts']
+ }
+ }
+ },
+ 'required': ['availabilityZoneInfo']
+ }
+}
+
+get_availability_zone_list = copy.deepcopy(base)
+
+get_availability_zone_list_detail = copy.deepcopy(base)
+get_availability_zone_list_detail['response_body']['properties'][
+ 'availabilityZoneInfo']['items']['properties']['hosts'] = common.detail
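+
+# NOTE: A sketch of the detail/non-detail difference, for illustration only:
+#
+#     _detail = get_availability_zone_list_detail['response_body']
+#     _items = _detail['properties']['availabilityZoneInfo']['items']
+#     assert _items['properties']['hosts'] == common.detail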
diff --git a/tempest/api_schema/compute/v2/certificates.py b/tempest/api_schema/compute/v2/certificates.py
new file mode 100644
index 0000000..1eb38ce
--- /dev/null
+++ b/tempest/api_schema/compute/v2/certificates.py
@@ -0,0 +1,19 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from tempest.api_schema.compute import certificates
+
+create_certificate = copy.deepcopy(certificates._common_schema)
diff --git a/tempest/api_schema/compute/v2/extensions.py b/tempest/api_schema/compute/v2/extensions.py
new file mode 100644
index 0000000..570cd03
--- /dev/null
+++ b/tempest/api_schema/compute/v2/extensions.py
@@ -0,0 +1,45 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+list_extensions = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'extensions': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'updated': {
+ 'type': 'string',
+                            'format': 'date-time'
+ },
+ 'name': {'type': 'string'},
+ 'links': {'type': 'array'},
+ 'namespace': {
+ 'type': 'string',
+ 'format': 'uri'
+ },
+ 'alias': {'type': 'string'},
+ 'description': {'type': 'string'}
+ },
+ 'required': ['updated', 'name', 'links', 'namespace',
+ 'alias', 'description']
+ }
+ }
+ },
+ 'required': ['extensions']
+ }
+}
diff --git a/tempest/api_schema/compute/v2/fixed_ips.py b/tempest/api_schema/compute/v2/fixed_ips.py
new file mode 100644
index 0000000..446633f
--- /dev/null
+++ b/tempest/api_schema/compute/v2/fixed_ips.py
@@ -0,0 +1,41 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+fixed_ips = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'fixed_ip': {
+ 'type': 'object',
+ 'properties': {
+ 'address': {
+ 'type': 'string',
+ 'format': 'ip-address'
+ },
+ 'cidr': {'type': 'string'},
+ 'host': {'type': 'string'},
+ 'hostname': {'type': 'string'}
+ },
+ 'required': ['address', 'cidr', 'host', 'hostname']
+ }
+ },
+ 'required': ['fixed_ip']
+ }
+}
+
+fixed_ip_action = {
+ 'status_code': [202],
+ 'response_body': {'type': 'string'}
+}
diff --git a/tempest/api_schema/compute/v2/flavors.py b/tempest/api_schema/compute/v2/flavors.py
new file mode 100644
index 0000000..bee6ecb
--- /dev/null
+++ b/tempest/api_schema/compute/v2/flavors.py
@@ -0,0 +1,57 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from tempest.api_schema.compute import flavors
+
+list_flavors_details = copy.deepcopy(flavors.common_flavor_list_details)
+
+# The 'swap' attribute comes back as an integer, but when it is empty it
+# comes back as "". So its type is defined as both string and integer.
+list_flavors_details['response_body']['properties']['flavors']['items'][
+ 'properties']['swap'] = {'type': ['string', 'integer']}
+
+# Defining extra attributes for V2 flavor schema
+list_flavors_details['response_body']['properties']['flavors']['items'][
+ 'properties'].update({'OS-FLV-DISABLED:disabled': {'type': 'boolean'},
+ 'os-flavor-access:is_public': {'type': 'boolean'},
+ 'rxtx_factor': {'type': 'number'},
+ 'OS-FLV-EXT-DATA:ephemeral': {'type': 'integer'}})
+# 'OS-FLV-DISABLED', 'os-flavor-access', 'rxtx_factor' and 'OS-FLV-EXT-DATA'
+# are API extensions. So they are not 'required'.
+
+unset_flavor_extra_specs = {
+ 'status_code': [200]
+}
+
+create_get_flavor_details = copy.deepcopy(flavors.common_flavor_details)
+
+# The 'swap' attribute comes back as an integer, but when it is empty it
+# comes back as "". So its type is defined as both string and integer.
+create_get_flavor_details['response_body']['properties']['flavor'][
+ 'properties']['swap'] = {'type': ['string', 'integer']}
+
+# Defining extra attributes for V2 flavor schema
+create_get_flavor_details['response_body']['properties']['flavor'][
+ 'properties'].update({'OS-FLV-DISABLED:disabled': {'type': 'boolean'},
+ 'os-flavor-access:is_public': {'type': 'boolean'},
+ 'rxtx_factor': {'type': 'number'},
+ 'OS-FLV-EXT-DATA:ephemeral': {'type': 'integer'}})
+# 'OS-FLV-DISABLED', 'os-flavor-access', 'rxtx_factor' and 'OS-FLV-EXT-DATA'
+# are API extensions. So they are not 'required'.
+
+delete_flavor = {
+ 'status_code': [202]
+}
diff --git a/tempest/api_schema/compute/v2/floating_ips.py b/tempest/api_schema/compute/v2/floating_ips.py
new file mode 100644
index 0000000..3ea6320
--- /dev/null
+++ b/tempest/api_schema/compute/v2/floating_ips.py
@@ -0,0 +1,100 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+list_floating_ips = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'floating_ips': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ # NOTE: Now the type of 'id' is integer, but
+ # here allows 'string' also because we will be
+ # able to change it to 'uuid' in the future.
+ 'id': {'type': ['integer', 'string']},
+ 'pool': {'type': ['string', 'null']},
+ 'instance_id': {'type': ['integer', 'string', 'null']},
+ 'ip': {
+ 'type': 'string',
+ 'format': 'ip-address'
+ },
+ 'fixed_ip': {
+ 'type': ['string', 'null'],
+ 'format': 'ip-address'
+ }
+ },
+ 'required': ['id', 'pool', 'instance_id', 'ip', 'fixed_ip']
+ }
+ }
+ },
+ 'required': ['floating_ips']
+ }
+}
+
+floating_ip = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'floating_ip': {
+ 'type': 'object',
+ 'properties': {
+ # NOTE: Now the type of 'id' is integer, but here allows
+ # 'string' also because we will be able to change it to
+ # 'uuid' in the future.
+ 'id': {'type': ['integer', 'string']},
+ 'pool': {'type': ['string', 'null']},
+ 'instance_id': {'type': ['integer', 'string', 'null']},
+ 'ip': {
+ 'type': 'string',
+ 'format': 'ip-address'
+ },
+ 'fixed_ip': {
+ 'type': ['string', 'null'],
+ 'format': 'ip-address'
+ }
+ },
+ 'required': ['id', 'pool', 'instance_id', 'ip', 'fixed_ip']
+ }
+ },
+ 'required': ['floating_ip']
+ }
+}
+
+floating_ip_pools = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'floating_ip_pools': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'name': {'type': 'string'}
+ },
+ 'required': ['name']
+ }
+ }
+ },
+ 'required': ['floating_ip_pools']
+ }
+}
+
+add_remove_floating_ip = {
+ 'status_code': [202]
+}
diff --git a/tempest/api_schema/compute/v2/hosts.py b/tempest/api_schema/compute/v2/hosts.py
new file mode 100644
index 0000000..86efadf
--- /dev/null
+++ b/tempest/api_schema/compute/v2/hosts.py
@@ -0,0 +1,42 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from tempest.api_schema.compute import hosts
+
+
+startup_host = {
+ 'status_code': [200],
+ 'response_body': hosts.common_start_up_body
+}
+
+# The 'power_action' attribute of 'shutdown_host' API is 'shutdown'
+shutdown_host = copy.deepcopy(startup_host)
+
+shutdown_host['response_body']['properties']['power_action'] = {
+ 'enum': ['shutdown']
+}
+
+# The 'power_action' attribute of 'reboot_host' API is 'reboot'
+reboot_host = copy.deepcopy(startup_host)
+
+reboot_host['response_body']['properties']['power_action'] = {
+ 'enum': ['reboot']
+}
+
+update_host = {
+ 'status_code': [200],
+ 'response_body': hosts.update_host_common
+}
diff --git a/tempest/api_schema/compute/v2/hypervisors.py b/tempest/api_schema/compute/v2/hypervisors.py
new file mode 100644
index 0000000..6bb43a7
--- /dev/null
+++ b/tempest/api_schema/compute/v2/hypervisors.py
@@ -0,0 +1,37 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+from tempest.api_schema.compute import hypervisors
+
+hypervisors_servers = copy.deepcopy(hypervisors.common_hypervisors_detail)
+
+# Defining extra attributes for the V2 hypervisor servers schema
+hypervisors_servers['response_body']['properties']['hypervisors']['items'][
+ 'properties']['servers'] = {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ # NOTE: Now the type of 'id' is integer,
+ # but here allows 'string' also because we
+ # will be able to change it to 'uuid' in
+ # the future.
+ 'id': {'type': ['integer', 'string']},
+ 'name': {'type': 'string'}
+ }
+ }
+ }
+# In the V2 API, if there are no servers (VMs) on the hypervisor host, the
+# 'servers' attribute is not present in the response body, so it is not
+# 'required'.
diff --git a/tempest/api_schema/compute/v2/images.py b/tempest/api_schema/compute/v2/images.py
new file mode 100644
index 0000000..d121060
--- /dev/null
+++ b/tempest/api_schema/compute/v2/images.py
@@ -0,0 +1,136 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api_schema.compute import parameter_types
+
+common_image_schema = {
+ 'type': 'object',
+ 'properties': {
+ 'id': {'type': 'string'},
+ 'status': {'type': 'string'},
+ 'updated': {'type': 'string'},
+ 'links': parameter_types.links,
+ 'name': {'type': 'string'},
+ 'created': {'type': 'string'},
+ 'minDisk': {'type': 'integer'},
+ 'minRam': {'type': 'integer'},
+ 'progress': {'type': 'integer'},
+ 'metadata': {'type': 'object'},
+ 'server': {
+ 'type': 'object',
+ 'properties': {
+ # NOTE: Now the type of 'id' is integer, but here
+ # allows 'string' also because we will be able to
+ # change it to 'uuid' in the future.
+ 'id': {'type': ['integer', 'string']},
+ 'links': parameter_types.links
+ },
+ 'required': ['id', 'links']
+ },
+ 'OS-EXT-IMG-SIZE:size': {'type': 'integer'}
+ },
+    # The 'server' attribute only comes back in the response body if the
+    # image is associated with a server, and 'OS-EXT-IMG-SIZE:size' is an
+    # API extension, so neither is defined as 'required'.
+ 'required': ['id', 'status', 'updated', 'links', 'name',
+ 'created', 'minDisk', 'minRam', 'progress',
+ 'metadata']
+}
+
+get_image = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'image': common_image_schema
+ },
+ 'required': ['image']
+ }
+}
+
+list_images = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'images': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'id': {'type': 'string'},
+ 'links': parameter_types.links,
+ 'name': {'type': 'string'}
+ },
+ 'required': ['id', 'links', 'name']
+ }
+ }
+ },
+ 'required': ['images']
+ }
+}
+
+create_image = {
+ 'status_code': [202],
+ 'response_header': {
+ 'type': 'object',
+ 'properties': {
+ 'location': {
+ 'type': 'string',
+ 'format': 'uri'
+ }
+ },
+ 'required': ['location']
+ }
+}
+
+delete = {
+ 'status_code': [204]
+}
+
+image_metadata = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'metadata': {'type': 'object'}
+ },
+ 'required': ['metadata']
+ }
+}
+
+image_meta_item = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'meta': {'type': 'object'}
+ },
+ 'required': ['meta']
+ }
+}
+
+list_images_details = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'images': {
+ 'type': 'array',
+ 'items': common_image_schema
+ }
+ },
+ 'required': ['images']
+ }
+}
diff --git a/tempest/api_schema/compute/v2/instance_usage_audit_logs.py b/tempest/api_schema/compute/v2/instance_usage_audit_logs.py
new file mode 100644
index 0000000..658f574
--- /dev/null
+++ b/tempest/api_schema/compute/v2/instance_usage_audit_logs.py
@@ -0,0 +1,59 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+common_instance_usage_audit_log = {
+ 'type': 'object',
+ 'properties': {
+ 'hosts_not_run': {
+ 'type': 'array',
+ 'items': {'type': 'string'}
+ },
+ 'log': {'type': 'object'},
+ 'num_hosts': {'type': 'integer'},
+ 'num_hosts_done': {'type': 'integer'},
+ 'num_hosts_not_run': {'type': 'integer'},
+ 'num_hosts_running': {'type': 'integer'},
+ 'overall_status': {'type': 'string'},
+ 'period_beginning': {'type': 'string'},
+ 'period_ending': {'type': 'string'},
+ 'total_errors': {'type': 'integer'},
+ 'total_instances': {'type': 'integer'}
+ },
+ 'required': ['hosts_not_run', 'log', 'num_hosts', 'num_hosts_done',
+ 'num_hosts_not_run', 'num_hosts_running', 'overall_status',
+ 'period_beginning', 'period_ending', 'total_errors',
+ 'total_instances']
+}
+
+get_instance_usage_audit_log = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'instance_usage_audit_log': common_instance_usage_audit_log
+ },
+ 'required': ['instance_usage_audit_log']
+ }
+}
+
+list_instance_usage_audit_log = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'instance_usage_audit_logs': common_instance_usage_audit_log
+ },
+ 'required': ['instance_usage_audit_logs']
+ }
+}
diff --git a/tempest/api_schema/compute/v2/interfaces.py b/tempest/api_schema/compute/v2/interfaces.py
new file mode 100644
index 0000000..7fca791
--- /dev/null
+++ b/tempest/api_schema/compute/v2/interfaces.py
@@ -0,0 +1,29 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api_schema.compute import interfaces as common_schema
+
+list_interfaces = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'interfaceAttachments': {
+ 'type': 'array',
+ 'items': common_schema.interface_common_info
+ }
+ },
+ 'required': ['interfaceAttachments']
+ }
+}
diff --git a/tempest/api_schema/compute/v2/keypairs.py b/tempest/api_schema/compute/v2/keypairs.py
new file mode 100644
index 0000000..9a025c3
--- /dev/null
+++ b/tempest/api_schema/compute/v2/keypairs.py
@@ -0,0 +1,58 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api_schema.compute import keypairs
+
+get_keypair = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'keypair': {
+ 'type': 'object',
+ 'properties': {
+ 'public_key': {'type': 'string'},
+ 'name': {'type': 'string'},
+ 'fingerprint': {'type': 'string'},
+ # NOTE: Now the type of 'user_id' is integer, but here
+ # allows 'string' also because we will be able to change
+ # it to 'uuid' in the future.
+ 'user_id': {'type': ['integer', 'string']},
+ 'deleted': {'type': 'boolean'},
+ 'created_at': {'type': 'string'},
+ 'updated_at': {'type': ['string', 'null']},
+ 'deleted_at': {'type': ['string', 'null']},
+ 'id': {'type': 'integer'}
+
+ },
+                # When the get keypair API is run, the response body
+                # includes all of the attributes above. However, the Nova
+                # API sample file includes only 'public_key', 'name' and
+                # 'fingerprint', so only those three are 'required'.
+ 'required': ['public_key', 'name', 'fingerprint']
+ }
+ },
+ 'required': ['keypair']
+ }
+}
+
+create_keypair = {
+ 'status_code': [200],
+ 'response_body': keypairs.create_keypair
+}
+
+delete_keypair = {
+ 'status_code': [202],
+}
diff --git a/tempest/api_schema/compute/v2/limits.py b/tempest/api_schema/compute/v2/limits.py
new file mode 100644
index 0000000..b9857f1
--- /dev/null
+++ b/tempest/api_schema/compute/v2/limits.py
@@ -0,0 +1,94 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+get_limit = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'limits': {
+ 'type': 'object',
+ 'properties': {
+ 'absolute': {
+ 'type': 'object',
+ 'properties': {
+ 'maxTotalRAMSize': {'type': 'integer'},
+ 'totalCoresUsed': {'type': 'integer'},
+ 'maxTotalInstances': {'type': 'integer'},
+ 'maxTotalFloatingIps': {'type': 'integer'},
+ 'totalSecurityGroupsUsed': {'type': 'integer'},
+ 'maxTotalCores': {'type': 'integer'},
+ 'totalFloatingIpsUsed': {'type': 'integer'},
+ 'maxSecurityGroups': {'type': 'integer'},
+ 'maxServerMeta': {'type': 'integer'},
+ 'maxPersonality': {'type': 'integer'},
+ 'maxImageMeta': {'type': 'integer'},
+ 'maxPersonalitySize': {'type': 'integer'},
+ 'maxSecurityGroupRules': {'type': 'integer'},
+ 'maxTotalKeypairs': {'type': 'integer'},
+ 'totalRAMUsed': {'type': 'integer'},
+ 'totalInstancesUsed': {'type': 'integer'}
+ },
+ 'required': ['maxImageMeta',
+ 'maxPersonality',
+ 'maxPersonalitySize',
+ 'maxSecurityGroupRules',
+ 'maxSecurityGroups',
+ 'maxServerMeta',
+ 'maxTotalCores',
+ 'maxTotalFloatingIps',
+ 'maxTotalInstances',
+ 'maxTotalKeypairs',
+ 'maxTotalRAMSize',
+ 'totalCoresUsed',
+ 'totalFloatingIpsUsed',
+ 'totalInstancesUsed',
+ 'totalRAMUsed',
+ 'totalSecurityGroupsUsed']
+ },
+ 'rate': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'limit': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'next-available':
+ {'type': 'string'},
+ 'remaining':
+ {'type': 'integer'},
+ 'unit':
+ {'type': 'string'},
+ 'value':
+ {'type': 'integer'},
+ 'verb':
+ {'type': 'string'}
+ }
+ }
+ },
+ 'regex': {'type': 'string'},
+ 'uri': {'type': 'string'}
+ }
+ }
+ }
+ },
+ 'required': ['absolute', 'rate']
+ }
+ },
+ 'required': ['limits']
+ }
+}
diff --git a/tempest/api_schema/compute/v2/quotas.py b/tempest/api_schema/compute/v2/quotas.py
new file mode 100644
index 0000000..31c0458
--- /dev/null
+++ b/tempest/api_schema/compute/v2/quotas.py
@@ -0,0 +1,48 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from tempest.api_schema.compute import quotas
+
+quota_set = copy.deepcopy(quotas.common_quota_set)
+quota_set['response_body']['properties']['quota_set']['properties'][
+ 'id'] = {'type': 'string'}
+quota_set['response_body']['properties']['quota_set']['properties'][
+ 'injected_files'] = {'type': 'integer'}
+quota_set['response_body']['properties']['quota_set']['properties'][
+ 'injected_file_content_bytes'] = {'type': 'integer'}
+quota_set['response_body']['properties']['quota_set']['properties'][
+ 'injected_file_path_bytes'] = {'type': 'integer'}
+quota_set['response_body']['properties']['quota_set']['required'].extend([
+ 'id',
+ 'injected_files',
+ 'injected_file_content_bytes',
+ 'injected_file_path_bytes'])
+
+quota_set_update = copy.deepcopy(quotas.common_quota_set)
+quota_set_update['response_body']['properties']['quota_set']['properties'][
+ 'injected_files'] = {'type': 'integer'}
+quota_set_update['response_body']['properties']['quota_set']['properties'][
+ 'injected_file_content_bytes'] = {'type': 'integer'}
+quota_set_update['response_body']['properties']['quota_set']['properties'][
+ 'injected_file_path_bytes'] = {'type': 'integer'}
+quota_set_update['response_body']['properties']['quota_set'][
+ 'required'].extend(['injected_files',
+ 'injected_file_content_bytes',
+ 'injected_file_path_bytes'])
+
+delete_quota = {
+ 'status_code': [202]
+}
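+
+# NOTE: After the updates above, the V2 quota_set schema additionally
+# requires 'id', 'injected_files', 'injected_file_content_bytes' and
+# 'injected_file_path_bytes' on top of the common schema. A quick sketch,
+# for illustration only:
+#
+#     _qs = quota_set['response_body']['properties']['quota_set']
+#     assert 'injected_file_path_bytes' in _qs['required']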
diff --git a/tempest/api_schema/compute/v2/security_groups.py b/tempest/api_schema/compute/v2/security_groups.py
new file mode 100644
index 0000000..8b4bead
--- /dev/null
+++ b/tempest/api_schema/compute/v2/security_groups.py
@@ -0,0 +1,101 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+common_security_group_rule = {
+ 'from_port': {'type': ['integer', 'null']},
+ 'to_port': {'type': ['integer', 'null']},
+ 'group': {
+ 'type': 'object',
+ 'properties': {
+ 'tenant_id': {'type': 'string'},
+ 'name': {'type': 'string'}
+ }
+ },
+ 'ip_protocol': {'type': ['string', 'null']},
+    # 'parent_group_id' can be a UUID, so 'string' is allowed as well.
+ 'parent_group_id': {'type': ['string', 'integer', 'null']},
+ 'ip_range': {
+ 'type': 'object',
+ 'properties': {
+ 'cidr': {'type': 'string'}
+ }
+        # When an optional argument such as 'group_id' is provided in the
+        # request body, the 'cidr' attribute does not come back in the
+        # response body, so it is not 'required'.
+ },
+ 'id': {'type': ['string', 'integer']}
+}
+
+common_security_group = {
+ 'type': 'object',
+ 'properties': {
+ 'id': {'type': ['integer', 'string']},
+ 'name': {'type': 'string'},
+ 'tenant_id': {'type': 'string'},
+ 'rules': {
+ 'type': 'array',
+ 'items': {
+ 'type': ['object', 'null'],
+ 'properties': common_security_group_rule
+ }
+ },
+ 'description': {'type': 'string'},
+ },
+ 'required': ['id', 'name', 'tenant_id', 'rules', 'description'],
+}
+
+list_security_groups = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'security_groups': {
+ 'type': 'array',
+ 'items': common_security_group
+ }
+ },
+ 'required': ['security_groups']
+ }
+}
+
+get_security_group = create_security_group = update_security_group = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'security_group': common_security_group
+ },
+ 'required': ['security_group']
+ }
+}
+
+create_security_group_rule = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'security_group_rule': {
+ 'type': 'object',
+ 'properties': common_security_group_rule,
+ 'required': ['from_port', 'to_port', 'group', 'ip_protocol',
+ 'parent_group_id', 'id', 'ip_range']
+ }
+ },
+ 'required': ['security_group_rule']
+ }
+}
+
+delete_security_group_rule = {
+ 'status_code': [202]
+}
diff --git a/tempest/api_schema/compute/v2/servers.py b/tempest/api_schema/compute/v2/servers.py
new file mode 100644
index 0000000..e90f436
--- /dev/null
+++ b/tempest/api_schema/compute/v2/servers.py
@@ -0,0 +1,130 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from tempest.api_schema.compute import parameter_types
+from tempest.api_schema.compute import servers
+
+create_server = {
+ 'status_code': [202],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'server': {
+ 'type': 'object',
+ 'properties': {
+ # NOTE: Now the type of 'id' is uuid, but here allows
+ # 'integer' also because old OpenStack uses 'integer'
+ # as a server id.
+ 'id': {'type': ['integer', 'string']},
+ 'security_groups': {'type': 'array'},
+ 'links': parameter_types.links,
+ 'adminPass': {'type': 'string'},
+ 'OS-DCF:diskConfig': {'type': 'string'}
+ },
+                # NOTE: OS-DCF:diskConfig is an API extension, and some
+                # environments return a response without the attribute,
+                # so it is not 'required'.
+ # NOTE: adminPass is not required because it can be deactivated
+ # with nova API flag enable_instance_password=False
+ 'required': ['id', 'security_groups', 'links']
+ }
+ },
+ 'required': ['server']
+ }
+}
+
+update_server = copy.deepcopy(servers.base_update_server)
+update_server['response_body']['properties']['server']['properties'].update({
+ 'hostId': {'type': 'string'},
+ 'OS-DCF:diskConfig': {'type': 'string'},
+ 'accessIPv4': parameter_types.access_ip_v4,
+ 'accessIPv6': parameter_types.access_ip_v6
+})
+update_server['response_body']['properties']['server']['required'].append(
+ # NOTE: OS-DCF:diskConfig and accessIPv4/v6 are API
+ # extensions, and some environments return a response
+ # without these attributes. So they are not 'required'.
+ 'hostId'
+)
+
+list_virtual_interfaces = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'virtual_interfaces': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'id': {'type': 'string'},
+ 'mac_address': parameter_types.mac_address,
+ 'OS-EXT-VIF-NET:net_id': {'type': 'string'}
+ },
+                    # 'OS-EXT-VIF-NET:net_id' is an API extension, so it
+                    # is not defined as 'required'.
+ 'required': ['id', 'mac_address']
+ }
+ }
+ },
+ 'required': ['virtual_interfaces']
+ }
+}
+
+attach_volume = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'volumeAttachment': {
+ 'type': 'object',
+ 'properties': {
+ 'id': {'type': 'string'},
+ 'device': {'type': 'string'},
+ 'volumeId': {'type': 'string'},
+ 'serverId': {'type': ['integer', 'string']}
+ },
+ 'required': ['id', 'device', 'volumeId', 'serverId']
+ }
+ },
+ 'required': ['volumeAttachment']
+ }
+}
+
+detach_volume = {
+ 'status_code': [202]
+}
+
+set_get_server_metadata_item = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'meta': {
+ 'type': 'object',
+ 'patternProperties': {
+ '^.+$': {'type': 'string'}
+ }
+ }
+ },
+ 'required': ['meta']
+ }
+}
+
+list_addresses_by_network = {
+ 'status_code': [200],
+ 'response_body': parameter_types.addresses
+}
diff --git a/tempest/api_schema/compute/v2/tenant_usages.py b/tempest/api_schema/compute/v2/tenant_usages.py
new file mode 100644
index 0000000..0b824a1
--- /dev/null
+++ b/tempest/api_schema/compute/v2/tenant_usages.py
@@ -0,0 +1,92 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+_server_usages = {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'ended_at': {
+ 'oneOf': [
+ {'type': 'string'},
+ {'type': 'null'}
+ ]
+ },
+ 'flavor': {'type': 'string'},
+ 'hours': {'type': 'number'},
+ 'instance_id': {'type': 'string'},
+ 'local_gb': {'type': 'integer'},
+ 'memory_mb': {'type': 'integer'},
+ 'name': {'type': 'string'},
+ 'started_at': {'type': 'string'},
+ 'state': {'type': 'string'},
+ 'tenant_id': {'type': 'string'},
+ 'uptime': {'type': 'integer'},
+ 'vcpus': {'type': 'integer'},
+ },
+ 'required': ['ended_at', 'flavor', 'hours', 'instance_id', 'local_gb',
+ 'memory_mb', 'name', 'started_at', 'state', 'tenant_id',
+ 'uptime', 'vcpus']
+ }
+}
+
+_tenant_usage_list = {
+ 'type': 'object',
+ 'properties': {
+ 'server_usages': _server_usages,
+ 'start': {'type': 'string'},
+ 'stop': {'type': 'string'},
+ 'tenant_id': {'type': 'string'},
+ 'total_hours': {'type': 'number'},
+ 'total_local_gb_usage': {'type': 'number'},
+ 'total_memory_mb_usage': {'type': 'number'},
+ 'total_vcpus_usage': {'type': 'number'},
+ },
+ 'required': ['start', 'stop', 'tenant_id',
+ 'total_hours', 'total_local_gb_usage',
+ 'total_memory_mb_usage', 'total_vcpus_usage']
+}
+
+# NOTE: The 'required' list of get_tenant differs from list_tenant's.
+_tenant_usage_get = copy.deepcopy(_tenant_usage_list)
+_tenant_usage_get['required'] = ['server_usages', 'start', 'stop', 'tenant_id',
+ 'total_hours', 'total_local_gb_usage',
+ 'total_memory_mb_usage', 'total_vcpus_usage']
+
+list_tenant = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'tenant_usages': {
+ 'type': 'array',
+ 'items': _tenant_usage_list
+ }
+ },
+ 'required': ['tenant_usages']
+ }
+}
+
+get_tenant = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'tenant_usage': _tenant_usage_get
+ },
+ 'required': ['tenant_usage']
+ }
+}
diff --git a/tempest/api_schema/compute/v2/volumes.py b/tempest/api_schema/compute/v2/volumes.py
new file mode 100644
index 0000000..84a659c
--- /dev/null
+++ b/tempest/api_schema/compute/v2/volumes.py
@@ -0,0 +1,114 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+create_get_volume = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'volume': {
+ 'type': 'object',
+ 'properties': {
+ 'id': {'type': 'string'},
+ 'status': {'type': 'string'},
+ 'displayName': {'type': ['string', 'null']},
+ 'availabilityZone': {'type': 'string'},
+ 'createdAt': {'type': 'string'},
+ 'displayDescription': {'type': ['string', 'null']},
+ 'volumeType': {'type': 'string'},
+ 'snapshotId': {'type': ['string', 'null']},
+ 'metadata': {'type': 'object'},
+ 'size': {'type': 'integer'},
+ 'attachments': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'id': {'type': 'string'},
+ 'device': {'type': 'string'},
+ 'volumeId': {'type': 'string'},
+ 'serverId': {'type': ['integer', 'string']}
+ }
+ # NOTE: If the volume is not attached to any server,
+ # the 'attachments' attribute is returned as an array
+ # containing an empty object ("[{}]"), so the elements
+ # of 'attachments' cannot be defined as 'required'.
+ # If it were returned as an empty array ("[]"), those
+ # elements could be defined as 'required'.
+ }
+ }
+ },
+ 'required': ['id', 'status', 'displayName', 'availabilityZone',
+ 'createdAt', 'displayDescription', 'volumeType',
+ 'snapshotId', 'metadata', 'size', 'attachments']
+ }
+ },
+ 'required': ['volume']
+ }
+}
+
+list_volumes = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'volumes': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'id': {'type': 'string'},
+ 'status': {'type': 'string'},
+ 'displayName': {'type': ['string', 'null']},
+ 'availabilityZone': {'type': 'string'},
+ 'createdAt': {'type': 'string'},
+ 'displayDescription': {'type': ['string', 'null']},
+ 'volumeType': {'type': 'string'},
+ 'snapshotId': {'type': ['string', 'null']},
+ 'metadata': {'type': 'object'},
+ 'size': {'type': 'integer'},
+ 'attachments': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'id': {'type': 'string'},
+ 'device': {'type': 'string'},
+ 'volumeId': {'type': 'string'},
+ 'serverId': {'type': ['integer', 'string']}
+ }
+ # NOTE: If the volume is not attached to any server,
+ # the 'attachments' attribute is returned as an array
+ # containing an empty object ("[{}]"), so the elements
+ # of 'attachments' cannot be defined as 'required'.
+ # If it were returned as an empty array ("[]"), those
+ # elements could be defined as 'required'.
+ }
+ }
+ },
+ 'required': ['id', 'status', 'displayName',
+ 'availabilityZone', 'createdAt',
+ 'displayDescription', 'volumeType',
+ 'snapshotId', 'metadata', 'size',
+ 'attachments']
+ }
+ }
+ },
+ 'required': ['volumes']
+ }
+}
+
+delete_volume = {
+ 'status_code': [202]
+}
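+# Illustrative sketch (not part of this module): the NOTE above about
+# 'attachments' can be seen with a plain jsonschema check, assuming the
+# jsonschema library; an unattached volume's "[{}]" payload still validates
+# because the item properties are deliberately not 'required':
+#
+#   import jsonschema
+#
+#   attachments_schema = create_get_volume['response_body']['properties'][
+#       'volume']['properties']['attachments']
+#   jsonschema.validate([{}], attachments_schema)  # passes: empty object ok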
diff --git a/tempest/api/compute/api_schema/__init__.py b/tempest/api_schema/compute/v3/__init__.py
similarity index 100%
copy from tempest/api/compute/api_schema/__init__.py
copy to tempest/api_schema/compute/v3/__init__.py
diff --git a/tempest/api_schema/compute/v3/agents.py b/tempest/api_schema/compute/v3/agents.py
new file mode 100644
index 0000000..63d1c46
--- /dev/null
+++ b/tempest/api_schema/compute/v3/agents.py
@@ -0,0 +1,17 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+delete_agent = {
+ 'status_code': [204]
+}
diff --git a/tempest/api_schema/compute/v3/aggregates.py b/tempest/api_schema/compute/v3/aggregates.py
new file mode 100644
index 0000000..0272641
--- /dev/null
+++ b/tempest/api_schema/compute/v3/aggregates.py
@@ -0,0 +1,29 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from tempest.api_schema.compute import aggregates
+
+delete_aggregate = {
+ 'status_code': [204]
+}
+
+create_aggregate = copy.deepcopy(aggregates.common_create_aggregate)
+# V3 API's response status_code is 201
+create_aggregate['status_code'] = [201]
+
+aggregate_add_remove_host = copy.deepcopy(aggregates.aggregate_add_remove_host)
+# V3 API's response status_code is 202
+aggregate_add_remove_host['status_code'] = [202]
diff --git a/tempest/api_schema/compute/v3/availability_zone.py b/tempest/api_schema/compute/v3/availability_zone.py
new file mode 100644
index 0000000..5f36c33
--- /dev/null
+++ b/tempest/api_schema/compute/v3/availability_zone.py
@@ -0,0 +1,53 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from tempest.api_schema.compute import availability_zone as common
+
+
+base = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'availability_zone_info': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'zone_name': {'type': 'string'},
+ 'zone_state': {
+ 'type': 'object',
+ 'properties': {
+ 'available': {'type': 'boolean'}
+ },
+ 'required': ['available']
+ },
+ # NOTE: 'hosts' is the difference between the detail
+ # and non-detail responses
+ 'hosts': {'type': 'null'}
+ },
+ 'required': ['zone_name', 'zone_state', 'hosts']
+ }
+ }
+ },
+ 'required': ['availability_zone_info']
+ }
+}
+
+get_availability_zone_list = copy.deepcopy(base)
+get_availability_zone_list_detail = copy.deepcopy(base)
+get_availability_zone_list_detail['response_body']['properties'][
+ 'availability_zone_info']['items']['properties']['hosts'] = common.detail
diff --git a/tempest/api_schema/compute/v3/certificates.py b/tempest/api_schema/compute/v3/certificates.py
new file mode 100644
index 0000000..0723a16
--- /dev/null
+++ b/tempest/api_schema/compute/v3/certificates.py
@@ -0,0 +1,20 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from tempest.api_schema.compute import certificates
+
+create_certificate = copy.deepcopy(certificates._common_schema)
+create_certificate['status_code'] = [201]
diff --git a/tempest/api_schema/compute/v3/extensions.py b/tempest/api_schema/compute/v3/extensions.py
new file mode 100644
index 0000000..ceb0ce2
--- /dev/null
+++ b/tempest/api_schema/compute/v3/extensions.py
@@ -0,0 +1,36 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+list_extensions = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'extensions': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'name': {'type': 'string'},
+ 'alias': {'type': 'string'},
+ 'description': {'type': 'string'},
+ 'version': {'type': 'integer'}
+ },
+ 'required': ['name', 'alias', 'description', 'version']
+ }
+ }
+ },
+ 'required': ['extensions']
+ }
+}
diff --git a/tempest/api_schema/compute/v3/flavors.py b/tempest/api_schema/compute/v3/flavors.py
new file mode 100644
index 0000000..52010f5
--- /dev/null
+++ b/tempest/api_schema/compute/v3/flavors.py
@@ -0,0 +1,68 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from tempest.api_schema.compute import flavors
+from tempest.api_schema.compute import flavors_extra_specs
+
+list_flavors_details = copy.deepcopy(flavors.common_flavor_list_details)
+
+# NOTE: In the V3 API, 'swap' is returned as 0, not as an empty string '""'
+# (the V2 API returns an empty string), so 'swap' is left as integer type only.
+
+# Defining extra attributes for V3 flavor schema
+list_flavors_details['response_body']['properties']['flavors']['items'][
+ 'properties'].update({'disabled': {'type': 'boolean'},
+ 'ephemeral': {'type': 'integer'},
+ 'flavor-access:is_public': {'type': 'boolean'},
+ 'os-flavor-rxtx:rxtx_factor': {'type': 'number'}})
+# 'flavor-access' and 'os-flavor-rxtx' are API extensions.
+# So they are not 'required'.
+list_flavors_details['response_body']['properties']['flavors']['items'][
+ 'required'].extend(['disabled', 'ephemeral'])
+
+set_flavor_extra_specs = copy.deepcopy(flavors_extra_specs.flavor_extra_specs)
+set_flavor_extra_specs['status_code'] = [201]
+
+unset_flavor_extra_specs = {
+ 'status_code': [204]
+}
+
+get_flavor_details = copy.deepcopy(flavors.common_flavor_details)
+
+# NOTE: In the V3 API, 'swap' is returned as 0, not as an empty string '""'
+# (the V2 API returns an empty string), so 'swap' is left as integer type only.
+
+# Defining extra attributes for V3 flavor schema
+get_flavor_details['response_body']['properties']['flavor'][
+ 'properties'].update({'disabled': {'type': 'boolean'},
+ 'ephemeral': {'type': 'integer'},
+ 'flavor-access:is_public': {'type': 'boolean'},
+ 'os-flavor-rxtx:rxtx_factor': {'type': 'number'}})
+
+# 'flavor-access' and 'os-flavor-rxtx' are API extensions.
+# So they are not 'required'.
+get_flavor_details['response_body']['properties']['flavor'][
+ 'required'].extend(['disabled', 'ephemeral'])
+
+
+create_flavor_details = copy.deepcopy(get_flavor_details)
+
+# Overriding the status code for create flavor V3 API.
+create_flavor_details['status_code'] = [201]
+
+delete_flavor = {
+ 'status_code': [204]
+}
diff --git a/tempest/api_schema/compute/v3/hosts.py b/tempest/api_schema/compute/v3/hosts.py
new file mode 100644
index 0000000..eb689d1
--- /dev/null
+++ b/tempest/api_schema/compute/v3/hosts.py
@@ -0,0 +1,53 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from tempest.api_schema.compute import hosts
+
+startup_host = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'host': hosts.common_start_up_body
+ },
+ 'required': ['host']
+ }
+}
+
+# The 'power_action' attribute of the 'shutdown_host' API is 'shutdown'
+shutdown_host = copy.deepcopy(startup_host)
+
+shutdown_host['response_body']['properties']['power_action'] = {
+ 'enum': ['shutdown']
+}
+
+# The 'power_action' attribute of the 'reboot_host' API is 'reboot'
+reboot_host = copy.deepcopy(startup_host)
+
+reboot_host['response_body']['properties']['power_action'] = {
+ 'enum': ['reboot']
+}
+
+update_host = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'host': hosts.update_host_common
+ },
+ 'required': ['host']
+ }
+}
diff --git a/tempest/api_schema/compute/v3/hypervisors.py b/tempest/api_schema/compute/v3/hypervisors.py
new file mode 100644
index 0000000..aa31827
--- /dev/null
+++ b/tempest/api_schema/compute/v3/hypervisors.py
@@ -0,0 +1,50 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+from tempest.api_schema.compute import hypervisors
+
+list_hypervisors_detail = copy.deepcopy(
+ hypervisors.common_list_hypervisors_detail)
+# Defining extra attributes for the V3 list hypervisors detail schema
+list_hypervisors_detail['response_body']['properties']['hypervisors'][
+ 'items']['properties']['os-pci:pci_stats'] = {'type': 'array'}
+
+show_hypervisor = copy.deepcopy(hypervisors.common_show_hypervisor)
+# Defining extra attributes for V3 show hypervisor schema
+show_hypervisor['response_body']['properties']['hypervisor']['properties'][
+ 'os-pci:pci_stats'] = {'type': 'array'}
+
+hypervisors_servers = copy.deepcopy(hypervisors.common_hypervisors_info)
+
+# Defining extra attributes for the V3 hypervisors servers schema
+hypervisors_servers['response_body']['properties']['hypervisor']['properties'][
+ 'servers'] = {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ # NOTE: The type of 'id' is currently integer,
+ # but 'string' is also allowed here because we
+ # will be able to change it to 'uuid' in
+ # the future.
+ 'id': {'type': ['integer', 'string']},
+ 'name': {'type': 'string'}
+ }
+ }
+ }
+# The V3 API response body always contains the 'servers' attribute, even
+# when no servers (VMs) are present on the hypervisor host.
+hypervisors_servers['response_body']['properties']['hypervisor'][
+ 'required'] = ['id', 'hypervisor_hostname', 'servers']
diff --git a/tempest/api_schema/compute/v3/interfaces.py b/tempest/api_schema/compute/v3/interfaces.py
new file mode 100644
index 0000000..5e1cee2
--- /dev/null
+++ b/tempest/api_schema/compute/v3/interfaces.py
@@ -0,0 +1,29 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api_schema.compute import interfaces as common_schema
+
+list_interfaces = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'interface_attachments': {
+ 'type': 'array',
+ 'items': common_schema.interface_common_info
+ }
+ },
+ 'required': ['interface_attachments']
+ }
+}
diff --git a/tempest/api_schema/compute/v3/keypairs.py b/tempest/api_schema/compute/v3/keypairs.py
new file mode 100644
index 0000000..de5f4ba
--- /dev/null
+++ b/tempest/api_schema/compute/v3/keypairs.py
@@ -0,0 +1,43 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api_schema.compute import keypairs
+
+get_keypair = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'keypair': {
+ 'type': 'object',
+ 'properties': {
+ 'public_key': {'type': 'string'},
+ 'name': {'type': 'string'},
+ 'fingerprint': {'type': 'string'}
+ },
+ 'required': ['public_key', 'name', 'fingerprint']
+ }
+ },
+ 'required': ['keypair']
+ }
+}
+
+create_keypair = {
+ 'status_code': [201],
+ 'response_body': keypairs.create_keypair
+}
+
+delete_keypair = {
+ 'status_code': [204],
+}
diff --git a/tempest/api_schema/compute/v3/quotas.py b/tempest/api_schema/compute/v3/quotas.py
new file mode 100644
index 0000000..a3212ed
--- /dev/null
+++ b/tempest/api_schema/compute/v3/quotas.py
@@ -0,0 +1,59 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from tempest.api_schema.compute import quotas
+
+quota_set = copy.deepcopy(quotas.common_quota_set)
+quota_set['response_body']['properties']['quota_set']['properties'][
+ 'id'] = {'type': 'string'}
+quota_set['response_body']['properties']['quota_set'][
+ 'required'].extend(['id'])
+
+quota_common_info = {
+ 'type': 'object',
+ 'properties': {
+ 'reserved': {'type': 'integer'},
+ 'limit': {'type': 'integer'},
+ 'in_use': {'type': 'integer'}
+ },
+ 'required': ['reserved', 'limit', 'in_use']
+}
+
+quota_set_detail = copy.deepcopy(quotas.common_quota_set)
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+ 'id'] = {'type': 'string'}
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+ 'instances'] = quota_common_info
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+ 'cores'] = quota_common_info
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+ 'ram'] = quota_common_info
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+ 'floating_ips'] = quota_common_info
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+ 'fixed_ips'] = quota_common_info
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+ 'metadata_items'] = quota_common_info
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+ 'key_pairs'] = quota_common_info
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+ 'security_groups'] = quota_common_info
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+ 'security_group_rules'] = quota_common_info
+
+delete_quota = {
+ 'status_code': [204]
+}
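+# Illustrative sketch (not part of this module): the repeated assignments
+# above could equally be written as a loop over the detailed resources,
+# shown here only to clarify the shape of quota_set_detail:
+#
+#   for _resource in ('instances', 'cores', 'ram', 'floating_ips',
+#                     'fixed_ips', 'metadata_items', 'key_pairs',
+#                     'security_groups', 'security_group_rules'):
+#       quota_set_detail['response_body']['properties']['quota_set'][
+#           'properties'][_resource] = quota_common_info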
diff --git a/tempest/api_schema/compute/v3/servers.py b/tempest/api_schema/compute/v3/servers.py
new file mode 100644
index 0000000..956b5ad
--- /dev/null
+++ b/tempest/api_schema/compute/v3/servers.py
@@ -0,0 +1,83 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from tempest.api_schema.compute import parameter_types
+from tempest.api_schema.compute import servers
+
+create_server = {
+ 'status_code': [202],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'server': {
+ 'type': 'object',
+ 'properties': {
+ # NOTE: The type of 'id' is now uuid, but 'integer' is
+ # also allowed here because old OpenStack releases used
+ # an integer as the server id.
+ 'id': {'type': ['integer', 'string']},
+ 'os-security-groups:security_groups': {'type': 'array'},
+ 'links': parameter_types.links,
+ 'admin_password': {'type': 'string'},
+ 'os-access-ips:access_ip_v4': parameter_types.access_ip_v4,
+ 'os-access-ips:access_ip_v6': parameter_types.access_ip_v6
+ },
+ # NOTE: os-access-ips:access_ip_v4/v6 are API extensions,
+ # and some environments return a response without these
+ # attributes. So they are not 'required'.
+ 'required': ['id', 'os-security-groups:security_groups',
+ 'links', 'admin_password']
+ }
+ },
+ 'required': ['server']
+ }
+}
+
+addresses_v3 = copy.deepcopy(parameter_types.addresses)
+addresses_v3['patternProperties']['^[a-zA-Z0-9-_.]+$']['items'][
+ 'properties'].update({
+ 'type': {'type': 'string'},
+ 'mac_addr': {'type': 'string'}
+ })
+addresses_v3['patternProperties']['^[a-zA-Z0-9-_.]+$']['items'][
+ 'required'].extend(
+ ['type', 'mac_addr']
+ )
+
+update_server = copy.deepcopy(servers.base_update_server)
+update_server['response_body']['properties']['server']['properties'].update({
+ 'addresses': addresses_v3,
+ 'host_id': {'type': 'string'},
+ 'os-access-ips:access_ip_v4': parameter_types.access_ip_v4,
+ 'os-access-ips:access_ip_v6': parameter_types.access_ip_v6
+})
+update_server['response_body']['properties']['server']['required'].append(
+ # NOTE: os-access-ips:access_ip_v4/v6 are API extensions,
+ # and some environments return a response without these
+ # attributes. So they are not 'required'.
+ 'host_id'
+)
+
+attach_detach_volume = {
+ 'status_code': [202]
+}
+
+set_get_server_metadata_item = copy.deepcopy(servers.set_server_metadata)
+
+list_addresses_by_network = {
+ 'status_code': [200],
+ 'response_body': addresses_v3
+}
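+# Illustrative sketch (not part of this module): addresses_v3 extends the
+# common addresses schema so each entry of a network also carries the
+# V3-only 'type' and 'mac_addr' keys; a response would look roughly like
+# the following (the base keys such as the address itself come from
+# parameter_types.addresses and are assumed here):
+#
+#   {'private': [{'addr': '10.0.0.3', 'version': 4,
+#                 'type': 'fixed', 'mac_addr': 'aa:bb:cc:dd:ee:ff'}]}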
diff --git a/tempest/api_schema/compute/version.py b/tempest/api_schema/compute/version.py
new file mode 100644
index 0000000..32c6d96
--- /dev/null
+++ b/tempest/api_schema/compute/version.py
@@ -0,0 +1,55 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+version = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'version': {
+ 'type': 'object',
+ 'properties': {
+ 'id': {'type': 'string'},
+ 'links': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'href': {'type': 'string', 'format': 'uri'},
+ 'rel': {'type': 'string'},
+ 'type': {'type': 'string'}
+ },
+ 'required': ['href', 'rel']
+ }
+ },
+ 'media-types': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'base': {'type': 'string'},
+ 'type': {'type': 'string'}
+ },
+ 'required': ['base', 'type']
+ }
+ },
+ 'status': {'type': 'string'},
+ 'updated': {'type': 'string', 'format': 'date-time'}
+ },
+ 'required': ['id', 'links', 'media-types', 'status', 'updated']
+ }
+ },
+ 'required': ['version']
+ }
+}
diff --git a/tempest/auth.py b/tempest/auth.py
index 0e45161..9c51edb 100644
--- a/tempest/auth.py
+++ b/tempest/auth.py
@@ -43,11 +43,11 @@
:param client_type: 'tempest' or 'official'
:param interface: 'json' or 'xml'. Applicable for tempest client only
"""
+ credentials = self._convert_credentials(credentials)
if self.check_credentials(credentials):
self.credentials = credentials
else:
raise TypeError("Invalid credentials")
- self.credentials = credentials
self.client_type = client_type
self.interface = interface
if self.client_type == 'tempest' and self.interface is None:
@@ -56,6 +56,13 @@
self.alt_auth_data = None
self.alt_part = None
+ def _convert_credentials(self, credentials):
+ # Support dict credentials for backwards compatibility
+ if isinstance(credentials, dict):
+ return get_credentials(**credentials)
+ else:
+ return credentials
+
def __str__(self):
return "Creds :{creds}, client type: {client_type}, interface: " \
"{interface}, cached auth data: {cache}".format(
@@ -73,29 +80,55 @@
def _get_auth(self):
raise NotImplementedError
+ def _fill_credentials(self, auth_data_body):
+ raise NotImplementedError
+
+ def fill_credentials(self):
+ """
+ Fill the credentials object with data from the auth response
+ """
+ auth_data = self.get_auth()
+ self._fill_credentials(auth_data[1])
+ return self.credentials
+
@classmethod
def check_credentials(cls, credentials):
"""
- Verify credentials are valid. Subclasses can do a better check.
+ Verify credentials are valid.
"""
- return isinstance(credentials, dict)
+ return isinstance(credentials, Credentials) and credentials.is_valid()
@property
def auth_data(self):
- if self.cache is None or self.is_expired(self.cache):
- self.cache = self._get_auth()
- return self.cache
+ return self.get_auth()
@auth_data.deleter
def auth_data(self):
self.clear_auth()
+ def get_auth(self):
+ """
+ Returns auth data from the cache if available, otherwise authenticates first
+ """
+ if self.cache is None or self.is_expired(self.cache):
+ self.set_auth()
+ return self.cache
+
+ def set_auth(self):
+ """
+ Forces authentication, ignoring the cache if it exists,
+ and refills the credentials.
+ """
+ self.cache = self._get_auth()
+ self._fill_credentials(self.cache[1])
+
def clear_auth(self):
"""
Can be called to clear the access cache so that next request
will fetch a new token and base_url.
"""
self.cache = None
+ self.credentials.reset()
def is_expired(self, auth_data):
raise NotImplementedError
@@ -164,6 +197,8 @@
class KeystoneAuthProvider(AuthProvider):
+ token_expiry_threshold = datetime.timedelta(seconds=60)
+
def __init__(self, credentials, client_type='tempest', interface=None):
super(KeystoneAuthProvider, self).__init__(credentials, client_type,
interface)
@@ -216,16 +251,6 @@
EXPIRY_DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
- @classmethod
- def check_credentials(cls, credentials, scoped=True):
- # tenant_name is optional if not scoped
- valid = super(KeystoneV2AuthProvider, cls).check_credentials(
- credentials) and 'username' in credentials and \
- 'password' in credentials
- if scoped:
- valid = valid and 'tenant_name' in credentials
- return valid
-
def _auth_client(self):
if self.client_type == 'tempest':
if self.interface == 'json':
@@ -238,13 +263,25 @@
def _auth_params(self):
if self.client_type == 'tempest':
return dict(
- user=self.credentials['username'],
- password=self.credentials['password'],
- tenant=self.credentials.get('tenant_name', None),
+ user=self.credentials.username,
+ password=self.credentials.password,
+ tenant=self.credentials.tenant_name,
auth_data=True)
else:
raise NotImplementedError
+ def _fill_credentials(self, auth_data_body):
+ tenant = auth_data_body['token']['tenant']
+ user = auth_data_body['user']
+ if self.credentials.tenant_name is None:
+ self.credentials.tenant_name = tenant['name']
+ if self.credentials.tenant_id is None:
+ self.credentials.tenant_id = tenant['id']
+ if self.credentials.username is None:
+ self.credentials.username = user['name']
+ if self.credentials.user_id is None:
+ self.credentials.user_id = user['id']
+
def base_url(self, filters, auth_data=None):
"""
Filters can be:
@@ -293,23 +330,14 @@
_, access = auth_data
expiry = datetime.datetime.strptime(access['token']['expires'],
self.EXPIRY_DATE_FORMAT)
- return expiry <= datetime.datetime.now()
+ return expiry - self.token_expiry_threshold <= \
+ datetime.datetime.utcnow()
class KeystoneV3AuthProvider(KeystoneAuthProvider):
EXPIRY_DATE_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
- @classmethod
- def check_credentials(cls, credentials, scoped=True):
- # tenant_name is optional if not scoped
- valid = super(KeystoneV3AuthProvider, cls).check_credentials(
- credentials) and 'username' in credentials and \
- 'password' in credentials and 'domain_name' in credentials
- if scoped:
- valid = valid and 'tenant_name' in credentials
- return valid
-
def _auth_client(self):
if self.client_type == 'tempest':
if self.interface == 'json':
@@ -322,14 +350,47 @@
def _auth_params(self):
if self.client_type == 'tempest':
return dict(
- user=self.credentials['username'],
- password=self.credentials['password'],
- tenant=self.credentials.get('tenant_name', None),
- domain=self.credentials['domain_name'],
+ user=self.credentials.username,
+ password=self.credentials.password,
+ tenant=self.credentials.tenant_name,
+ domain=self.credentials.user_domain_name,
auth_data=True)
else:
raise NotImplementedError
+ def _fill_credentials(self, auth_data_body):
+ # project or domain, depending on the scope
+ project = auth_data_body.get('project', None)
+ domain = auth_data_body.get('domain', None)
+ # user is always there
+ user = auth_data_body['user']
+ # Set project fields
+ if project is not None:
+ if self.credentials.project_name is None:
+ self.credentials.project_name = project['name']
+ if self.credentials.project_id is None:
+ self.credentials.project_id = project['id']
+ if self.credentials.project_domain_id is None:
+ self.credentials.project_domain_id = project['domain']['id']
+ if self.credentials.project_domain_name is None:
+ self.credentials.project_domain_name = \
+ project['domain']['name']
+ # Set domain fields
+ if domain is not None:
+ if self.credentials.domain_id is None:
+ self.credentials.domain_id = domain['id']
+ if self.credentials.domain_name is None:
+ self.credentials.domain_name = domain['name']
+ # Set user fields
+ if self.credentials.username is None:
+ self.credentials.username = user['name']
+ if self.credentials.user_id is None:
+ self.credentials.user_id = user['id']
+ if self.credentials.user_domain_id is None:
+ self.credentials.user_domain_id = user['domain']['id']
+ if self.credentials.user_domain_name is None:
+ self.credentials.user_domain_name = user['domain']['name']
+
def base_url(self, filters, auth_data=None):
"""
Filters can be:
@@ -393,4 +454,250 @@
_, access = auth_data
expiry = datetime.datetime.strptime(access['expires_at'],
self.EXPIRY_DATE_FORMAT)
- return expiry <= datetime.datetime.now()
+ return expiry - self.token_expiry_threshold <= \
+ datetime.datetime.utcnow()
+
+
+def get_default_credentials(credential_type, fill_in=True):
+ """
+ Returns configured credentials of the specified type
+ based on the configured auth_version
+ """
+ return get_credentials(fill_in=fill_in, credential_type=credential_type)
+
+
+def get_credentials(credential_type=None, fill_in=True, **kwargs):
+ """
+ Builds a credentials object based on the configured auth_version
+
+ :param credential_type (string): requests credentials from the tempest
+ configuration file. Valid values are defined in
+ Credentials.TYPES.
+ :param kwargs (dict): taken into account only if credential_type is
+ not specified or None. Dict of credential key/value pairs
+
+ Examples:
+
+ Returns credentials from the provided parameters:
+ >>> get_credentials(username='foo', password='bar')
+
+ Returns credentials from tempest configuration:
+ >>> get_credentials(credential_type='user')
+ """
+ if CONF.identity.auth_version == 'v2':
+ credential_class = KeystoneV2Credentials
+ auth_provider_class = KeystoneV2AuthProvider
+ elif CONF.identity.auth_version == 'v3':
+ credential_class = KeystoneV3Credentials
+ auth_provider_class = KeystoneV3AuthProvider
+ else:
+ raise exceptions.InvalidConfiguration('Unsupported auth version')
+ if credential_type is not None:
+ creds = credential_class.get_default(credential_type)
+ else:
+ creds = credential_class(**kwargs)
+ # Fill in the credentials fields that were not specified
+ if fill_in:
+ auth_provider = auth_provider_class(creds)
+ creds = auth_provider.fill_credentials()
+ return creds
+
+
+class Credentials(object):
+ """
+ Set of credentials for accessing OpenStack services
+
+ ATTRIBUTES: list of valid class attributes representing credentials.
+
+ TYPES: types of credentials available in the configuration file.
+ For each key there's a tuple (section, prefix) to match the
+ configuration options.
+ """
+
+ ATTRIBUTES = []
+ TYPES = {
+ 'identity_admin': ('identity', 'admin'),
+ 'compute_admin': ('compute_admin', None),
+ 'user': ('identity', None),
+ 'alt_user': ('identity', 'alt')
+ }
+
+ def __init__(self, **kwargs):
+ """
+ Enforce the available attributes at init time (only).
+ Additional attributes can still be set afterwards if tests need
+ to do so.
+ """
+ self._initial = kwargs
+ self._apply_credentials(kwargs)
+
+ def _apply_credentials(self, attr):
+ for key in attr.keys():
+ if key in self.ATTRIBUTES:
+ setattr(self, key, attr[key])
+ else:
+ raise exceptions.InvalidCredentials
+
+ def __str__(self):
+ """
+ Represent only attributes included in self.ATTRIBUTES
+ """
+ _repr = dict((k, getattr(self, k)) for k in self.ATTRIBUTES)
+ return str(_repr)
+
+ def __eq__(self, other):
+ """
+ Credentials are equal if attributes in self.ATTRIBUTES are equal
+ """
+ return str(self) == str(other)
+
+ def __getattr__(self, key):
+ # If an attribute is set, __getattr__ is not invoked
+ # If an attribute is not set, and it is a known one, return None
+ if key in self.ATTRIBUTES:
+ return None
+ else:
+ raise AttributeError
+
+ def __delitem__(self, key):
+ # For backwards compatibility, support dict behaviour
+ if key in self.ATTRIBUTES:
+ delattr(self, key)
+ else:
+ raise AttributeError
+
+ def get(self, item, default):
+ # In this patch, act as a dict for backwards compatibility
+ try:
+ return getattr(self, item)
+ except AttributeError:
+ return default
+
+ @classmethod
+ def get_default(cls, credentials_type):
+ if credentials_type not in cls.TYPES:
+ raise exceptions.InvalidCredentials()
+ creds = cls._get_default(credentials_type)
+ if not creds.is_valid():
+ raise exceptions.InvalidConfiguration()
+ return creds
+
+ @classmethod
+ def _get_default(cls, credentials_type):
+ raise NotImplementedError
+
+ def is_valid(self):
+ raise NotImplementedError
+
+ def reset(self):
+ # First delete all known attributes
+ for key in self.ATTRIBUTES:
+ if getattr(self, key) is not None:
+ delattr(self, key)
+ # Then re-apply initial setup
+ self._apply_credentials(self._initial)
+
+
+class KeystoneV2Credentials(Credentials):
+
+ CONF_ATTRIBUTES = ['username', 'password', 'tenant_name']
+ ATTRIBUTES = ['user_id', 'tenant_id']
+ ATTRIBUTES.extend(CONF_ATTRIBUTES)
+
+ @classmethod
+ def _get_default(cls, credentials_type='user'):
+ params = {}
+ section, prefix = cls.TYPES[credentials_type]
+ for attr in cls.CONF_ATTRIBUTES:
+ _section = getattr(CONF, section)
+ if prefix is None:
+ params[attr] = getattr(_section, attr)
+ else:
+ params[attr] = getattr(_section, prefix + "_" + attr)
+ return cls(**params)
+
+ def is_valid(self):
+ """
+ Minimum set of valid credentials, are username and password.
+ Tenant is optional.
+ """
+ return None not in (self.username, self.password)
+
+
+class KeystoneV3Credentials(KeystoneV2Credentials):
+ """
+ Credentials suitable for the Keystone Identity V3 API
+ """
+
+ CONF_ATTRIBUTES = ['domain_name', 'password', 'tenant_name', 'username']
+ ATTRIBUTES = ['project_domain_id', 'project_domain_name', 'project_id',
+ 'project_name', 'tenant_id', 'tenant_name', 'user_domain_id',
+ 'user_domain_name', 'user_id']
+ ATTRIBUTES.extend(CONF_ATTRIBUTES)
+
+ def __init__(self, **kwargs):
+ """
+ If domain is not specified, load the one configured for the
+ identity manager.
+ """
+ domain_fields = set(x for x in self.ATTRIBUTES if 'domain' in x)
+ if not domain_fields.intersection(kwargs.keys()):
+ kwargs['user_domain_name'] = CONF.identity.admin_domain_name
+ super(KeystoneV3Credentials, self).__init__(**kwargs)
+
+ def __setattr__(self, key, value):
+ parent = super(KeystoneV3Credentials, self)
+ # for tenant_* set both project and tenant
+ if key == 'tenant_id':
+ parent.__setattr__('project_id', value)
+ elif key == 'tenant_name':
+ parent.__setattr__('project_name', value)
+ # for project_* set both project and tenant
+ if key == 'project_id':
+ parent.__setattr__('tenant_id', value)
+ elif key == 'project_name':
+ parent.__setattr__('tenant_name', value)
+ # for *_domain_* set both user and project if not set yet
+ if key == 'user_domain_id':
+ if self.project_domain_id is None:
+ parent.__setattr__('project_domain_id', value)
+ if key == 'project_domain_id':
+ if self.user_domain_id is None:
+ parent.__setattr__('user_domain_id', value)
+ if key == 'user_domain_name':
+ if self.project_domain_name is None:
+ parent.__setattr__('project_domain_name', value)
+ if key == 'project_domain_name':
+ if self.user_domain_name is None:
+ parent.__setattr__('user_domain_name', value)
+ # support domain_name coming from config
+ if key == 'domain_name':
+ parent.__setattr__('user_domain_name', value)
+ parent.__setattr__('project_domain_name', value)
+ # finally trigger default behaviour for all attributes
+ parent.__setattr__(key, value)
+
+ def is_valid(self):
+ """
+ Valid combinations of v3 credentials (excluding token, scope)
+ - User id, password (optional domain)
+ - User name, password and its domain id/name
+ For the scope, valid combinations are:
+ - None
+ - Project id (optional domain)
+ - Project name and its domain id/name
+ """
+ valid_user_domain = any(
+ [self.user_domain_id is not None,
+ self.user_domain_name is not None])
+ valid_project_domain = any(
+ [self.project_domain_id is not None,
+ self.project_domain_name is not None])
+ valid_user = any(
+ [self.user_id is not None,
+ self.username is not None and valid_user_domain])
+ valid_project = any(
+ [self.project_name is None and self.project_id is None,
+ self.project_id is not None,
+ self.project_name is not None and valid_project_domain])
+ return all([self.password is not None, valid_user, valid_project])
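+# Illustrative sketch (not part of this module), assuming a loaded tempest
+# configuration: the new Credentials objects keep the V3 tenant_*/project_*
+# aliases in sync and still offer dict-style access for older callers:
+#
+#   >>> creds = KeystoneV3Credentials(username='foo', password='bar',
+#   ...                               tenant_name='demo')
+#   >>> creds.project_name        # populated by __setattr__ from tenant_name
+#   'demo'
+#   >>> creds.get('username', None)
+#   'foo'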
diff --git a/tempest/cli/__init__.py b/tempest/cli/__init__.py
index 8c4ec45..6aa98c4 100644
--- a/tempest/cli/__init__.py
+++ b/tempest/cli/__init__.py
@@ -42,6 +42,7 @@
def nova(self, action, flags='', params='', admin=True, fail_ok=False):
"""Executes nova command for the given action."""
+ flags += ' --endpoint-type %s' % CONF.compute.endpoint_type
return self.cmd_with_auth(
'nova', action, flags, params, admin, fail_ok)
@@ -58,42 +59,48 @@
def glance(self, action, flags='', params='', admin=True, fail_ok=False):
"""Executes glance command for the given action."""
+ flags += ' --os-endpoint-type %s' % CONF.image.endpoint_type
return self.cmd_with_auth(
'glance', action, flags, params, admin, fail_ok)
def ceilometer(self, action, flags='', params='', admin=True,
fail_ok=False):
"""Executes ceilometer command for the given action."""
+ flags += ' --os-endpoint-type %s' % CONF.telemetry.endpoint_type
return self.cmd_with_auth(
'ceilometer', action, flags, params, admin, fail_ok)
def heat(self, action, flags='', params='', admin=True,
fail_ok=False):
"""Executes heat command for the given action."""
+ flags += ' --os-endpoint-type %s' % CONF.orchestration.endpoint_type
return self.cmd_with_auth(
'heat', action, flags, params, admin, fail_ok)
def cinder(self, action, flags='', params='', admin=True, fail_ok=False):
"""Executes cinder command for the given action."""
+ flags += ' --endpoint-type %s' % CONF.volume.endpoint_type
return self.cmd_with_auth(
'cinder', action, flags, params, admin, fail_ok)
def neutron(self, action, flags='', params='', admin=True, fail_ok=False):
"""Executes neutron command for the given action."""
+ flags += ' --endpoint-type %s' % CONF.network.endpoint_type
return self.cmd_with_auth(
'neutron', action, flags, params, admin, fail_ok)
- def savanna(self, action, flags='', params='', admin=True, fail_ok=False):
- """Executes savanna command for the given action."""
+ def sahara(self, action, flags='', params='', admin=True, fail_ok=False):
+ """Executes sahara command for the given action."""
+ flags += ' --endpoint-type %s' % CONF.data_processing.endpoint_type
return self.cmd_with_auth(
- 'savanna', action, flags, params, admin, fail_ok)
+ 'sahara', action, flags, params, admin, fail_ok)
def cmd_with_auth(self, cmd, action, flags='', params='',
admin=True, fail_ok=False):
"""Executes given command with auth attributes appended."""
# TODO(jogo) make admin=False work
creds = ('--os-username %s --os-tenant-name %s --os-password %s '
- '--os-auth-url %s ' %
+ '--os-auth-url %s' %
(CONF.identity.admin_username,
CONF.identity.admin_tenant_name,
CONF.identity.admin_password,
@@ -107,25 +114,19 @@
cmd = ' '.join([os.path.join(CONF.cli.cli_dir, cmd),
flags, action, params])
LOG.info("running: '%s'" % cmd)
- cmd_str = cmd
cmd = shlex.split(cmd)
result = ''
result_err = ''
- try:
- stdout = subprocess.PIPE
- stderr = subprocess.STDOUT if merge_stderr else subprocess.PIPE
- proc = subprocess.Popen(
- cmd, stdout=stdout, stderr=stderr)
- result, result_err = proc.communicate()
- if not fail_ok and proc.returncode != 0:
- raise CommandFailed(proc.returncode,
- cmd,
- result,
- stderr=result_err)
- finally:
- LOG.debug('output of %s:\n%s' % (cmd_str, result))
- if not merge_stderr and result_err:
- LOG.debug('error output of %s:\n%s' % (cmd_str, result_err))
+ stdout = subprocess.PIPE
+ stderr = subprocess.STDOUT if merge_stderr else subprocess.PIPE
+ proc = subprocess.Popen(
+ cmd, stdout=stdout, stderr=stderr)
+ result, result_err = proc.communicate()
+ if not fail_ok and proc.returncode != 0:
+ raise CommandFailed(proc.returncode,
+ cmd,
+ result,
+ stderr=result_err)
return result
def assertTableStruct(self, items, field_names):
diff --git a/tempest/cli/output_parser.py b/tempest/cli/output_parser.py
index 4edcd47..80234a3 100644
--- a/tempest/cli/output_parser.py
+++ b/tempest/cli/output_parser.py
@@ -17,6 +17,7 @@
import re
+from tempest import exceptions
from tempest.openstack.common import log as logging
@@ -37,7 +38,7 @@
for table_ in tables_:
if 'Property' not in table_['headers'] \
or 'Value' not in table_['headers']:
- raise Exception('Invalid structure of table with details')
+ raise exceptions.InvalidStructure()
item = {}
for value in table_['values']:
item[value[0]] = value[1]
diff --git a/tempest/cli/simple_read_only/test_ceilometer.py b/tempest/cli/simple_read_only/test_ceilometer.py
index 0b6ae22..1d2822d 100644
--- a/tempest/cli/simple_read_only/test_ceilometer.py
+++ b/tempest/cli/simple_read_only/test_ceilometer.py
@@ -16,6 +16,7 @@
from tempest import cli
from tempest import config
from tempest.openstack.common import log as logging
+from tempest import test
CONF = config.CONF
@@ -41,6 +42,7 @@
def test_ceilometer_meter_list(self):
self.ceilometer('meter-list')
+ @test.attr(type='slow')
def test_ceilometer_resource_list(self):
self.ceilometer('resource-list')
diff --git a/tempest/cli/simple_read_only/test_cinder.py b/tempest/cli/simple_read_only/test_cinder.py
index afbd732..723333b 100644
--- a/tempest/cli/simple_read_only/test_cinder.py
+++ b/tempest/cli/simple_read_only/test_cinder.py
@@ -16,6 +16,7 @@
import logging
import re
import subprocess
+import testtools
import tempest.cli
from tempest import config
@@ -86,6 +87,8 @@
def test_cinder_rate_limits(self):
self.cinder('rate-limits')
+ @testtools.skipUnless(CONF.volume_feature_enabled.snapshot,
+ 'Volume snapshot not available.')
def test_cinder_snapshot_list(self):
self.cinder('snapshot-list')
diff --git a/tempest/cli/simple_read_only/test_nova.py b/tempest/cli/simple_read_only/test_nova.py
index d0b6028..a3787ab 100644
--- a/tempest/cli/simple_read_only/test_nova.py
+++ b/tempest/cli/simple_read_only/test_nova.py
@@ -182,6 +182,10 @@
self.nova('agent-list')
self.nova('agent-list', flags='--debug')
+ def test_migration_list(self):
+ self.nova('migration-list')
+ self.nova('migration-list', flags='--debug')
+
# Optional arguments:
def test_admin_version(self):
diff --git a/tempest/cli/simple_read_only/test_sahara.py b/tempest/cli/simple_read_only/test_sahara.py
new file mode 100644
index 0000000..36cc324
--- /dev/null
+++ b/tempest/cli/simple_read_only/test_sahara.py
@@ -0,0 +1,140 @@
+# Copyright (c) 2013 Mirantis Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import subprocess
+
+from tempest import cli
+from tempest import config
+from tempest import test
+
+CONF = config.CONF
+
+LOG = logging.getLogger(__name__)
+
+
+class SimpleReadOnlySaharaClientTest(cli.ClientTestBase):
+ """Basic, read-only tests for Sahara CLI client.
+
+ Checks return values and output of read-only commands.
+ These tests do not presume any content, nor do they create
+ their own. They only verify the structure of output if present.
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ if not CONF.service_available.sahara:
+ msg = "Skipping all Sahara cli tests because it is not available"
+ raise cls.skipException(msg)
+ super(SimpleReadOnlySaharaClientTest, cls).setUpClass()
+
+ @test.attr(type='negative')
+ def test_sahara_fake_action(self):
+ self.assertRaises(subprocess.CalledProcessError,
+ self.sahara,
+ 'this-does-not-exist')
+
+ def test_sahara_plugins_list(self):
+ plugins = self.parser.listing(self.sahara('plugin-list'))
+ self.assertTableStruct(plugins, [
+ 'name',
+ 'versions',
+ 'title'
+ ])
+
+ def test_sahara_plugins_show(self):
+ result = self.sahara('plugin-show', params='--name vanilla')
+ plugin = self.parser.listing(result)
+ self.assertTableStruct(plugin, [
+ 'Property',
+ 'Value'
+ ])
+
+ def test_sahara_node_group_template_list(self):
+ result = self.sahara('node-group-template-list')
+ node_group_templates = self.parser.listing(result)
+ self.assertTableStruct(node_group_templates, [
+ 'name',
+ 'id',
+ 'plugin_name',
+ 'node_processes',
+ 'description'
+ ])
+
+ def test_sahara_cluster_template_list(self):
+ result = self.sahara('cluster-template-list')
+ cluster_templates = self.parser.listing(result)
+ self.assertTableStruct(cluster_templates, [
+ 'name',
+ 'id',
+ 'plugin_name',
+ 'node_groups',
+ 'description'
+ ])
+
+ def test_sahara_cluster_list(self):
+ result = self.sahara('cluster-list')
+ clusters = self.parser.listing(result)
+ self.assertTableStruct(clusters, [
+ 'name',
+ 'id',
+ 'status',
+ 'node_count'
+ ])
+
+ def test_sahara_data_source_list(self):
+ result = self.sahara('data-source-list')
+ data_sources = self.parser.listing(result)
+ self.assertTableStruct(data_sources, [
+ 'name',
+ 'id',
+ 'type',
+ 'description'
+ ])
+
+ def test_sahara_job_binary_data_list(self):
+ result = self.sahara('job-binary-data-list')
+ job_binary_data_list = self.parser.listing(result)
+ self.assertTableStruct(job_binary_data_list, [
+ 'id',
+ 'name'
+ ])
+
+ def test_sahara_job_binary_list(self):
+ result = self.sahara('job-binary-list')
+ job_binaries = self.parser.listing(result)
+ self.assertTableStruct(job_binaries, [
+ 'id',
+ 'name',
+ 'description'
+ ])
+
+ def test_sahara_job_template_list(self):
+ result = self.sahara('job-template-list')
+ job_templates = self.parser.listing(result)
+ self.assertTableStruct(job_templates, [
+ 'id',
+ 'name',
+ 'description'
+ ])
+
+ def test_sahara_job_list(self):
+ result = self.sahara('job-list')
+ jobs = self.parser.listing(result)
+ self.assertTableStruct(jobs, [
+ 'id',
+ 'cluster_id',
+ 'status'
+ ])
diff --git a/tempest/cli/simple_read_only/test_savanna.py b/tempest/cli/simple_read_only/test_savanna.py
deleted file mode 100644
index 1e30978..0000000
--- a/tempest/cli/simple_read_only/test_savanna.py
+++ /dev/null
@@ -1,70 +0,0 @@
-# Copyright (c) 2013 Mirantis Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-import subprocess
-
-from tempest import cli
-from tempest import config
-from tempest import test
-
-CONF = config.CONF
-
-LOG = logging.getLogger(__name__)
-
-
-class SimpleReadOnlySavannaClientTest(cli.ClientTestBase):
- """Basic, read-only tests for Savanna CLI client.
-
- Checks return values and output of read-only commands.
- These tests do not presume any content, nor do they create
- their own. They only verify the structure of output if present.
- """
-
- @classmethod
- def setUpClass(cls):
- if not CONF.service_available.savanna:
- msg = "Skipping all Savanna cli tests because it is not available"
- raise cls.skipException(msg)
- super(SimpleReadOnlySavannaClientTest, cls).setUpClass()
-
- @test.attr(type='negative')
- def test_savanna_fake_action(self):
- self.assertRaises(subprocess.CalledProcessError,
- self.savanna,
- 'this-does-not-exist')
-
- def test_savanna_plugins_list(self):
- plugins = self.parser.listing(self.savanna('plugin-list'))
- self.assertTableStruct(plugins, ['name', 'versions', 'title'])
-
- def test_savanna_plugins_show(self):
- plugin = self.parser.listing(self.savanna('plugin-show',
- params='--name vanilla'))
- self.assertTableStruct(plugin, ['Property', 'Value'])
-
- def test_savanna_node_group_template_list(self):
- plugins = self.parser.listing(self.savanna('node-group-template-list'))
- self.assertTableStruct(plugins, ['name', 'id', 'plugin_name',
- 'node_processes', 'description'])
-
- def test_savanna_cluster_template_list(self):
- plugins = self.parser.listing(self.savanna('cluster-template-list'))
- self.assertTableStruct(plugins, ['name', 'id', 'plugin_name',
- 'node_groups', 'description'])
-
- def test_savanna_cluster_list(self):
- plugins = self.parser.listing(self.savanna('cluster-list'))
- self.assertTableStruct(plugins, ['name', 'id', 'status', 'node_count'])
diff --git a/tempest/clients.py b/tempest/clients.py
index 7ebd983..e50a0c3 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -13,16 +13,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Default client libs
-import cinderclient.client
-import glanceclient
-import heatclient.client
import keystoneclient.exceptions
import keystoneclient.v2_0.client
-import neutronclient.v2_0.client
-import novaclient.client
-import swiftclient
+from tempest import auth
from tempest.common.rest_client import NegativeRestClient
from tempest import config
from tempest import exceptions
@@ -30,6 +24,8 @@
from tempest.openstack.common import log as logging
from tempest.services.baremetal.v1.client_json import BaremetalClientJSON
from tempest.services import botoclients
+from tempest.services.compute.json.agents_client import \
+ AgentsClientJSON
from tempest.services.compute.json.aggregates_client import \
AggregatesClientJSON
from tempest.services.compute.json.availability_zone_client import \
@@ -52,6 +48,8 @@
InterfacesClientJSON
from tempest.services.compute.json.keypairs_client import KeyPairsClientJSON
from tempest.services.compute.json.limits_client import LimitsClientJSON
+from tempest.services.compute.json.migrations_client import \
+ MigrationsClientJSON
from tempest.services.compute.json.quotas_client import QuotasClientJSON
from tempest.services.compute.json.security_groups_client import \
SecurityGroupsClientJSON
@@ -78,6 +76,8 @@
InterfacesV3ClientJSON
from tempest.services.compute.v3.json.keypairs_client import \
KeyPairsV3ClientJSON
+from tempest.services.compute.v3.json.migration_client import \
+ MigrationsV3ClientJSON
from tempest.services.compute.v3.json.quotas_client import \
QuotasV3ClientJSON
from tempest.services.compute.v3.json.servers_client import \
@@ -117,6 +117,8 @@
from tempest.services.data_processing.v1_1.client import DataProcessingClient
from tempest.services.database.json.flavors_client import \
DatabaseFlavorsClientJSON
+from tempest.services.database.json.versions_client import \
+ DatabaseVersionsClientJSON
from tempest.services.identity.json.identity_client import IdentityClientJSON
from tempest.services.identity.json.identity_client import TokenClientJSON
from tempest.services.identity.v3.json.credentials_client import \
@@ -162,6 +164,8 @@
VolumeHostsClientJSON
from tempest.services.volume.json.admin.volume_quotas_client import \
VolumeQuotasClientJSON
+from tempest.services.volume.json.admin.volume_services_client import \
+ VolumesServicesClientJSON
from tempest.services.volume.json.admin.volume_types_client import \
VolumeTypesClientJSON
from tempest.services.volume.json.backups_client import BackupsClientJSON
@@ -175,6 +179,8 @@
VolumeHostsClientXML
from tempest.services.volume.xml.admin.volume_quotas_client import \
VolumeQuotasClientXML
+from tempest.services.volume.xml.admin.volume_services_client import \
+ VolumesServicesClientXML
from tempest.services.volume.xml.admin.volume_types_client import \
VolumeTypesClientXML
from tempest.services.volume.xml.backups_client import BackupsClientXML
@@ -193,22 +199,12 @@
Top level manager for OpenStack tempest clients
"""
- def __init__(self, username=None, password=None, tenant_name=None,
- interface='json', service=None):
- """
- We allow overriding of the credentials used within the various
- client classes managed by the Manager object. Left as None, the
- standard username/password/tenant_name is used.
-
- :param username: Override of the username
- :param password: Override of the password
- :param tenant_name: Override of the tenant name
- """
+ def __init__(self, credentials=None, interface='json', service=None):
+ # Set interface and client type first
self.interface = interface
self.client_type = 'tempest'
# super cares for credentials validation
- super(Manager, self).__init__(
- username=username, password=password, tenant_name=tenant_name)
+ super(Manager, self).__init__(credentials=credentials)
if self.interface == 'xml':
self.certificates_client = CertificatesClientXML(
@@ -241,6 +237,8 @@
self.availability_zone_client = AvailabilityZoneClientXML(
self.auth_provider)
self.service_client = ServiceClientXML(self.auth_provider)
+ self.volume_services_client = VolumesServicesClientXML(
+ self.auth_provider)
self.aggregates_client = AggregatesClientXML(self.auth_provider)
self.services_client = ServicesClientXML(self.auth_provider)
self.tenant_usages_client = TenantUsagesClientXML(
@@ -316,6 +314,8 @@
self.services_v3_client = ServicesV3ClientJSON(
self.auth_provider)
self.service_client = ServiceClientJSON(self.auth_provider)
+ self.volume_services_client = VolumesServicesClientJSON(
+ self.auth_provider)
self.agents_v3_client = AgentsV3ClientJSON(self.auth_provider)
self.aggregates_v3_client = AggregatesV3ClientJSON(
self.auth_provider)
@@ -325,6 +325,8 @@
self.tenant_usages_client = TenantUsagesClientJSON(
self.auth_provider)
self.version_v3_client = VersionV3ClientJSON(self.auth_provider)
+ self.migrations_v3_client = MigrationsV3ClientJSON(
+ self.auth_provider)
self.policy_client = PolicyClientJSON(self.auth_provider)
self.hosts_client = HostsClientJSON(self.auth_provider)
self.hypervisor_v3_client = HypervisorV3ClientJSON(
@@ -345,6 +347,8 @@
self.hosts_v3_client = HostsV3ClientJSON(self.auth_provider)
self.database_flavors_client = DatabaseFlavorsClientJSON(
self.auth_provider)
+ self.database_versions_client = DatabaseVersionsClientJSON(
+ self.auth_provider)
self.queuing_client = QueuingClientJSON(self.auth_provider)
if CONF.service_available.ceilometer:
self.telemetry_client = TelemetryClientJSON(
@@ -359,13 +363,14 @@
raise exceptions.InvalidConfiguration(msg)
# TODO(andreaf) EC2 clients still do their own auth, v2 only
- ec2_client_args = (self.credentials.get('username'),
- self.credentials.get('password'),
+ ec2_client_args = (self.credentials.username,
+ self.credentials.password,
CONF.identity.uri,
- self.credentials.get('tenant_name'))
+ self.credentials.tenant_name)
# common clients
self.account_client = AccountClient(self.auth_provider)
+ self.agents_client = AgentsClientJSON(self.auth_provider)
if CONF.service_available.glance:
self.image_client = ImageClientJSON(self.auth_provider)
self.image_client_v2 = ImageClientV2JSON(self.auth_provider)
@@ -381,6 +386,7 @@
AccountClientCustomizedHeader(self.auth_provider)
self.data_processing_client = DataProcessingClient(
self.auth_provider)
+ self.migrations_client = MigrationsClientJSON(self.auth_provider)
class AltManager(Manager):
@@ -391,11 +397,10 @@
"""
def __init__(self, interface='json', service=None):
- super(AltManager, self).__init__(CONF.identity.alt_username,
- CONF.identity.alt_password,
- CONF.identity.alt_tenant_name,
- interface=interface,
- service=service)
+ super(AltManager, self).__init__(
+ credentials=auth.get_default_credentials('alt_user'),
+ interface=interface,
+ service=service)
class AdminManager(Manager):
@@ -406,11 +411,10 @@
"""
def __init__(self, interface='json', service=None):
- super(AdminManager, self).__init__(CONF.identity.admin_username,
- CONF.identity.admin_password,
- CONF.identity.admin_tenant_name,
- interface=interface,
- service=service)
+ super(AdminManager, self).__init__(
+ credentials=auth.get_default_credentials('identity_admin'),
+ interface=interface,
+ service=service)
class ComputeAdminManager(Manager):
@@ -422,29 +426,10 @@
def __init__(self, interface='json', service=None):
base = super(ComputeAdminManager, self)
- base.__init__(CONF.compute_admin.username,
- CONF.compute_admin.password,
- CONF.compute_admin.tenant_name,
- interface=interface,
- service=service)
-
-
-class OrchestrationManager(Manager):
- """
- Manager object that uses the admin credentials for its
- so that heat templates can create users
- """
- def __init__(self, interface='json', service=None):
- base = super(OrchestrationManager, self)
- # heat currently needs an admin user so that stacks can create users
- # however the tests need the demo tenant so that the neutron
- # private network is the default. DO NOT change this auth combination
- # until heat can run with the demo user.
- base.__init__(CONF.identity.admin_username,
- CONF.identity.admin_password,
- CONF.identity.tenant_name,
- interface=interface,
- service=service)
+ base.__init__(
+ credentials=auth.get_default_credentials('compute_admin'),
+ interface=interface,
+ service=service)
class OfficialClientManager(manager.Manager):
@@ -456,46 +441,57 @@
NOVACLIENT_VERSION = '2'
CINDERCLIENT_VERSION = '1'
HEATCLIENT_VERSION = '1'
+ IRONICCLIENT_VERSION = '1'
+ SAHARACLIENT_VERSION = '1.1'
- def __init__(self, username, password, tenant_name):
+ def __init__(self, credentials):
# FIXME(andreaf) Auth provider for client_type 'official' is
# not implemented yet, setting to 'tempest' for now.
self.client_type = 'tempest'
self.interface = None
# super cares for credentials validation
- super(OfficialClientManager, self).__init__(
- username=username, password=password, tenant_name=tenant_name)
- self.compute_client = self._get_compute_client(username,
- password,
- tenant_name)
- self.identity_client = self._get_identity_client(username,
- password,
- tenant_name)
+ super(OfficialClientManager, self).__init__(credentials=credentials)
+ self.baremetal_client = self._get_baremetal_client()
+ self.compute_client = self._get_compute_client(credentials)
+ self.identity_client = self._get_identity_client(credentials)
self.image_client = self._get_image_client()
self.network_client = self._get_network_client()
- self.volume_client = self._get_volume_client(username,
- password,
- tenant_name)
+ self.volume_client = self._get_volume_client(credentials)
self.object_storage_client = self._get_object_storage_client(
- username,
- password,
- tenant_name)
+ credentials)
self.orchestration_client = self._get_orchestration_client(
- username,
- password,
- tenant_name)
+ credentials)
+ self.data_processing_client = self._get_data_processing_client(
+ credentials)
- def _get_compute_client(self, username, password, tenant_name):
+ def _get_roles(self):
+ admin_credentials = auth.get_default_credentials('identity_admin')
+ keystone_admin = self._get_identity_client(admin_credentials)
+
+ username = self.credentials.username
+ tenant_name = self.credentials.tenant_name
+ user_id = keystone_admin.users.find(name=username).id
+ tenant_id = keystone_admin.tenants.find(name=tenant_name).id
+
+ roles = keystone_admin.roles.roles_for_user(
+ user=user_id, tenant=tenant_id)
+
+ return [r.name for r in roles]
+
+ def _get_compute_client(self, credentials):
# Novaclient will not execute operations for anyone but the
# identified user, so a new client needs to be created for
# each user that operations need to be performed for.
- self._validate_credentials(username, password, tenant_name)
+ if not CONF.service_available.nova:
+ return None
+ import novaclient.client
auth_url = CONF.identity.uri
dscv = CONF.identity.disable_ssl_certificate_validation
region = CONF.identity.region
- client_args = (username, password, tenant_name, auth_url)
+ client_args = (credentials.username, credentials.password,
+ credentials.tenant_name, auth_url)
# Create our default Nova client to use in testing
service_type = CONF.compute.catalog_type
@@ -510,6 +506,9 @@
http_log_debug=True)
def _get_image_client(self):
+ if not CONF.service_available.glance:
+ return None
+ import glanceclient
token = self.identity_client.auth_token
region = CONF.identity.region
endpoint_type = CONF.image.endpoint_type
@@ -520,26 +519,32 @@
return glanceclient.Client('1', endpoint=endpoint, token=token,
insecure=dscv)
- def _get_volume_client(self, username, password, tenant_name):
+ def _get_volume_client(self, credentials):
+ if not CONF.service_available.cinder:
+ return None
+ import cinderclient.client
auth_url = CONF.identity.uri
region = CONF.identity.region
endpoint_type = CONF.volume.endpoint_type
+ dscv = CONF.identity.disable_ssl_certificate_validation
return cinderclient.client.Client(self.CINDERCLIENT_VERSION,
- username,
- password,
- tenant_name,
+ credentials.username,
+ credentials.password,
+ credentials.tenant_name,
auth_url,
region_name=region,
endpoint_type=endpoint_type,
+ insecure=dscv,
http_log_debug=True)
- def _get_object_storage_client(self, username, password, tenant_name):
+ def _get_object_storage_client(self, credentials):
+ if not CONF.service_available.swift:
+ return None
+ import swiftclient
auth_url = CONF.identity.uri
# add current tenant to swift operator role group.
- keystone_admin = self._get_identity_client(
- CONF.identity.admin_username,
- CONF.identity.admin_password,
- CONF.identity.admin_tenant_name)
+ admin_credentials = auth.get_default_credentials('identity_admin')
+ keystone_admin = self._get_identity_client(admin_credentials)
# enable test user to operate swift by adding operator role to him.
roles = keystone_admin.roles.list()
@@ -556,23 +561,18 @@
endpoint_type = CONF.object_storage.endpoint_type
os_options = {'endpoint_type': endpoint_type}
- return swiftclient.Connection(auth_url, username, password,
- tenant_name=tenant_name,
+ return swiftclient.Connection(auth_url, credentials.username,
+ credentials.password,
+ tenant_name=credentials.tenant_name,
auth_version='2',
os_options=os_options)
- def _get_orchestration_client(self, username=None, password=None,
- tenant_name=None):
- if not username:
- username = CONF.identity.admin_username
- if not password:
- password = CONF.identity.admin_password
- if not tenant_name:
- tenant_name = CONF.identity.tenant_name
+ def _get_orchestration_client(self, credentials):
+ if not CONF.service_available.heat:
+ return None
+ import heatclient.client
- self._validate_credentials(username, password, tenant_name)
-
- keystone = self._get_identity_client(username, password, tenant_name)
+ keystone = self._get_identity_client(credentials)
region = CONF.identity.region
endpoint_type = CONF.orchestration.endpoint_type
token = keystone.auth_token
@@ -589,22 +589,53 @@
return heatclient.client.Client(self.HEATCLIENT_VERSION,
endpoint,
token=token,
- username=username,
- password=password)
+ username=credentials.username,
+ password=credentials.password)
- def _get_identity_client(self, username, password, tenant_name):
+ def _get_identity_client(self, credentials):
# This identity client is not intended to check the security
# of the identity service, so use admin credentials by default.
- self._validate_credentials(username, password, tenant_name)
auth_url = CONF.identity.uri
dscv = CONF.identity.disable_ssl_certificate_validation
- return keystoneclient.v2_0.client.Client(username=username,
- password=password,
- tenant_name=tenant_name,
- auth_url=auth_url,
- insecure=dscv)
+ return keystoneclient.v2_0.client.Client(
+ username=credentials.username,
+ password=credentials.password,
+ tenant_name=credentials.tenant_name,
+ auth_url=auth_url,
+ insecure=dscv)
+
+ def _get_baremetal_client(self):
+ # ironic client is currently intended to be used by admin users
+ if not CONF.service_available.ironic:
+ return None
+ import ironicclient.client
+ roles = self._get_roles()
+ if CONF.identity.admin_role not in roles:
+ return None
+
+ auth_url = CONF.identity.uri
+ api_version = self.IRONICCLIENT_VERSION
+ insecure = CONF.identity.disable_ssl_certificate_validation
+ service_type = CONF.baremetal.catalog_type
+ endpoint_type = CONF.baremetal.endpoint_type
+ creds = {
+ 'os_username': self.credentials.username,
+ 'os_password': self.credentials.password,
+ 'os_tenant_name': self.credentials.tenant_name
+ }
+
+ try:
+ return ironicclient.client.get_client(
+ api_version=api_version,
+ os_auth_url=auth_url,
+ insecure=insecure,
+ os_service_type=service_type,
+ os_endpoint_type=endpoint_type,
+ **creds)
+ except keystoneclient.exceptions.EndpointNotFound:
+ return None
def _get_network_client(self):
# The intended configuration is for the network client to have
@@ -613,19 +644,42 @@
# preferable to authenticating as a specific user because
# working with certain resources (public routers and networks)
# often requires admin privileges anyway.
- username = CONF.identity.admin_username
- password = CONF.identity.admin_password
- tenant_name = CONF.identity.admin_tenant_name
+ if not CONF.service_available.neutron:
+ return None
+ import neutronclient.v2_0.client
- self._validate_credentials(username, password, tenant_name)
+ credentials = auth.get_default_credentials('identity_admin')
auth_url = CONF.identity.uri
dscv = CONF.identity.disable_ssl_certificate_validation
endpoint_type = CONF.network.endpoint_type
- return neutronclient.v2_0.client.Client(username=username,
- password=password,
- tenant_name=tenant_name,
- endpoint_type=endpoint_type,
- auth_url=auth_url,
- insecure=dscv)
+ return neutronclient.v2_0.client.Client(
+ username=credentials.username,
+ password=credentials.password,
+ tenant_name=credentials.tenant_name,
+ endpoint_type=endpoint_type,
+ auth_url=auth_url,
+ insecure=dscv)
+
+ def _get_data_processing_client(self, credentials):
+ if not CONF.service_available.sahara:
+ # Sahara isn't available
+ return None
+
+ import saharaclient.client
+
+ endpoint_type = CONF.data_processing.endpoint_type
+ catalog_type = CONF.data_processing.catalog_type
+ auth_url = CONF.identity.uri
+
+ client = saharaclient.client.Client(
+ self.SAHARACLIENT_VERSION,
+ credentials.username,
+ credentials.password,
+ project_name=credentials.tenant_name,
+ endpoint_type=endpoint_type,
+ service_type=catalog_type,
+ auth_url=auth_url)
+
+ return client
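With this change every client manager is handed a single credentials object
built by tempest.auth instead of discrete username/password/tenant_name
arguments. A minimal sketch of the new call pattern, assuming the usual
[identity] admin options are set in tempest.conf (the variable names are
illustrative):

    from tempest import auth
    from tempest import clients

    # 'identity_admin' is one of the credential types used above for
    # AdminManager; get_default_credentials() reads it from tempest.conf.
    admin_creds = auth.get_default_credentials('identity_admin')

    # Managers now take the whole object rather than three strings.
    os_admin = clients.Manager(credentials=admin_creds, interface='json')
    servers_client = os_admin.servers_client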
diff --git a/tempest/api/compute/api_schema/__init__.py b/tempest/cmd/__init__.py
similarity index 100%
copy from tempest/api/compute/api_schema/__init__.py
copy to tempest/cmd/__init__.py
diff --git a/tempest/cmd/verify_tempest_config.py b/tempest/cmd/verify_tempest_config.py
new file mode 100755
index 0000000..7b2e60b
--- /dev/null
+++ b/tempest/cmd/verify_tempest_config.py
@@ -0,0 +1,351 @@
+#!/usr/bin/env python
+
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import argparse
+import json
+import os
+import sys
+import urlparse
+
+import httplib2
+from six.moves import configparser
+
+from tempest import clients
+from tempest import config
+
+
+CONF = config.CONF
+RAW_HTTP = httplib2.Http()
+CONF_FILE = None
+OUTFILE = sys.stdout
+
+
+def _get_config_file():
+ default_config_dir = os.path.join(os.path.abspath(
+ os.path.dirname(os.path.dirname(__file__))), "etc")
+ default_config_file = "tempest.conf"
+
+ conf_dir = os.environ.get('TEMPEST_CONFIG_DIR', default_config_dir)
+ conf_file = os.environ.get('TEMPEST_CONFIG', default_config_file)
+ path = os.path.join(conf_dir, conf_file)
+ fd = open(path, 'rw')
+ return fd
+
+
+def change_option(option, group, value):
+ config_parse = configparser.SafeConfigParser()
+ config_parse.optionxform = str
+ config_parse.readfp(CONF_FILE)
+ if not config_parse.has_section(group):
+ config_parse.add_section(group)
+ config_parse.set(group, option, str(value))
+ global OUTFILE
+ config_parse.write(OUTFILE)
+
+
+def print_and_or_update(option, group, value, update):
+ print('Config option %s in group %s should be changed to: %s'
+ % (option, group, value))
+ if update:
+ change_option(option, group, value)
+
+
+def verify_glance_api_versions(os, update):
+ # Check glance api versions
+ __, versions = os.image_client.get_versions()
+ if CONF.image_feature_enabled.api_v1 != ('v1.1' in versions or 'v1.0' in
+ versions):
+ print_and_or_update('api_v1', 'image_feature_enabled',
+ not CONF.image_feature_enabled.api_v1, update)
+ if CONF.image_feature_enabled.api_v2 != ('v2.0' in versions):
+ print_and_or_update('api_v2', 'image_feature_enabled',
+ not CONF.image_feature_enabled.api_v2, update)
+
+
+def _get_unversioned_endpoint(base_url):
+ endpoint_parts = urlparse.urlparse(base_url)
+ endpoint = endpoint_parts.scheme + '://' + endpoint_parts.netloc
+ return endpoint
+
+
+def _get_api_versions(os, service):
+ client_dict = {
+ 'nova': os.servers_client,
+ 'keystone': os.identity_client,
+ 'cinder': os.volumes_client,
+ }
+ client_dict[service].skip_path()
+ endpoint = _get_unversioned_endpoint(client_dict[service].base_url)
+ __, body = RAW_HTTP.request(endpoint, 'GET')
+ client_dict[service].reset_path()
+ body = json.loads(body)
+ if service == 'keystone':
+ versions = map(lambda x: x['id'], body['versions']['values'])
+ else:
+ versions = map(lambda x: x['id'], body['versions'])
+ return versions
+
+
+def verify_keystone_api_versions(os, update):
+ # Check keystone api versions
+ versions = _get_api_versions(os, 'keystone')
+ if CONF.identity_feature_enabled.api_v2 != ('v2.0' in versions):
+ print_and_or_update('api_v2', 'identity_feature_enabled',
+ not CONF.identity_feature_enabled.api_v2, update)
+ if CONF.identity_feature_enabled.api_v3 != ('v3.0' in versions):
+ print_and_or_update('api_v3', 'identity_feature_enabled',
+ not CONF.identity_feature_enabled.api_v3, update)
+
+
+def verify_nova_api_versions(os, update):
+ versions = _get_api_versions(os, 'nova')
+ if CONF.compute_feature_enabled.api_v3 != ('v3.0' in versions):
+ print_and_or_update('api_v3', 'compute_feature_enabled',
+ not CONF.compute_feature_enabled.api_v3, update)
+
+
+def verify_cinder_api_versions(os, update):
+ # Check cinder api versions
+ versions = _get_api_versions(os, 'cinder')
+ if CONF.volume_feature_enabled.api_v1 != ('v1.0' in versions):
+ print_and_or_update('api_v1', 'volume_feature_enabled',
+ not CONF.volume_feature_enabled.api_v1, update)
+ if CONF.volume_feature_enabled.api_v2 != ('v2.0' in versions):
+ print_and_or_update('api_v2', 'volume_feature_enabled',
+ not CONF.volume_feature_enabled.api_v2, update)
+
+
+def get_extension_client(os, service):
+ extensions_client = {
+ 'nova': os.extensions_client,
+ 'nova_v3': os.extensions_v3_client,
+ 'cinder': os.volumes_extension_client,
+ 'neutron': os.network_client,
+ 'swift': os.account_client,
+ }
+ if service not in extensions_client:
+ print('No tempest extensions client for %s' % service)
+ exit(1)
+ return extensions_client[service]
+
+
+def get_enabled_extensions(service):
+ extensions_options = {
+ 'nova': CONF.compute_feature_enabled.api_extensions,
+ 'nova_v3': CONF.compute_feature_enabled.api_v3_extensions,
+ 'cinder': CONF.volume_feature_enabled.api_extensions,
+ 'neutron': CONF.network_feature_enabled.api_extensions,
+ 'swift': CONF.object_storage_feature_enabled.discoverable_apis,
+ }
+ if service not in extensions_options:
+ print('No supported extensions list option for %s' % service)
+ exit(1)
+ return extensions_options[service]
+
+
+def verify_extensions(os, service, results):
+ extensions_client = get_extension_client(os, service)
+ __, resp = extensions_client.list_extensions()
+ if isinstance(resp, dict):
+ # Neutron's extension 'name' field is not a single word (it has
+ # spaces in the string). Since that can't be used as a list option,
+ # the api_extension option in the network-feature-enabled group uses
+ # the alias instead of the name.
+ if service == 'neutron':
+ extensions = map(lambda x: x['alias'], resp['extensions'])
+ elif service == 'swift':
+ # Remove Swift general information from extensions list
+ resp.pop('swift')
+ extensions = resp.keys()
+ else:
+ extensions = map(lambda x: x['name'], resp['extensions'])
+
+ else:
+ extensions = map(lambda x: x['name'], resp)
+ if not results.get(service):
+ results[service] = {}
+ extensions_opt = get_enabled_extensions(service)
+ if extensions_opt[0] == 'all':
+ results[service]['extensions'] = extensions
+ return results
+ # Verify that all configured extensions are actually enabled
+ for extension in extensions_opt:
+ results[service][extension] = extension in extensions
+ # Verify that there aren't additional extensions enabled that aren't
+ # specified in the config list
+ for extension in extensions:
+ if extension not in extensions_opt:
+ results[service][extension] = False
+ return results
+
+
+def display_results(results, update, replace):
+ update_dict = {
+ 'swift': 'object-storage-feature-enabled',
+ 'nova': 'compute-feature-enabled',
+ 'nova_v3': 'compute-feature-enabled',
+ 'cinder': 'volume-feature-enabled',
+ 'neutron': 'network-feature-enabled',
+ }
+ for service in results:
+ # If all extensions are specified as being enabled there is no way to
+ # verify this so we just assume this to be true
+ if results[service].get('extensions'):
+ if replace:
+ output_list = results[service].get('extensions')
+ else:
+ output_list = ['all']
+ else:
+ extension_list = get_enabled_extensions(service)
+ output_list = []
+ for extension in results[service]:
+ if not results[service][extension]:
+ if extension in extension_list:
+ print("%s extension: %s should not be included in the "
+ "list of enabled extensions" % (service,
+ extension))
+ else:
+ print("%s extension: %s should be included in the list"
+ " of enabled extensions" % (service, extension))
+ output_list.append(extension)
+ else:
+ output_list.append(extension)
+ if update:
+ # Sort List
+ output_list.sort()
+ # Convert list to a string
+ output_string = ', '.join(output_list)
+ if service == 'swift':
+ change_option('discoverable_apis', update_dict[service],
+ output_string)
+ elif service == 'nova_v3':
+ change_option('api_v3_extensions', update_dict[service],
+ output_string)
+ else:
+ change_option('api_extensions', update_dict[service],
+ output_string)
+
+
+def check_service_availability(os, update):
+ services = []
+ avail_services = []
+ codename_match = {
+ 'volume': 'cinder',
+ 'network': 'neutron',
+ 'image': 'glance',
+ 'object_storage': 'swift',
+ 'compute': 'nova',
+ 'orchestration': 'heat',
+ 'metering': 'ceilometer',
+ 'telemetry': 'ceilometer',
+ 'data_processing': 'sahara',
+ 'baremetal': 'ironic',
+ 'identity': 'keystone',
+ 'queuing': 'marconi',
+ 'database': 'trove'
+ }
+ # Get catalog list for endpoints to use for validation
+ __, endpoints = os.endpoints_client.list_endpoints()
+ for endpoint in endpoints:
+ __, service = os.service_client.get_service(endpoint['service_id'])
+ services.append(service['type'])
+ # Pull all catalog types from config file and compare against endpoint list
+ for cfgname in dir(CONF._config):
+ cfg = getattr(CONF, cfgname)
+ catalog_type = getattr(cfg, 'catalog_type', None)
+ if not catalog_type:
+ continue
+ else:
+ if cfgname == 'identity':
+ # Keystone is a required service for tempest
+ continue
+ if catalog_type not in services:
+ if getattr(CONF.service_available, codename_match[cfgname]):
+ print('Endpoint type %s not found: either disable service '
+ '%s or fix the catalog_type in the config file' % (
+ catalog_type, codename_match[cfgname]))
+ if update:
+ change_option(codename_match[cfgname],
+ 'service_available', False)
+ else:
+ if not getattr(CONF.service_available,
+ codename_match[cfgname]):
+ print('Endpoint type %s is available, service %s should be'
+ ' set as available in the config file.' % (
+ catalog_type, codename_match[cfgname]))
+ if update:
+ change_option(codename_match[cfgname],
+ 'service_available', True)
+ else:
+ avail_services.append(codename_match[cfgname])
+ return avail_services
+
+
+def parse_args():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-u', '--update', action='store_true',
+ help='Update the config file with results from api '
+ 'queries. This assumes whatever is set in the '
+ 'config file is incorrect. In the case of '
+ 'endpoint checks, where either the catalog type '
+ 'or the service_available option could be wrong, '
+ 'the service_available option is assumed to be '
+ 'incorrect and is thus changed')
+ parser.add_argument('-o', '--output',
+ help="Output file to write an updated config file to. "
+ "This has to be a separate file from the "
+ "original config file. If one isn't specified "
+ "with -u the new config file will be printed to "
+ "STDOUT")
+ parser.add_argument('-r', '--replace-ext', action='store_true',
+ help="If specified the all option will be replaced "
+ "with a full list of extensions")
+ args = parser.parse_args()
+ return args
+
+
+def main():
+ print('Running config verification...')
+ opts = parse_args()
+ update = opts.update
+ replace = opts.replace_ext
+ global CONF_FILE
+ global OUTFILE
+ if update:
+ CONF_FILE = _get_config_file()
+ if opts.output:
+ OUTFILE = open(opts.output, 'w+')
+ os = clients.ComputeAdminManager(interface='json')
+ services = check_service_availability(os, update)
+ results = {}
+ for service in ['nova', 'nova_v3', 'cinder', 'neutron', 'swift']:
+ if service == 'nova_v3' and 'nova' not in services:
+ continue
+ elif service not in services:
+ continue
+ results = verify_extensions(os, service, results)
+ verify_keystone_api_versions(os, update)
+ verify_glance_api_versions(os, update)
+ verify_nova_api_versions(os, update)
+ verify_cinder_api_versions(os, update)
+ display_results(results, update, replace)
+ if CONF_FILE:
+ CONF_FILE.close()
+ OUTFILE.close()
+
+
+if __name__ == "__main__":
+ main()
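When -u/--update is given the tool rewrites options through SafeConfigParser,
reading the existing tempest.conf and writing the merged result either to
STDOUT or to the file passed with -o/--output, never in place. A hedged
sketch of the rewrite pattern change_option() relies on (the path, section
and option below are assumptions used only for illustration):

    import sys

    from six.moves import configparser

    parser = configparser.SafeConfigParser()
    parser.optionxform = str                      # keep option names case-sensitive
    with open('etc/tempest.conf') as conf_file:   # assumed config path
        parser.readfp(conf_file)
    if not parser.has_section('compute-feature-enabled'):
        parser.add_section('compute-feature-enabled')
    parser.set('compute-feature-enabled', 'api_v3', str(False))
    parser.write(sys.stdout)                      # or the stream opened for -o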
diff --git a/tempest/common/commands.py b/tempest/common/commands.py
index 6405eaa..c31a038 100644
--- a/tempest/common/commands.py
+++ b/tempest/common/commands.py
@@ -73,3 +73,7 @@
def iptables_ns(ns, table):
return ip_ns_exec(ns, "iptables -v -S -t " + table)
+
+
+def ovs_db_dump():
+ return sudo_cmd_call("ovsdb-client dump")
diff --git a/tempest/common/debug.py b/tempest/common/debug.py
index 8325d4d..228be7a 100644
--- a/tempest/common/debug.py
+++ b/tempest/common/debug.py
@@ -20,7 +20,7 @@
CONF = config.CONF
LOG = logging.getLogger(__name__)
-tables = ['filter', 'nat', 'mangle']
+TABLES = ['filter', 'nat', 'mangle']
def log_ip_ns():
@@ -28,13 +28,25 @@
return
LOG.info("Host Addr:\n" + commands.ip_addr_raw())
LOG.info("Host Route:\n" + commands.ip_route_raw())
- for table in ['filter', 'nat', 'mangle']:
+ for table in TABLES:
LOG.info('Host %s table:\n%s', table, commands.iptables_raw(table))
ns_list = commands.ip_ns_list()
LOG.info("Host ns list" + str(ns_list))
for ns in ns_list:
LOG.info("ns(%s) Addr:\n%s", ns, commands.ip_ns_addr(ns))
LOG.info("ns(%s) Route:\n%s", ns, commands.ip_ns_route(ns))
- for table in ['filter', 'nat', 'mangle']:
+ for table in TABLES:
LOG.info('ns(%s) table(%s):\n%s', ns, table,
commands.iptables_ns(ns, table))
+
+
+def log_ovs_db():
+ if not CONF.debug.enable or not CONF.service_available.neutron:
+ return
+ db_dump = commands.ovs_db_dump()
+ LOG.info("OVS DB:\n" + db_dump)
+
+
+def log_net_debug():
+ log_ip_ns()
+ log_ovs_db()
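log_net_debug() gives callers a single entry point that dumps host and
namespace addresses, routes and iptables tables plus the new OVS DB dump,
and the helpers themselves no-op unless [debug] enable is true (the OVS part
also requires neutron). A minimal sketch of how a network test might use it
on failure; the function and callable names are hypothetical:

    from tempest.common import debug

    def run_connectivity_check(check):
        # 'check' is any callable that raises on a network failure; capture
        # the host/namespace/OVS state before re-raising so the failure can
        # be diagnosed from the logs.
        try:
            check()
        except Exception:
            debug.log_net_debug()
            raise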
diff --git a/tempest/common/generator/base_generator.py b/tempest/common/generator/base_generator.py
index 7e7a2d6..57b98f7 100644
--- a/tempest/common/generator/base_generator.py
+++ b/tempest/common/generator/base_generator.py
@@ -62,7 +62,7 @@
"admin_client": {"type": "boolean"},
"url": {"type": "string"},
"default_result_code": {"type": "integer"},
- "json-schema": jsonschema._utils.load_schema("draft4"),
+ "json-schema": {},
"resources": {
"type": "array",
"items": {
@@ -87,12 +87,6 @@
"additionalProperties": False,
}
- def __new__(cls, *args, **kwargs):
- if not cls._instance:
- cls._instance = super(BasicGeneratorSet, cls).__new__(cls, *args,
- **kwargs)
- return cls._instance
-
def __init__(self):
self.types_dict = {}
for m in dir(self):
@@ -105,6 +99,8 @@
self.types_dict[type].append(method)
def validate_schema(self, schema):
+ if "json-schema" in schema:
+ jsonschema.Draft4Validator.check_schema(schema['json-schema'])
jsonschema.validate(schema, self.schema)
def generate(self, schema):
@@ -127,7 +123,7 @@
raise Exception("non-integer list types not supported")
result = []
if schema_type not in self.types_dict:
- raise Exception("generator (%s) doesn't support type: %s"
+ raise TypeError("generator (%s) doesn't support type: %s"
% (self.__class__.__name__, schema_type))
for generator in self.types_dict[schema_type]:
ret = generator(schema)
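With the singleton removed, the "json-schema" member is now accepted as an
arbitrary object in the meta-schema and checked separately: validate_schema()
runs jsonschema.Draft4Validator.check_schema() over it, which raises a
SchemaError for a malformed schema. A short sketch of that check with a
made-up payload schema:

    import jsonschema

    payload_schema = {'type': 'object',
                      'properties': {'length': {'type': 'integer'}}}
    # what validate_schema() now does for the optional "json-schema" member
    jsonschema.Draft4Validator.check_schema(payload_schema)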
diff --git a/tempest/common/glance_http.py b/tempest/common/glance_http.py
index b4ba933..9358851 100644
--- a/tempest/common/glance_http.py
+++ b/tempest/common/glance_http.py
@@ -19,6 +19,7 @@
import hashlib
import httplib
import json
+import OpenSSL
import posixpath
import re
from six import moves
@@ -27,14 +28,6 @@
import struct
import urlparse
-
-# Python 2.5 compat fix
-if not hasattr(urlparse, 'parse_qsl'):
- import cgi
- urlparse.parse_qsl = cgi.parse_qsl
-
-import OpenSSL
-
from tempest import exceptions as exc
from tempest.openstack.common import log as logging
@@ -50,7 +43,7 @@
self.auth_provider = auth_provider
self.filters = filters
self.endpoint = auth_provider.base_url(filters)
- endpoint_parts = self.parse_endpoint(self.endpoint)
+ endpoint_parts = urlparse.urlparse(self.endpoint)
self.endpoint_scheme = endpoint_parts.scheme
self.endpoint_hostname = endpoint_parts.hostname
self.endpoint_port = endpoint_parts.port
@@ -61,10 +54,6 @@
self.endpoint_scheme, **kwargs)
@staticmethod
- def parse_endpoint(endpoint):
- return urlparse.urlparse(endpoint)
-
- @staticmethod
def get_connection_class(scheme):
if scheme == 'https':
return VerifiedHTTPSConnection
@@ -107,7 +96,7 @@
conn = self.get_connection()
try:
- url_parts = self.parse_endpoint(url)
+ url_parts = urlparse.urlparse(url)
conn_url = posixpath.normpath(url_parts.path)
LOG.debug('Actual Path: {path}'.format(path=conn_url))
if kwargs['headers'].get('Transfer-Encoding') == 'chunked':
@@ -134,7 +123,6 @@
raise exc.TimeoutException(message)
body_iter = ResponseBodyIterator(resp)
-
# Read body into string if it isn't obviously image data
if resp.getheader('content-type', None) != 'application/octet-stream':
body_str = ''.join([body_chunk for body_chunk in body_iter])
@@ -178,7 +166,7 @@
resp, body_iter = self._http_request(url, method, **kwargs)
- if 'application/json' in resp.getheader('content-type', None):
+ if 'application/json' in resp.getheader('content-type', ''):
body = ''.join([chunk for chunk in body_iter])
try:
body = json.loads(body)
diff --git a/tempest/common/isolated_creds.py b/tempest/common/isolated_creds.py
index c54a8e8..b4618ed 100644
--- a/tempest/common/isolated_creds.py
+++ b/tempest/common/isolated_creds.py
@@ -14,9 +14,7 @@
import netaddr
-import keystoneclient.v2_0.client as keystoneclient
-import neutronclient.v2_0.client as neutronclient
-
+from tempest import auth
from tempest import clients
from tempest.common.utils import data_utils
from tempest import config
@@ -42,24 +40,6 @@
self.identity_admin_client, self.network_admin_client = (
self._get_admin_clients())
- def _get_official_admin_clients(self):
- username = CONF.identity.admin_username
- password = CONF.identity.admin_password
- tenant_name = CONF.identity.admin_tenant_name
- auth_url = CONF.identity.uri
- dscv = CONF.identity.disable_ssl_certificate_validation
- identity_client = keystoneclient.Client(username=username,
- password=password,
- tenant_name=tenant_name,
- auth_url=auth_url,
- insecure=dscv)
- network_client = neutronclient.Client(username=username,
- password=password,
- tenant_name=tenant_name,
- auth_url=auth_url,
- insecure=dscv)
- return identity_client, network_client
-
def _get_admin_clients(self):
"""
Returns a tuple with instances of the following admin clients (in this
@@ -69,11 +49,11 @@
"""
if self.tempest_client:
os = clients.AdminManager(interface=self.interface)
- admin_clients = (os.identity_client,
- os.network_client,)
else:
- admin_clients = self._get_official_admin_clients()
- return admin_clients
+ os = clients.OfficialClientManager(
+ auth.get_default_credentials('identity_admin')
+ )
+ return os.identity_client, os.network_client
def _create_tenant(self, name, description):
if self.tempest_client:
@@ -185,22 +165,19 @@
self._assign_user_role(tenant['id'], user['id'], role['id'])
else:
self._assign_user_role(tenant.id, user.id, role.id)
- return user, tenant
+ return self._get_credentials(user, tenant)
- def _get_cred_names(self, user, tenant):
+ def _get_credentials(self, user, tenant):
if self.tempest_client:
- username = user.get('name')
- tenant_name = tenant.get('name')
+ user_get = user.get
+ tenant_get = tenant.get
else:
- username = user.name
- tenant_name = tenant.name
- return username, tenant_name
-
- def _get_tenant_id(self, tenant):
- if self.tempest_client:
- return tenant.get('id')
- else:
- return tenant.id
+ user_get = user.__dict__.get
+ tenant_get = tenant.__dict__.get
+ return auth.get_credentials(
+ username=user_get('name'), user_id=user_get('id'),
+ tenant_name=tenant_get('name'), tenant_id=tenant_get('id'),
+ password=self.password)
def _create_network_resources(self, tenant_id):
network = None
@@ -314,24 +291,6 @@
body = {'subnet_id': subnet_id}
self.network_admin_client.add_interface_router(router_id, body)
- def get_primary_tenant(self):
- return self.isolated_creds.get('primary')[1]
-
- def get_primary_user(self):
- return self.isolated_creds.get('primary')[0]
-
- def get_alt_tenant(self):
- return self.isolated_creds.get('alt')[1]
-
- def get_alt_user(self):
- return self.isolated_creds.get('alt')[0]
-
- def get_admin_tenant(self):
- return self.isolated_creds.get('admin')[1]
-
- def get_admin_user(self):
- return self.isolated_creds.get('admin')[0]
-
def get_primary_network(self):
return self.isolated_net_resources.get('primary')[0]
@@ -359,62 +318,33 @@
def get_alt_router(self):
return self.isolated_net_resources.get('alt')[2]
- def get_primary_creds(self):
- if self.isolated_creds.get('primary'):
- user, tenant = self.isolated_creds['primary']
- username, tenant_name = self._get_cred_names(user, tenant)
+ def get_credentials(self, credential_type):
+ if self.isolated_creds.get(credential_type):
+ credentials = self.isolated_creds[credential_type]
else:
- user, tenant = self._create_creds()
- username, tenant_name = self._get_cred_names(user, tenant)
- self.isolated_creds['primary'] = (user, tenant)
- LOG.info("Acquired isolated creds:\n user: %s, tenant: %s"
- % (username, tenant_name))
+ is_admin = (credential_type == 'admin')
+ credentials = self._create_creds(admin=is_admin)
+ self.isolated_creds[credential_type] = credentials
+ # Maintained until tests are ported
+ LOG.info("Acquired isolated creds:\n credentials: %s"
+ % credentials)
if CONF.service_available.neutron:
network, subnet, router = self._create_network_resources(
- self._get_tenant_id(tenant))
- self.isolated_net_resources['primary'] = (
+ credentials.tenant_id)
+ self.isolated_net_resources[credential_type] = (
network, subnet, router,)
LOG.info("Created isolated network resources for : \n"
- + " user: %s, tenant: %s" % (username, tenant_name))
- return username, tenant_name, self.password
+ + " credentials: %s" % credentials)
+ return credentials
+
+ def get_primary_creds(self):
+ return self.get_credentials('primary')
def get_admin_creds(self):
- if self.isolated_creds.get('admin'):
- user, tenant = self.isolated_creds['admin']
- username, tenant_name = self._get_cred_names(user, tenant)
- else:
- user, tenant = self._create_creds(admin=True)
- username, tenant_name = self._get_cred_names(user, tenant)
- self.isolated_creds['admin'] = (user, tenant)
- LOG.info("Acquired admin isolated creds:\n user: %s, tenant: %s"
- % (username, tenant_name))
- if CONF.service_available.neutron:
- network, subnet, router = self._create_network_resources(
- self._get_tenant_id(tenant))
- self.isolated_net_resources['admin'] = (
- network, subnet, router,)
- LOG.info("Created isolated network resources for : \n"
- + " user: %s, tenant: %s" % (username, tenant_name))
- return username, tenant_name, self.password
+ return self.get_credentials('admin')
def get_alt_creds(self):
- if self.isolated_creds.get('alt'):
- user, tenant = self.isolated_creds['alt']
- username, tenant_name = self._get_cred_names(user, tenant)
- else:
- user, tenant = self._create_creds()
- username, tenant_name = self._get_cred_names(user, tenant)
- self.isolated_creds['alt'] = (user, tenant)
- LOG.info("Acquired alt isolated creds:\n user: %s, tenant: %s"
- % (username, tenant_name))
- if CONF.service_available.neutron:
- network, subnet, router = self._create_network_resources(
- self._get_tenant_id(tenant))
- self.isolated_net_resources['alt'] = (
- network, subnet, router,)
- LOG.info("Created isolated network resources for : \n"
- + " user: %s, tenant: %s" % (username, tenant_name))
- return username, tenant_name, self.password
+ return self.get_credentials('alt')
def _clear_isolated_router(self, router_id, router_name):
net_client = self.network_admin_client
@@ -423,7 +353,6 @@
except exceptions.NotFound:
LOG.warn('router with name: %s not found for delete' %
router_name)
- pass
def _clear_isolated_subnet(self, subnet_id, subnet_name):
net_client = self.network_admin_client
@@ -432,7 +361,6 @@
except exceptions.NotFound:
LOG.warn('subnet with name: %s not found for delete' %
subnet_name)
- pass
def _clear_isolated_network(self, network_id, network_name):
net_client = self.network_admin_client
@@ -441,7 +369,6 @@
except exceptions.NotFound:
LOG.warn('network with name: %s not found for delete' %
network_name)
- pass
def _cleanup_ports(self, network_id):
# TODO(mlavalle) This method will be removed once patch
@@ -487,7 +414,6 @@
except exceptions.NotFound:
LOG.warn('router with name: %s not found for delete' %
router['name'])
- pass
self._clear_isolated_router(router['id'], router['name'])
if (not self.network_resources or
self.network_resources.get('network')):
@@ -505,29 +431,14 @@
if not self.isolated_creds:
return
self._clear_isolated_net_resources()
- for cred in self.isolated_creds:
- user, tenant = self.isolated_creds.get(cred)
+ for creds in self.isolated_creds.itervalues():
try:
- if self.tempest_client:
- self._delete_user(user['id'])
- else:
- self._delete_user(user.id)
+ self._delete_user(creds.user_id)
except exceptions.NotFound:
- if self.tempest_client:
- name = user['name']
- else:
- name = user.name
- LOG.warn("user with name: %s not found for delete" % name)
- pass
+ LOG.warn("user with name: %s not found for delete" %
+ creds.username)
try:
- if self.tempest_client:
- self._delete_tenant(tenant['id'])
- else:
- self._delete_tenant(tenant.id)
+ self._delete_tenant(creds.tenant_id)
except exceptions.NotFound:
- if self.tempest_client:
- name = tenant['name']
- else:
- name = tenant.name
- LOG.warn("tenant with name: %s not found for delete" % name)
- pass
+ LOG.warn("tenant with name: %s not found for delete" %
+ creds.tenant_name)
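Isolated credentials are now returned as auth Credentials objects instead of
(user, tenant) pairs or (username, tenant_name, password) tuples, so the
result can be passed straight to a client manager. A minimal sketch of the
consumer side; the label argument and the teardown call name follow existing
tempest usage and are assumptions here:

    from tempest import clients
    from tempest.common import isolated_creds

    creds_provider = isolated_creds.IsolatedCreds('MyTestClass')
    primary_creds = creds_provider.get_primary_creds()   # a Credentials object
    os = clients.Manager(credentials=primary_creds)
    # ... run the test against os.* clients ...
    creds_provider.clear_isolated_creds()                 # assumed cleanup hook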
diff --git a/tempest/common/rest_client.py b/tempest/common/rest_client.py
index 36ddb40..10223a0 100644
--- a/tempest/common/rest_client.py
+++ b/tempest/common/rest_client.py
@@ -15,17 +15,19 @@
# under the License.
import collections
-import hashlib
import json
from lxml import etree
import re
import time
+import jsonschema
+
from tempest.common import http
+from tempest.common.utils import misc as misc_utils
+from tempest.common import xml_utils as common
from tempest import config
from tempest import exceptions
from tempest.openstack.common import log as logging
-from tempest.services.compute.xml import common
CONF = config.CONF
@@ -138,15 +140,23 @@
@property
def user(self):
- return self.auth_provider.credentials.get('username', None)
+ return self.auth_provider.credentials.username
+
+ @property
+ def user_id(self):
+ return self.auth_provider.credentials.user_id
@property
def tenant_name(self):
- return self.auth_provider.credentials.get('tenant_name', None)
+ return self.auth_provider.credentials.tenant_name
+
+ @property
+ def tenant_id(self):
+ return self.auth_provider.credentials.tenant_id
@property
def password(self):
- return self.auth_provider.credentials.get('password', None)
+ return self.auth_provider.credentials.password
@property
def base_url(self):
@@ -195,26 +205,26 @@
details = pattern.format(read_code, expected_code)
raise exceptions.InvalidHttpSuccessCode(details)
- def post(self, url, body, headers=None):
- return self.request('POST', url, headers, body)
+ def post(self, url, body, headers=None, extra_headers=False):
+ return self.request('POST', url, extra_headers, headers, body)
- def get(self, url, headers=None):
- return self.request('GET', url, headers)
+ def get(self, url, headers=None, extra_headers=False):
+ return self.request('GET', url, extra_headers, headers)
- def delete(self, url, headers=None, body=None):
- return self.request('DELETE', url, headers, body)
+ def delete(self, url, headers=None, body=None, extra_headers=False):
+ return self.request('DELETE', url, extra_headers, headers, body)
- def patch(self, url, body, headers=None):
- return self.request('PATCH', url, headers, body)
+ def patch(self, url, body, headers=None, extra_headers=False):
+ return self.request('PATCH', url, extra_headers, headers, body)
- def put(self, url, body, headers=None):
- return self.request('PUT', url, headers, body)
+ def put(self, url, body, headers=None, extra_headers=False):
+ return self.request('PUT', url, extra_headers, headers, body)
- def head(self, url, headers=None):
- return self.request('HEAD', url, headers)
+ def head(self, url, headers=None, extra_headers=False):
+ return self.request('HEAD', url, extra_headers, headers)
- def copy(self, url, headers=None):
- return self.request('COPY', url, headers)
+ def copy(self, url, headers=None, extra_headers=False):
+ return self.request('COPY', url, extra_headers, headers)
def get_versions(self):
resp, body = self.get('')
@@ -222,44 +232,57 @@
versions = map(lambda x: x['id'], body)
return resp, versions
- def _log_request(self, method, req_url, headers, body):
- self.LOG.info('Request: ' + method + ' ' + req_url)
- if headers:
- print_headers = headers
- if 'X-Auth-Token' in headers and headers['X-Auth-Token']:
- token = headers['X-Auth-Token']
- if len(token) > 64 and TOKEN_CHARS_RE.match(token):
- print_headers = headers.copy()
- print_headers['X-Auth-Token'] = "<Token omitted>"
- self.LOG.debug('Request Headers: ' + str(print_headers))
- if body:
- str_body = str(body)
- length = len(str_body)
- self.LOG.debug('Request Body: ' + str_body[:2048])
- if length >= 2048:
- self.LOG.debug("Large body (%d) md5 summary: %s", length,
- hashlib.md5(str_body).hexdigest())
+ def _get_request_id(self, resp):
+ for i in ('x-openstack-request-id', 'x-compute-request-id'):
+ if i in resp:
+ return resp[i]
+ return ""
- def _log_response(self, resp, resp_body):
- status = resp['status']
- self.LOG.info("Response Status: " + status)
- headers = resp.copy()
- del headers['status']
- if headers.get('x-compute-request-id'):
- self.LOG.info("Nova/Cinder request id: %s" %
- headers.pop('x-compute-request-id'))
- elif headers.get('x-openstack-request-id'):
- self.LOG.info("OpenStack request id %s" %
- headers.pop('x-openstack-request-id'))
- if len(headers):
- self.LOG.debug('Response Headers: ' + str(headers))
- if resp_body:
- str_body = str(resp_body)
- length = len(str_body)
- self.LOG.debug('Response Body: ' + str_body[:2048])
- if length >= 2048:
- self.LOG.debug("Large body (%d) md5 summary: %s", length,
- hashlib.md5(str_body).hexdigest())
+ def _log_request(self, method, req_url, resp,
+ secs="", req_headers={},
+ req_body=None, resp_body=None):
+ # if we have the request id, put it in the right part of the log
+ extra = dict(request_id=self._get_request_id(resp))
+ # NOTE(sdague): while we still have 6 callers to this function
+ # we're going to just work around the callers that don't provide
+ # timings by gracefully adding no content for them.
+ # Once we're down to 1 caller, clean this up.
+ caller_name = misc_utils.find_test_caller()
+ if secs:
+ secs = " %.3fs" % secs
+ self.LOG.info(
+ 'Request (%s): %s %s %s%s' % (
+ caller_name,
+ resp['status'],
+ method,
+ req_url,
+ secs),
+ extra=extra)
+
+ # We intentionally duplicate the info-level content here so that, when
+ # tests run in parallel, the debug trace can be matched to its info line
+ trace_regex = CONF.debug.trace_requests
+ if trace_regex and re.search(trace_regex, caller_name):
+ if 'X-Auth-Token' in req_headers:
+ req_headers['X-Auth-Token'] = '<omitted>'
+ log_fmt = """Request (%s): %s %s %s%s
+ Request - Headers: %s
+ Body: %s
+ Response - Headers: %s
+ Body: %s"""
+
+ self.LOG.debug(
+ log_fmt % (
+ caller_name,
+ resp['status'],
+ method,
+ req_url,
+ secs,
+ str(req_headers),
+ str(req_body)[:2048],
+ str(resp),
+ str(resp_body)[:2048]),
+ extra=extra)
def _parse_resp(self, body):
if self._get_type() is "json":
@@ -305,7 +328,7 @@
# Parse one-item-like xmls (user, role, etc)
return common.xml_to_json(element)
- def response_checker(self, method, url, headers, body, resp, resp_body):
+ def response_checker(self, method, resp, resp_body):
if (resp.status in set((204, 205, 304)) or resp.status < 200 or
method.upper() == 'HEAD') and resp_body:
raise exceptions.ResponseWithNonEmptyBody(status=resp.status)
@@ -338,24 +361,37 @@
# Authenticate the request with the auth provider
req_url, req_headers, req_body = self.auth_provider.auth_request(
method, url, headers, body, self.filters)
- self._log_request(method, req_url, req_headers, req_body)
- # Do the actual request
+
+ # Do the actual request, and time it
+ start = time.time()
resp, resp_body = self.http_obj.request(
req_url, method, headers=req_headers, body=req_body)
- self._log_response(resp, resp_body)
+ end = time.time()
+ self._log_request(method, req_url, resp, secs=(end - start),
+ req_headers=req_headers, req_body=req_body,
+ resp_body=resp_body)
+
# Verify HTTP response codes
- self.response_checker(method, url, req_headers, req_body, resp,
- resp_body)
+ self.response_checker(method, resp, resp_body)
return resp, resp_body
- def request(self, method, url, headers=None, body=None):
+ def request(self, method, url, extra_headers=False, headers=None,
+ body=None):
+ # If extra_headers is True, the default headers returned by
+ # get_headers() are merged into the caller-supplied headers.
retry = 0
if headers is None:
# NOTE(vponomaryov): if some client do not need headers,
# it should explicitly pass empty dict
headers = self.get_headers()
+ elif extra_headers:
+ try:
+ headers = headers.copy()
+ headers.update(self.get_headers())
+ except (ValueError, TypeError):
+ headers = self.get_headers()
resp, resp_body = self._request(method, url,
headers=headers, body=body)
@@ -412,7 +448,7 @@
raise exceptions.InvalidContentType(str(resp.status))
if resp.status == 401 or resp.status == 403:
- raise exceptions.Unauthorized()
+ raise exceptions.Unauthorized(resp_body)
if resp.status == 404:
raise exceptions.NotFound(resp_body)
@@ -502,6 +538,42 @@
% self.__class__.__name__)
raise NotImplementedError(message)
+ @classmethod
+ def validate_response(cls, schema, resp, body):
+ # Only check the response if the status code is a success code
+ # TODO(cyeoh): Eventually we should be able to verify that a failure
+ # code if it exists is something that we expect. This is explicitly
+ # declared in the V3 API and so we should be able to export this in
+ # the response schema. For now we'll ignore it.
+ if resp.status in HTTP_SUCCESS:
+ response_code = schema['status_code']
+ if resp.status not in response_code:
+ msg = ("The status code(%s) is different than the expected "
+ "one(%s)") % (resp.status, response_code)
+ raise exceptions.InvalidHttpSuccessCode(msg)
+
+ # Check the body of a response
+ body_schema = schema.get('response_body')
+ if body_schema:
+ try:
+ jsonschema.validate(body, body_schema)
+ except jsonschema.ValidationError as ex:
+ msg = ("HTTP response body is invalid (%s)") % ex
+ raise exceptions.InvalidHTTPResponseBody(msg)
+ else:
+ if body:
+ msg = ("HTTP response body should not exist (%s)") % body
+ raise exceptions.InvalidHTTPResponseBody(msg)
+
+ # Check the header of a response
+ header_schema = schema.get('response_header')
+ if header_schema:
+ try:
+ jsonschema.validate(resp, header_schema)
+ except jsonschema.ValidationError as ex:
+ msg = ("HTTP response header is invalid (%s)") % ex
+ raise exceptions.InvalidHTTPResponseHeader(msg)
+
class NegativeRestClient(RestClient):
"""
diff --git a/tempest/common/utils/linux/remote_client.py b/tempest/common/utils/linux/remote_client.py
index 00e5e0d..95b6833 100644
--- a/tempest/common/utils/linux/remote_client.py
+++ b/tempest/common/utils/linux/remote_client.py
@@ -112,3 +112,8 @@
def turn_nic_on(self, nic):
cmd = "sudo /bin/ip link set {nic} up".format(nic=nic)
return self.exec_command(cmd)
+
+ def get_pids(self, pr_name):
+ # Get pid(s) of a process/program
+ cmd = "ps -ef | grep %s | grep -v 'grep' | awk {'print $1'}" % pr_name
+ return self.exec_command(cmd).split('\n')
diff --git a/tempest/common/utils/misc.py b/tempest/common/utils/misc.py
index a0b0c0a..b9f411b 100644
--- a/tempest/common/utils/misc.py
+++ b/tempest/common/utils/misc.py
@@ -13,6 +13,13 @@
# License for the specific language governing permissions and limitations
# under the License.
+import inspect
+import re
+
+from tempest.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
def singleton(cls):
"""Simple wrapper for classes that should only have a single instance."""
@@ -23,3 +30,55 @@
instances[cls] = cls()
return instances[cls]
return getinstance
+
+
+def find_test_caller():
+ """Find the caller class and test name.
+
+ Because we know that the interesting things that call us are
+ test_* methods, and various kinds of setUp / tearDown, we
+ can look through the call stack to find appropriate methods,
+ and the class we were in when those were called.
+ """
+ caller_name = None
+ names = []
+ frame = inspect.currentframe()
+ is_cleanup = False
+ # Start climbing the ladder until we hit a good method
+ while True:
+ try:
+ frame = frame.f_back
+ name = frame.f_code.co_name
+ names.append(name)
+ if re.search("^(test_|setUp|tearDown)", name):
+ cname = ""
+ if 'self' in frame.f_locals:
+ cname = frame.f_locals['self'].__class__.__name__
+ if 'cls' in frame.f_locals:
+ cname = frame.f_locals['cls'].__name__
+ caller_name = cname + ":" + name
+ break
+ elif re.search("^_run_cleanup", name):
+ is_cleanup = True
+ else:
+ cname = ""
+ if 'self' in frame.f_locals:
+ cname = frame.f_locals['self'].__class__.__name__
+ if 'cls' in frame.f_locals:
+ cname = frame.f_locals['cls'].__name__
+
+ # the fact that we are running cleanups is indicated pretty
+ # deep in the stack, so if we see that we want to just
+ # start looking for a real class name, and declare victory
+ # once we do.
+ if is_cleanup and cname:
+ if not re.search("^RunTest", cname):
+ caller_name = cname + ":_run_cleanups"
+ break
+ except Exception:
+ break
+ # prevents frame leaks
+ del frame
+ if caller_name is None:
+ LOG.debug("Sane call name not found in %s" % names)
+ return caller_name
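find_test_caller() is what the new request log line and the waiter timeout
messages use to attribute an HTTP call or a timeout to the test that caused
it. A small sketch of the string it produces, with a made-up class name:

    from tempest.common.utils import misc as misc_utils

    class FakeServersTest(object):            # hypothetical test class
        def test_list_servers(self):
            # called from inside a test_* method this returns
            # "FakeServersTest:test_list_servers"
            return misc_utils.find_test_caller()

    print(FakeServersTest().test_list_servers())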
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index 8e6b9fb..d52ed7c 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -13,6 +13,7 @@
import time
+from tempest.common.utils import misc as misc_utils
from tempest import config
from tempest import exceptions
from tempest.openstack.common import log as logging
@@ -86,6 +87,9 @@
'timeout': timeout})
message += ' Current status: %s.' % server_status
message += ' Current task state: %s.' % task_state
+ caller = misc_utils.find_test_caller()
+ if caller:
+ message = '(%s) %s' % (caller, message)
raise exceptions.TimeoutException(message)
old_status = server_status
old_task_state = task_state
@@ -119,4 +123,7 @@
'status': status,
'timeout': client.build_timeout})
message += ' Current status: %s.' % image['status']
+ caller = misc_utils.find_test_caller()
+ if caller:
+ message = '(%s) %s' % (caller, message)
raise exceptions.TimeoutException(message)
diff --git a/tempest/services/compute/xml/common.py b/tempest/common/xml_utils.py
similarity index 98%
rename from tempest/services/compute/xml/common.py
rename to tempest/common/xml_utils.py
index b29b932..b1bf789 100644
--- a/tempest/services/compute/xml/common.py
+++ b/tempest/common/xml_utils.py
@@ -19,6 +19,7 @@
XMLNS_V3 = "http://docs.openstack.org/compute/api/v1.1"
NEUTRON_NAMESPACES = {
+ 'binding': "http://docs.openstack.org/ext/binding/api/v1.0",
'router': "http://docs.openstack.org/ext/neutron/router/api/v1.0",
'provider': 'http://docs.openstack.org/ext/provider/api/v1.0',
}
diff --git a/tempest/config.py b/tempest/config.py
index 212ee8a..f9be90d 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -60,18 +60,22 @@
'publicURL', 'adminURL', 'internalURL'],
help="The endpoint type to use for the identity service."),
cfg.StrOpt('username',
- default='demo',
+ default=None,
help="Username to use for Nova API requests."),
cfg.StrOpt('tenant_name',
- default='demo',
+ default=None,
help="Tenant name to use for Nova API requests."),
cfg.StrOpt('admin_role',
default='admin',
help="Role required to administrate keystone."),
cfg.StrOpt('password',
- default='pass',
+ default=None,
help="API key to use when authenticating.",
secret=True),
+ cfg.StrOpt('domain_name',
+ default=None,
+ help="Domain name for authentication (Keystone V3)."
+ "The same domain applies to user and project"),
cfg.StrOpt('alt_username',
default=None,
help="Username of alternate user to use for Nova API "
@@ -84,18 +88,26 @@
default=None,
help="API key to use when authenticating as alternate user.",
secret=True),
+ cfg.StrOpt('alt_domain_name',
+ default=None,
+ help="Alternate domain name for authentication (Keystone V3)."
+ "The same domain applies to user and project"),
cfg.StrOpt('admin_username',
- default='admin',
+ default=None,
help="Administrative Username to use for "
"Keystone API requests."),
cfg.StrOpt('admin_tenant_name',
- default='admin',
+ default=None,
help="Administrative Tenant name to use for Keystone API "
"requests."),
cfg.StrOpt('admin_password',
- default='pass',
+ default=None,
help="API key to use when authenticating as admin.",
secret=True),
+ cfg.StrOpt('admin_domain_name',
+ default=None,
+ help="Admin domain name for authentication (Keystone V3)."
+ "The same domain applies to user and project"),
]
identity_feature_group = cfg.OptGroup(name='identity-feature-enabled',
@@ -126,7 +138,7 @@
"OpenStack Identity API admin credentials are known."),
cfg.StrOpt('image_ref',
default="{$IMAGE_ID}",
- help="Valid secondary image reference to be used in tests."),
+ help="Valid primary image reference to be used in tests."),
cfg.StrOpt('image_ref_alt',
default="{$IMAGE_ID_ALT}",
help="Valid secondary image reference to be used in tests."),
@@ -159,6 +171,19 @@
cfg.BoolOpt('run_ssh',
default=False,
help="Should the tests ssh to instances?"),
+ cfg.StrOpt('ssh_auth_method',
+ default='keypair',
+ help="Auth method used for authenticate to the instance. "
+ "Valid choices are: keypair, configured, adminpass. "
+ "keypair: start the servers with an ssh keypair. "
+ "configured: use the configured user and password. "
+ "adminpass: use the injected adminPass. "
+ "disabled: avoid using ssh when it is an option."),
+ cfg.StrOpt('ssh_connect_method',
+ default='fixed',
+ help="How to connect to the instance? "
+ "fixed: using the first ip belongs the fixed network "
+ "floating: creating and using a floating ip"),
cfg.StrOpt('ssh_user',
default='root',
help="User name used to authenticate to an instance."),
@@ -189,7 +214,7 @@
help="IP version used for SSH connections."),
cfg.BoolOpt('use_floatingip_for_ssh',
default=True,
- help="Dose the SSH uses Floating IP?"),
+ help="Does SSH use Floating IPs?"),
cfg.StrOpt('catalog_type',
default='compute',
help="Catalog type of the Compute service."),
@@ -246,12 +271,15 @@
default=False,
help="Does the test environment support changing the admin "
"password?"),
- cfg.BoolOpt('create_image',
- default=False,
- help="Does the test environment support snapshots?"),
cfg.BoolOpt('resize',
default=False,
help="Does the test environment support resizing?"),
+ cfg.BoolOpt('pause',
+ default=True,
+ help="Does the test environment support pausing?"),
+ cfg.BoolOpt('suspend',
+ default=True,
+ help="Does the test environment support suspend/resume?"),
cfg.BoolOpt('live_migration',
default=False,
help="Does the test environment support live migration "
@@ -267,7 +295,15 @@
cfg.BoolOpt('vnc_console',
default=False,
help='Enable VNC console. This configuration value should '
- 'be same as [nova.vnc]->vnc_enabled in nova.conf')
+ 'be the same as [nova.vnc]->vnc_enabled in nova.conf'),
+ cfg.BoolOpt('spice_console',
+ default=False,
+ help='Enable Spice console. This configuration value should '
+ 'be the same as [nova.spice]->enabled in nova.conf'),
+ cfg.BoolOpt('rdp_console',
+ default=False,
+ help='Enable RDP console. This configuration value should '
+ 'be the same as [nova.rdp]->enabled in nova.conf')
]
@@ -276,16 +312,20 @@
ComputeAdminGroup = [
cfg.StrOpt('username',
- default='admin',
+ default=None,
help="Administrative Username to use for Nova API requests."),
cfg.StrOpt('tenant_name',
- default='admin',
+ default=None,
help="Administrative Tenant name to use for Nova API "
"requests."),
cfg.StrOpt('password',
- default='pass',
+ default=None,
help="API key to use when authenticating as admin.",
secret=True),
+ cfg.StrOpt('domain_name',
+ default=None,
+ help="Domain name for authentication as admin (Keystone V3)."
+ "The same domain applies to user and project"),
]
image_group = cfg.OptGroup(name='image',
@@ -438,6 +478,9 @@
cfg.StrOpt('disk_format',
default='raw',
help='Disk format to use when copying a volume to image'),
+ cfg.IntOpt('volume_size',
+ default=1,
+ help='Default size in GB for volumes created by volumes tests'),
]
volume_feature_group = cfg.OptGroup(name='volume-feature-enabled',
@@ -450,6 +493,9 @@
cfg.BoolOpt('backup',
default=True,
help='Runs Cinder volumes backup test'),
+ cfg.BoolOpt('snapshot',
+ default=True,
+ help='Runs Cinder volume snapshot test'),
cfg.ListOpt('api_extensions',
default=['all'],
help='A list of enabled volume extensions with a special '
@@ -520,6 +566,9 @@
cfg.StrOpt('db_flavor_ref',
default="1",
help="Valid primary flavor to use in database tests."),
+ cfg.StrOpt('db_current_version',
+ default="v1.0",
+ help="Current database version to use in database tests."),
]
orchestration_group = cfg.OptGroup(name='orchestration',
@@ -550,7 +599,7 @@
default=1,
help="Time in seconds between build status checks."),
cfg.IntOpt('build_timeout',
- default=600,
+ default=1200,
help="Timeout in seconds to wait for a stack to build."),
cfg.StrOpt('instance_type',
default='m1.micro',
@@ -566,6 +615,9 @@
cfg.IntOpt('max_template_size',
default=524288,
help="Value must match heat configuration of the same name."),
+ cfg.IntOpt('max_resources_per_stack',
+ default=1000,
+ help="Value must match heat configuration of the same name."),
]
@@ -629,6 +681,9 @@
cfg.StrOpt('aws_access',
default=None,
help="AWS Access Key"),
+ cfg.StrOpt('aws_zone',
+ default="nova",
+ help="AWS Zone for EC2 tests"),
cfg.StrOpt('s3_materials_path',
default="/opt/stack/devstack/files/images/"
"s3-materials/cirros-0.3.0",
@@ -761,9 +816,9 @@
cfg.BoolOpt('horizon',
default=True,
help="Whether or not Horizon is expected to be available"),
- cfg.BoolOpt('savanna',
+ cfg.BoolOpt('sahara',
default=False,
- help="Whether or not Savanna is expected to be available"),
+ help="Whether or not Sahara is expected to be available"),
cfg.BoolOpt('ironic',
default=False,
help="Whether or not Ironic is expected to be available"),
@@ -782,6 +837,26 @@
cfg.BoolOpt('enable',
default=True,
help="Enable diagnostic commands"),
+ cfg.StrOpt('trace_requests',
+ default='',
+ help="""A regex to determine which requests should be traced.
+
+This is a regex to match the caller for rest client requests to be able to
+selectively trace calls out of specific classes and methods. It largely
+exists for test development, and is not expected to be used in a real deploy
+of tempest. This will be matched against the discovered ClassName:method
+in the test environment.
+
+Expected values for this field are:
+
+ * ClassName:test_method_name - traces one test_method
+ * ClassName:setUp(Class) - traces specific setup functions
+ * ClassName:tearDown(Class) - traces specific teardown functions
+ * ClassName:_run_cleanups - traces the cleanup functions
+
+If nothing is specified, this feature is not enabled. To trace everything
+specify .* as the regex.
+""")
]
input_scenario_group = cfg.OptGroup(name="input-scenario",
@@ -812,13 +887,29 @@
BaremetalGroup = [
cfg.StrOpt('catalog_type',
default='baremetal',
- help="Catalog type of the baremetal provisioning service."),
+ help="Catalog type of the baremetal provisioning service"),
+ cfg.BoolOpt('driver_enabled',
+ default=False,
+ help="Whether the Ironic nova-compute driver is enabled"),
cfg.StrOpt('endpoint_type',
default='publicURL',
choices=['public', 'admin', 'internal',
'publicURL', 'adminURL', 'internalURL'],
help="The endpoint type to use for the baremetal provisioning "
- "service."),
+ "service"),
+ cfg.IntOpt('active_timeout',
+ default=300,
+ help="Timeout for Ironic node to completely provision"),
+ cfg.IntOpt('association_timeout',
+ default=10,
+ help="Timeout for association of Nova instance and Ironic "
+ "node"),
+ cfg.IntOpt('power_timeout',
+ default=20,
+ help="Timeout for Ironic power transitions."),
+ cfg.IntOpt('unprovision_timeout',
+ default=20,
+ help="Timeout for unprovisioning an Ironic node.")
]
cli_group = cfg.OptGroup(name='cli', title="cli Configuration Options")
@@ -932,6 +1023,13 @@
self.compute_admin.username = self.identity.admin_username
self.compute_admin.password = self.identity.admin_password
self.compute_admin.tenant_name = self.identity.admin_tenant_name
+ cfg.CONF.set_default('domain_name', self.identity.admin_domain_name,
+ group='identity')
+ cfg.CONF.set_default('alt_domain_name',
+ self.identity.admin_domain_name,
+ group='identity')
+ cfg.CONF.set_default('domain_name', self.identity.admin_domain_name,
+ group='compute-admin')
def __init__(self, parse_conf=True):
"""Initialize a configuration from a conf directory and conf file."""
diff --git a/tempest/exceptions.py b/tempest/exceptions.py
new file mode 100644
index 0000000..4eb1cea
--- /dev/null
+++ b/tempest/exceptions.py
@@ -0,0 +1,213 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import testtools
+
+
+class TempestException(Exception):
+ """
+ Base Tempest Exception
+
+ To correctly use this class, inherit from it and define
+ a 'message' property. That message will get printf'd
+ with the keyword arguments provided to the constructor.
+ """
+ message = "An unknown exception occurred"
+
+ def __init__(self, *args, **kwargs):
+ super(TempestException, self).__init__()
+ try:
+ self._error_string = self.message % kwargs
+ except Exception:
+ # at least get the core message out if something happened
+ self._error_string = self.message
+ if len(args) > 0:
+ # If there is a non-kwarg parameter, assume it's the error
+ # message or reason description and tack it on to the end
+ # of the exception message
+ # Convert all arguments into their string representations...
+ args = ["%s" % arg for arg in args]
+ self._error_string = (self._error_string +
+ "\nDetails: %s" % '\n'.join(args))
+
+ def __str__(self):
+ return self._error_string
+
+
+class RestClientException(TempestException,
+ testtools.TestCase.failureException):
+ pass
+
+
+class RFCViolation(RestClientException):
+ message = "RFC Violation"
+
+
+class InvalidConfiguration(TempestException):
+ message = "Invalid Configuration"
+
+
+class InvalidCredentials(TempestException):
+ message = "Invalid Credentials"
+
+
+class InvalidHttpSuccessCode(RestClientException):
+ message = "The success code is different than the expected one"
+
+
+class NotFound(RestClientException):
+ message = "Object not found"
+
+
+class Unauthorized(RestClientException):
+ message = 'Unauthorized'
+
+
+class InvalidServiceTag(RestClientException):
+ message = "Invalid service tag"
+
+
+class TimeoutException(TempestException):
+ message = "Request timed out"
+
+
+class BuildErrorException(TempestException):
+ message = "Server %(server_id)s failed to build and is in ERROR status"
+
+
+class ImageKilledException(TempestException):
+ message = "Image %(image_id)s 'killed' while waiting for '%(status)s'"
+
+
+class AddImageException(TempestException):
+ message = "Image %(image_id)s failed to become ACTIVE in the allotted time"
+
+
+class EC2RegisterImageException(TempestException):
+ message = ("Image %(image_id)s failed to become 'available' "
+ "in the allotted time")
+
+
+class VolumeBuildErrorException(TempestException):
+ message = "Volume %(volume_id)s failed to build and is in ERROR status"
+
+
+class SnapshotBuildErrorException(TempestException):
+ message = "Snapshot %(snapshot_id)s failed to build and is in ERROR status"
+
+
+class VolumeBackupException(TempestException):
+ message = "Volume backup %(backup_id)s failed and is in ERROR status"
+
+
+class StackBuildErrorException(TempestException):
+ message = ("Stack %(stack_identifier)s is in %(stack_status)s status "
+ "due to '%(stack_status_reason)s'")
+
+
+class StackResourceBuildErrorException(TempestException):
+ message = ("Resource %(resource_name)s in stack %(stack_identifier)s is "
+ "in %(resource_status)s status due to "
+ "'%(resource_status_reason)s'")
+
+
+class BadRequest(RestClientException):
+ message = "Bad request"
+
+
+class UnprocessableEntity(RestClientException):
+ message = "Unprocessable entity"
+
+
+class AuthenticationFailure(RestClientException):
+ message = ("Authentication with user %(user)s and password "
+ "%(password)s failed auth using tenant %(tenant)s.")
+
+
+class EndpointNotFound(TempestException):
+ message = "Endpoint not found"
+
+
+class RateLimitExceeded(TempestException):
+ message = "Rate limit exceeded"
+
+
+class OverLimit(TempestException):
+ message = "Quota exceeded"
+
+
+class ServerFault(TempestException):
+ message = "Got server fault"
+
+
+class ImageFault(TempestException):
+ message = "Got image fault"
+
+
+class IdentityError(TempestException):
+ message = "Got identity error"
+
+
+class Conflict(RestClientException):
+ message = "An object with that identifier already exists"
+
+
+class SSHTimeout(TempestException):
+ message = ("Connection to the %(host)s via SSH timed out.\n"
+ "User: %(user)s, Password: %(password)s")
+
+
+class SSHExecCommandFailed(TempestException):
+ """Raised when remotely executed command returns nonzero status."""
+ message = ("Command '%(command)s', exit status: %(exit_status)d, "
+ "Error:\n%(strerror)s")
+
+
+class ServerUnreachable(TempestException):
+ message = "The server is not reachable via the configured network"
+
+
+class TearDownException(TempestException):
+ message = "%(num)d cleanUp operation failed"
+
+
+class ResponseWithNonEmptyBody(RFCViolation):
+ message = ("RFC Violation! Response with %(status)d HTTP Status Code "
+ "MUST NOT have a body")
+
+
+class ResponseWithEntity(RFCViolation):
+ message = ("RFC Violation! Response with 205 HTTP Status Code "
+ "MUST NOT have an entity")
+
+
+class InvalidHTTPResponseBody(RestClientException):
+ message = "HTTP response body is invalid json or xml"
+
+
+class InvalidHTTPResponseHeader(RestClientException):
+ message = "HTTP response header is invalid"
+
+
+class InvalidContentType(RestClientException):
+ message = "Invalid content type provided"
+
+
+class UnexpectedResponseCode(RestClientException):
+ message = "Unexpected response code received"
+
+
+class InvalidStructure(TempestException):
+ message = "Invalid structure of table with details"
diff --git a/tempest/exceptions/README.rst b/tempest/exceptions/README.rst
deleted file mode 100644
index dbe42b2..0000000
--- a/tempest/exceptions/README.rst
+++ /dev/null
@@ -1,27 +0,0 @@
-Tempest Field Guide to Exceptions
-=================================
-
-
-What are these exceptions?
---------------------------
-
-These exceptions are used by Tempest for covering OpenStack specific exceptional
-cases.
-
-How to add new exceptions?
---------------------------
-
-Each exception-template for inheritance purposes should be added into 'base'
-submodule.
-All other exceptions can be added in two ways:
-- in main module
-- in submodule
-But only in one of the ways. Need to make sure, that new exception is not
-present already.
-
-How to use exceptions?
-----------------------
-
-Any exceptions from this module or its submodules should be used in appropriate
-places to handle exceptional cases.
-Classes from 'base' module should be used only for inheritance.
diff --git a/tempest/exceptions/__init__.py b/tempest/exceptions/__init__.py
deleted file mode 100644
index 06dee71..0000000
--- a/tempest/exceptions/__init__.py
+++ /dev/null
@@ -1,156 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.exceptions import base
-
-
-class InvalidConfiguration(base.TempestException):
- message = "Invalid Configuration"
-
-
-class InvalidHttpSuccessCode(base.RestClientException):
- message = "The success code is different than the expected one"
-
-
-class NotFound(base.RestClientException):
- message = "Object not found"
-
-
-class Unauthorized(base.RestClientException):
- message = 'Unauthorized'
-
-
-class InvalidServiceTag(base.RestClientException):
- message = "Invalid service tag"
-
-
-class TimeoutException(base.TempestException):
- message = "Request timed out"
-
-
-class BuildErrorException(base.TempestException):
- message = "Server %(server_id)s failed to build and is in ERROR status"
-
-
-class ImageKilledException(base.TempestException):
- message = "Image %(image_id)s 'killed' while waiting for '%(status)s'"
-
-
-class AddImageException(base.TempestException):
- message = "Image %(image_id)s failed to become ACTIVE in the allotted time"
-
-
-class EC2RegisterImageException(base.TempestException):
- message = ("Image %(image_id)s failed to become 'available' "
- "in the allotted time")
-
-
-class VolumeBuildErrorException(base.TempestException):
- message = "Volume %(volume_id)s failed to build and is in ERROR status"
-
-
-class SnapshotBuildErrorException(base.TempestException):
- message = "Snapshot %(snapshot_id)s failed to build and is in ERROR status"
-
-
-class VolumeBackupException(base.TempestException):
- message = "Volume backup %(backup_id)s failed and is in ERROR status"
-
-
-class StackBuildErrorException(base.TempestException):
- message = ("Stack %(stack_identifier)s is in %(stack_status)s status "
- "due to '%(stack_status_reason)s'")
-
-
-class BadRequest(base.RestClientException):
- message = "Bad request"
-
-
-class UnprocessableEntity(base.RestClientException):
- message = "Unprocessable entity"
-
-
-class AuthenticationFailure(base.RestClientException):
- message = ("Authentication with user %(user)s and password "
- "%(password)s failed auth using tenant %(tenant)s.")
-
-
-class EndpointNotFound(base.TempestException):
- message = "Endpoint not found"
-
-
-class RateLimitExceeded(base.TempestException):
- message = "Rate limit exceeded"
-
-
-class OverLimit(base.TempestException):
- message = "Quota exceeded"
-
-
-class ServerFault(base.TempestException):
- message = "Got server fault"
-
-
-class ImageFault(base.TempestException):
- message = "Got image fault"
-
-
-class IdentityError(base.TempestException):
- message = "Got identity error"
-
-
-class Conflict(base.RestClientException):
- message = "An object with that identifier already exists"
-
-
-class SSHTimeout(base.TempestException):
- message = ("Connection to the %(host)s via SSH timed out.\n"
- "User: %(user)s, Password: %(password)s")
-
-
-class SSHExecCommandFailed(base.TempestException):
- """Raised when remotely executed command returns nonzero status."""
- message = ("Command '%(command)s', exit status: %(exit_status)d, "
- "Error:\n%(strerror)s")
-
-
-class ServerUnreachable(base.TempestException):
- message = "The server is not reachable via the configured network"
-
-
-class TearDownException(base.TempestException):
- message = "%(num)d cleanUp operation failed"
-
-
-class ResponseWithNonEmptyBody(base.RFCViolation):
- message = ("RFC Violation! Response with %(status)d HTTP Status Code "
- "MUST NOT have a body")
-
-
-class ResponseWithEntity(base.RFCViolation):
- message = ("RFC Violation! Response with 205 HTTP Status Code "
- "MUST NOT have an entity")
-
-
-class InvalidHTTPResponseBody(base.RestClientException):
- message = "HTTP response body is invalid json or xml"
-
-
-class InvalidContentType(base.RestClientException):
- message = "Invalid content type provided"
-
-
-class UnexpectedResponseCode(base.RestClientException):
- message = "Unexpected response code received"
diff --git a/tempest/exceptions/base.py b/tempest/exceptions/base.py
deleted file mode 100644
index b8e470e..0000000
--- a/tempest/exceptions/base.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import testtools
-
-
-class TempestException(Exception):
- """
- Base Tempest Exception
-
- To correctly use this class, inherit from it and define
- a 'message' property. That message will get printf'd
- with the keyword arguments provided to the constructor.
- """
- message = "An unknown exception occurred"
-
- def __init__(self, *args, **kwargs):
- super(TempestException, self).__init__()
- try:
- self._error_string = self.message % kwargs
- except Exception:
- # at least get the core message out if something happened
- self._error_string = self.message
- if len(args) > 0:
- # If there is a non-kwarg parameter, assume it's the error
- # message or reason description and tack it on to the end
- # of the exception message
- # Convert all arguments into their string representations...
- args = ["%s" % arg for arg in args]
- self._error_string = (self._error_string +
- "\nDetails: %s" % '\n'.join(args))
-
- def __str__(self):
- return self._error_string
-
-
-class RestClientException(TempestException,
- testtools.TestCase.failureException):
- pass
-
-
-class RFCViolation(RestClientException):
- message = "RFC Violation"
diff --git a/tempest/hacking/checks.py b/tempest/hacking/checks.py
index 55be60a..183d422 100644
--- a/tempest/hacking/checks.py
+++ b/tempest/hacking/checks.py
@@ -12,15 +12,20 @@
# License for the specific language governing permissions and limitations
# under the License.
+import os
import re
+import pep8
-PYTHON_CLIENTS = ['cinder', 'glance', 'keystone', 'nova', 'swift', 'neutron']
+
+PYTHON_CLIENTS = ['cinder', 'glance', 'keystone', 'nova', 'swift', 'neutron',
+ 'trove', 'ironic', 'savanna', 'heat', 'ceilometer',
+ 'marconi', 'sahara']
PYTHON_CLIENT_RE = re.compile('import (%s)client' % '|'.join(PYTHON_CLIENTS))
TEST_DEFINITION = re.compile(r'^\s*def test.*')
SETUPCLASS_DEFINITION = re.compile(r'^\s*def setUpClass')
-SCENARIO_DECORATOR = re.compile(r'\s*@.*services\(')
+SCENARIO_DECORATOR = re.compile(r'\s*@.*services\((.*)\)')
VI_HEADER_RE = re.compile(r"^#\s+vim?:.+")
@@ -45,7 +50,7 @@
T104: Scenario tests require a services decorator
"""
- if 'tempest/scenario' in filename:
+ if 'tempest/scenario/test_' in filename:
if TEST_DEFINITION.match(physical_line):
if not SCENARIO_DECORATOR.match(previous_logical):
return (physical_line.find('def'),
@@ -53,6 +58,10 @@
def no_setupclass_for_unit_tests(physical_line, filename):
+
+ if pep8.noqa(physical_line):
+ return
+
if 'tempest/tests' in filename:
if SETUPCLASS_DEFINITION.match(physical_line):
return (physical_line.find('def'),
@@ -73,8 +82,32 @@
return 0, "T106: Don't put vi configuration in source files"
+def service_tags_not_in_module_path(physical_line, filename):
+ """Check that a service tag isn't in the module path
+
+ A service tag should only be added if the service name isn't already in
+ the module path.
+
+ T107
+ """
+ # NOTE(mtreinish) Scenario tests always need service tags, but subdirs are
+ # created for services like heat, which would cause false positives for
+ # those tests, so just exclude the scenario tests.
+ if 'tempest/scenario' not in filename:
+ matches = SCENARIO_DECORATOR.match(physical_line)
+ if matches:
+ services = matches.group(1).split(',')
+ for service in services:
+ service_name = service.strip().strip("'")
+ modulepath = os.path.split(filename)[0]
+ if service_name in modulepath:
+ return (physical_line.find(service_name),
+ "T107: service tag should not be in path")
+
+
def factory(register):
register(import_no_clients_in_api)
register(scenario_tests_need_service_tags)
register(no_setupclass_for_unit_tests)
register(no_vi_headers)
+ register(service_tags_not_in_module_path)
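
The T107 check introduced above compares every service named in a @test.services(...) decorator with the module path of the file under review. A minimal illustration of the regex and path comparison; the filename and decorator line are made up::

    import os
    import re

    SCENARIO_DECORATOR = re.compile(r'\s*@.*services\((.*)\)')

    filename = 'tempest/api/compute/servers/test_list_servers.py'  # hypothetical
    line = "@test.services('compute', 'image')"

    match = SCENARIO_DECORATOR.match(line)
    if match:
        for service in match.group(1).split(','):
            service_name = service.strip().strip("'")
            if service_name in os.path.split(filename)[0]:
                print("T107: service tag '%s' should not be in path" % service_name)
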
diff --git a/tempest/manager.py b/tempest/manager.py
index 708447e..fb2842f 100644
--- a/tempest/manager.py
+++ b/tempest/manager.py
@@ -29,7 +29,7 @@
and a client object for a test case to use in performing actions.
"""
- def __init__(self, username=None, password=None, tenant_name=None):
+ def __init__(self, credentials=None):
"""
We allow overriding of the credentials used within the various
client classes managed by the Manager object. Left as None, the
@@ -38,29 +38,18 @@
:param credentials: Override of the credentials
"""
self.auth_version = CONF.identity.auth_version
- # FIXME(andreaf) Change Manager __init__ to accept a credentials dict
- if username is None or password is None:
- # Tenant None is a valid use case
- self.credentials = self.get_default_credentials()
+ if credentials is None:
+ self.credentials = auth.get_default_credentials('user')
else:
- self.credentials = dict(username=username, password=password,
- tenant_name=tenant_name)
- if self.auth_version == 'v3':
- self.credentials['domain_name'] = 'Default'
+ self.credentials = credentials
+ # Check if passed or default credentials are valid
+ if not self.credentials.is_valid():
+ raise exceptions.InvalidCredentials()
# Creates an auth provider for the credentials
self.auth_provider = self.get_auth_provider(self.credentials)
# FIXME(andreaf) unused
self.client_attr_names = []
- # we do this everywhere, have it be part of the super class
- def _validate_credentials(self, username, password, tenant_name):
- if None in (username, password, tenant_name):
- msg = ("Missing required credentials. "
- "username: %(u)s, password: %(p)s, "
- "tenant_name: %(t)s" %
- {'u': username, 'p': password, 't': tenant_name})
- raise exceptions.InvalidConfiguration(msg)
-
@classmethod
def get_auth_provider_class(cls, auth_version):
if auth_version == 'v2':
@@ -68,20 +57,12 @@
else:
return auth.KeystoneV3AuthProvider
- def get_default_credentials(self):
- return dict(
- username=CONF.identity.username,
- password=CONF.identity.password,
- tenant_name=CONF.identity.tenant_name
- )
-
- def get_auth_provider(self, credentials=None):
- auth_params = dict(client_type=getattr(self, 'client_type', None),
- interface=getattr(self, 'interface', None))
+ def get_auth_provider(self, credentials):
+ if credentials is None:
+ raise exceptions.InvalidCredentials(
+ 'Credentials must be specified')
auth_provider_class = self.get_auth_provider_class(self.auth_version)
- # If invalid / incomplete credentials are provided, use default ones
- if credentials is None or \
- not auth_provider_class.check_credentials(credentials):
- credentials = self.credentials
- auth_params['credentials'] = credentials
- return auth_provider_class(**auth_params)
+ return auth_provider_class(
+ client_type=getattr(self, 'client_type', None),
+ interface=getattr(self, 'interface', None),
+ credentials=credentials)
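
After this change the Manager takes a single credentials object, validates it, and builds the matching auth provider. A hedged usage sketch; it assumes the class in tempest/manager.py is named Manager and that a populated tempest.conf is available, since get_default_credentials reads the [identity] options::

    from tempest import auth
    from tempest import manager

    # Load the configured primary user credentials and hand them to Manager;
    # an incomplete set raises exceptions.InvalidCredentials.
    creds = auth.get_default_credentials('user')
    mgr = manager.Manager(credentials=creds)
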
diff --git a/tempest/openstack/common/__init__.py b/tempest/openstack/common/__init__.py
index e69de29..d1223ea 100644
--- a/tempest/openstack/common/__init__.py
+++ b/tempest/openstack/common/__init__.py
@@ -0,0 +1,17 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import six
+
+
+six.add_move(six.MovedModule('mox', 'mox', 'mox3.mox'))
diff --git a/tempest/openstack/common/config/generator.py b/tempest/openstack/common/config/generator.py
index eeb5a32..8156cc5 100644
--- a/tempest/openstack/common/config/generator.py
+++ b/tempest/openstack/common/config/generator.py
@@ -1,4 +1,5 @@
# Copyright 2012 SINA Corporation
+# Copyright 2014 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -18,6 +19,7 @@
from __future__ import print_function
+import argparse
import imp
import os
import re
@@ -27,6 +29,7 @@
from oslo.config import cfg
import six
+import stevedore.named
from tempest.openstack.common import gettextutils
from tempest.openstack.common import importutils
@@ -38,6 +41,7 @@
INTOPT = "IntOpt"
FLOATOPT = "FloatOpt"
LISTOPT = "ListOpt"
+DICTOPT = "DictOpt"
MULTISTROPT = "MultiStrOpt"
OPT_TYPES = {
@@ -46,11 +50,12 @@
INTOPT: 'integer value',
FLOATOPT: 'floating point value',
LISTOPT: 'list value',
+ DICTOPT: 'dict value',
MULTISTROPT: 'multi valued',
}
OPTION_REGEX = re.compile(r"(%s)" % "|".join([STROPT, BOOLOPT, INTOPT,
- FLOATOPT, LISTOPT,
+ FLOATOPT, LISTOPT, DICTOPT,
MULTISTROPT]))
PY_EXT = ".py"
@@ -59,34 +64,60 @@
WORDWRAP_WIDTH = 60
-def generate(srcfiles):
+def raise_extension_exception(extmanager, ep, err):
+ raise
+
+
+def generate(argv):
+ parser = argparse.ArgumentParser(
+ description='generate sample configuration file',
+ )
+ parser.add_argument('-m', dest='modules', action='append')
+ parser.add_argument('-l', dest='libraries', action='append')
+ parser.add_argument('srcfiles', nargs='*')
+ parsed_args = parser.parse_args(argv)
+
mods_by_pkg = dict()
- for filepath in srcfiles:
+ for filepath in parsed_args.srcfiles:
pkg_name = filepath.split(os.sep)[1]
mod_str = '.'.join(['.'.join(filepath.split(os.sep)[:-1]),
os.path.basename(filepath).split('.')[0]])
mods_by_pkg.setdefault(pkg_name, list()).append(mod_str)
# NOTE(lzyeval): place top level modules before packages
- pkg_names = filter(lambda x: x.endswith(PY_EXT), mods_by_pkg.keys())
- pkg_names.sort()
- ext_names = filter(lambda x: x not in pkg_names, mods_by_pkg.keys())
- ext_names.sort()
+ pkg_names = sorted(pkg for pkg in mods_by_pkg if pkg.endswith(PY_EXT))
+ ext_names = sorted(pkg for pkg in mods_by_pkg if pkg not in pkg_names)
pkg_names.extend(ext_names)
# opts_by_group is a mapping of group name to an options list
# The options list is a list of (module, options) tuples
opts_by_group = {'DEFAULT': []}
- extra_modules = os.getenv("TEMPEST_CONFIG_GENERATOR_EXTRA_MODULES", "")
- if extra_modules:
- for module_name in extra_modules.split(','):
- module_name = module_name.strip()
+ if parsed_args.modules:
+ for module_name in parsed_args.modules:
module = _import_module(module_name)
if module:
for group, opts in _list_opts(module):
opts_by_group.setdefault(group, []).append((module_name,
opts))
+ # Look for entry points defined in libraries (or applications) for
+ # option discovery, and include their return values in the output.
+ #
+ # Each entry point should be a function returning an iterable
+ # of pairs with the group name (or None for the default group)
+ # and the list of Opt instances for that group.
+ if parsed_args.libraries:
+ loader = stevedore.named.NamedExtensionManager(
+ 'oslo.config.opts',
+ names=list(set(parsed_args.libraries)),
+ invoke_on_load=False,
+ on_load_failure_callback=raise_extension_exception
+ )
+ for ext in loader:
+ for group, opts in ext.plugin():
+ opt_list = opts_by_group.setdefault(group or 'DEFAULT', [])
+ opt_list.append((ext.name, opts))
+
for pkg_name in pkg_names:
mods = mods_by_pkg.get(pkg_name)
mods.sort()
@@ -120,7 +151,7 @@
def _is_in_group(opt, group):
"Check if opt is in group."
- for key, value in group._opts.items():
+ for value in group._opts.values():
# NOTE(llu): Temporary workaround for bug #1262148, wait until
# newly released oslo.config support '==' operator.
if not(value['opt'] != opt):
@@ -134,7 +165,7 @@
return 'DEFAULT'
# what other groups is it in?
- for key, value in cfg.CONF.items():
+ for value in cfg.CONF.values():
if isinstance(value, cfg.CONF.GroupAttr):
if _is_in_group(opt, value._group):
return value._group.name
@@ -203,7 +234,7 @@
return value.replace(BASEDIR, '')
elif value == _get_my_ip():
return '10.0.0.1'
- elif value == socket.gethostname() and 'host' in name:
+ elif value in (socket.gethostname(), socket.getfqdn()) and 'host' in name:
return 'tempest'
elif value.strip() != value:
return '"%s"' % value
@@ -221,7 +252,8 @@
except (ValueError, AttributeError) as err:
sys.stderr.write("%s\n" % str(err))
sys.exit(1)
- opt_help += ' (' + OPT_TYPES[opt_type] + ')'
+ opt_help = u'%s (%s)' % (opt_help,
+ OPT_TYPES[opt_type])
print('#', "\n# ".join(textwrap.wrap(opt_help, WORDWRAP_WIDTH)))
if opt.deprecated_opts:
for deprecated_opt in opt.deprecated_opts:
@@ -251,6 +283,11 @@
elif opt_type == LISTOPT:
assert(isinstance(opt_default, list))
print('#%s=%s' % (opt_name, ','.join(opt_default)))
+ elif opt_type == DICTOPT:
+ assert(isinstance(opt_default, dict))
+ opt_default_strlist = [str(key) + ':' + str(value)
+ for (key, value) in opt_default.items()]
+ print('#%s=%s' % (opt_name, ','.join(opt_default_strlist)))
elif opt_type == MULTISTROPT:
assert(isinstance(opt_default, list))
if not opt_default:
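
The generator can now also pull options from 'oslo.config.opts' entry points; as the comment above notes, each entry point must return (group, options) pairs. A minimal sketch of such a hook with hypothetical option names; the entry point itself would be declared in the library's setup.cfg::

    from oslo.config import cfg

    _service_opts = [
        cfg.StrOpt('endpoint', default='http://localhost:8080',
                   help='Hypothetical service endpoint'),
        cfg.IntOpt('timeout', default=30,
                   help='Hypothetical request timeout in seconds'),
    ]

    def list_opts():
        # Target of an 'oslo.config.opts' entry point: return pairs of
        # (group name or None for DEFAULT, list of Opt objects).
        return [('my_service', _service_opts), (None, [])]
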
diff --git a/tempest/openstack/common/gettextutils.py b/tempest/openstack/common/gettextutils.py
index 825c2e0..17f66f7 100644
--- a/tempest/openstack/common/gettextutils.py
+++ b/tempest/openstack/common/gettextutils.py
@@ -23,14 +23,11 @@
"""
import copy
+import functools
import gettext
-import logging
+import locale
+from logging import handlers
import os
-import re
-try:
- import UserString as _userString
-except ImportError:
- import collections as _userString
from babel import localedata
import six
@@ -38,6 +35,17 @@
_localedir = os.environ.get('tempest'.upper() + '_LOCALEDIR')
_t = gettext.translation('tempest', localedir=_localedir, fallback=True)
+# We use separate translation catalogs for each log level, so set up a
+# mapping between the log level name and the translator. The domain
+# for the log level is project_name + "-log-" + log_level so messages
+# for each level end up in their own catalog.
+_t_log_levels = dict(
+ (level, gettext.translation('tempest' + '-log-' + level,
+ localedir=_localedir,
+ fallback=True))
+ for level in ['info', 'warning', 'error', 'critical']
+)
+
_AVAILABLE_LANGUAGES = {}
USE_LAZY = False
@@ -56,13 +64,35 @@
def _(msg):
if USE_LAZY:
- return Message(msg, 'tempest')
+ return Message(msg, domain='tempest')
else:
if six.PY3:
return _t.gettext(msg)
return _t.ugettext(msg)
+def _log_translation(msg, level):
+ """Build a single translation of a log message
+ """
+ if USE_LAZY:
+ return Message(msg, domain='tempest' + '-log-' + level)
+ else:
+ translator = _t_log_levels[level]
+ if six.PY3:
+ return translator.gettext(msg)
+ return translator.ugettext(msg)
+
+# Translators for log levels.
+#
+# The abbreviated names are meant to reflect the usual use of a short
+# name like '_'. The "L" is for "log" and the other letter comes from
+# the level.
+_LI = functools.partial(_log_translation, level='info')
+_LW = functools.partial(_log_translation, level='warning')
+_LE = functools.partial(_log_translation, level='error')
+_LC = functools.partial(_log_translation, level='critical')
+
+
def install(domain, lazy=False):
"""Install a _() function using the given translation domain.
@@ -88,11 +118,6 @@
# messages in OpenStack. We override the standard _() function
# and % (format string) operation to build Message objects that can
# later be translated when we have more information.
- #
- # Also included below is an example LocaleHandler that translates
- # Messages to an associated locale, effectively allowing many logs,
- # each with their own locale.
-
def _lazy_gettext(msg):
"""Create and return a Message object.
@@ -103,7 +128,7 @@
Message encapsulates a string so that we can translate
it later when needed.
"""
- return Message(msg, domain)
+ return Message(msg, domain=domain)
from six import moves
moves.builtins.__dict__['_'] = _lazy_gettext
@@ -118,182 +143,144 @@
unicode=True)
-class Message(_userString.UserString, object):
- """Class used to encapsulate translatable messages."""
- def __init__(self, msg, domain):
- # _msg is the gettext msgid and should never change
- self._msg = msg
- self._left_extra_msg = ''
- self._right_extra_msg = ''
- self._locale = None
- self.params = None
- self.domain = domain
+class Message(six.text_type):
+ """A Message object is a unicode object that can be translated.
- @property
- def data(self):
- # NOTE(mrodden): this should always resolve to a unicode string
- # that best represents the state of the message currently
+ Translation of Message is done explicitly using the translate() method.
+ For all non-translation intents and purposes, a Message is simply unicode,
+ and can be treated as such.
+ """
- localedir = os.environ.get(self.domain.upper() + '_LOCALEDIR')
- if self.locale:
- lang = gettext.translation(self.domain,
- localedir=localedir,
- languages=[self.locale],
- fallback=True)
- else:
- # use system locale for translations
- lang = gettext.translation(self.domain,
- localedir=localedir,
- fallback=True)
+ def __new__(cls, msgid, msgtext=None, params=None,
+ domain='tempest', *args):
+ """Create a new Message object.
+ In order for translation to work gettext requires a message ID, this
+ msgid will be used as the base unicode text. It is also possible
+ for the msgid and the base unicode text to be different by passing
+ the msgtext parameter.
+ """
+ # If the base msgtext is not given, we use the default translation
+ # of the msgid (which is in English) just in case the system locale is
+ # not English, so that the base text will be in that locale by default.
+ if not msgtext:
+ msgtext = Message._translate_msgid(msgid, domain)
+ # We want to initialize the parent unicode with the actual object that
+ # would have been plain unicode if 'Message' was not enabled.
+ msg = super(Message, cls).__new__(cls, msgtext)
+ msg.msgid = msgid
+ msg.domain = domain
+ msg.params = params
+ return msg
+
+ def translate(self, desired_locale=None):
+ """Translate this message to the desired locale.
+
+ :param desired_locale: The desired locale to translate the message to,
+ if no locale is provided the message will be
+ translated to the system's default locale.
+
+ :returns: the translated message in unicode
+ """
+
+ translated_message = Message._translate_msgid(self.msgid,
+ self.domain,
+ desired_locale)
+ if self.params is None:
+ # No need for more translation
+ return translated_message
+
+ # This Message object may have been formatted with one or more
+ # Message objects as substitution arguments, given either as a single
+ # argument, part of a tuple, or as one or more values in a dictionary.
+ # When translating this Message we need to translate those Messages too
+ translated_params = _translate_args(self.params, desired_locale)
+
+ translated_message = translated_message % translated_params
+
+ return translated_message
+
+ @staticmethod
+ def _translate_msgid(msgid, domain, desired_locale=None):
+ if not desired_locale:
+ system_locale = locale.getdefaultlocale()
+ # If the system locale is not available to the runtime use English
+ if not system_locale[0]:
+ desired_locale = 'en_US'
+ else:
+ desired_locale = system_locale[0]
+
+ locale_dir = os.environ.get(domain.upper() + '_LOCALEDIR')
+ lang = gettext.translation(domain,
+ localedir=locale_dir,
+ languages=[desired_locale],
+ fallback=True)
if six.PY3:
- ugettext = lang.gettext
+ translator = lang.gettext
else:
- ugettext = lang.ugettext
+ translator = lang.ugettext
- full_msg = (self._left_extra_msg +
- ugettext(self._msg) +
- self._right_extra_msg)
-
- if self.params is not None:
- full_msg = full_msg % self.params
-
- return six.text_type(full_msg)
-
- @property
- def locale(self):
- return self._locale
-
- @locale.setter
- def locale(self, value):
- self._locale = value
- if not self.params:
- return
-
- # This Message object may have been constructed with one or more
- # Message objects as substitution parameters, given as a single
- # Message, or a tuple or Map containing some, so when setting the
- # locale for this Message we need to set it for those Messages too.
- if isinstance(self.params, Message):
- self.params.locale = value
- return
- if isinstance(self.params, tuple):
- for param in self.params:
- if isinstance(param, Message):
- param.locale = value
- return
- if isinstance(self.params, dict):
- for param in self.params.values():
- if isinstance(param, Message):
- param.locale = value
-
- def _save_dictionary_parameter(self, dict_param):
- full_msg = self.data
- # look for %(blah) fields in string;
- # ignore %% and deal with the
- # case where % is first character on the line
- keys = re.findall('(?:[^%]|^)?%\((\w*)\)[a-z]', full_msg)
-
- # if we don't find any %(blah) blocks but have a %s
- if not keys and re.findall('(?:[^%]|^)%[a-z]', full_msg):
- # apparently the full dictionary is the parameter
- params = copy.deepcopy(dict_param)
- else:
- params = {}
- for key in keys:
- try:
- params[key] = copy.deepcopy(dict_param[key])
- except TypeError:
- # cast uncopyable thing to unicode string
- params[key] = six.text_type(dict_param[key])
-
- return params
-
- def _save_parameters(self, other):
- # we check for None later to see if
- # we actually have parameters to inject,
- # so encapsulate if our parameter is actually None
- if other is None:
- self.params = (other, )
- elif isinstance(other, dict):
- self.params = self._save_dictionary_parameter(other)
- else:
- # fallback to casting to unicode,
- # this will handle the problematic python code-like
- # objects that cannot be deep-copied
- try:
- self.params = copy.deepcopy(other)
- except TypeError:
- self.params = six.text_type(other)
-
- return self
-
- # overrides to be more string-like
- def __unicode__(self):
- return self.data
-
- def __str__(self):
- if six.PY3:
- return self.__unicode__()
- return self.data.encode('utf-8')
-
- def __getstate__(self):
- to_copy = ['_msg', '_right_extra_msg', '_left_extra_msg',
- 'domain', 'params', '_locale']
- new_dict = self.__dict__.fromkeys(to_copy)
- for attr in to_copy:
- new_dict[attr] = copy.deepcopy(self.__dict__[attr])
-
- return new_dict
-
- def __setstate__(self, state):
- for (k, v) in state.items():
- setattr(self, k, v)
-
- # operator overloads
- def __add__(self, other):
- copied = copy.deepcopy(self)
- copied._right_extra_msg += other.__str__()
- return copied
-
- def __radd__(self, other):
- copied = copy.deepcopy(self)
- copied._left_extra_msg += other.__str__()
- return copied
+ translated_message = translator(msgid)
+ return translated_message
def __mod__(self, other):
- # do a format string to catch and raise
- # any possible KeyErrors from missing parameters
- self.data % other
- copied = copy.deepcopy(self)
- return copied._save_parameters(other)
+ # When we mod a Message we want the actual operation to be performed
+ # by the parent class (i.e. unicode()), the only thing we do here is
+ # save the original msgid and the parameters in case of a translation
+ params = self._sanitize_mod_params(other)
+ unicode_mod = super(Message, self).__mod__(params)
+ modded = Message(self.msgid,
+ msgtext=unicode_mod,
+ params=params,
+ domain=self.domain)
+ return modded
- def __mul__(self, other):
- return self.data * other
+ def _sanitize_mod_params(self, other):
+ """Sanitize the object being modded with this Message.
- def __rmul__(self, other):
- return other * self.data
-
- def __getitem__(self, key):
- return self.data[key]
-
- def __getslice__(self, start, end):
- return self.data.__getslice__(start, end)
-
- def __getattribute__(self, name):
- # NOTE(mrodden): handle lossy operations that we can't deal with yet
- # These override the UserString implementation, since UserString
- # uses our __class__ attribute to try and build a new message
- # after running the inner data string through the operation.
- # At that point, we have lost the gettext message id and can just
- # safely resolve to a string instead.
- ops = ['capitalize', 'center', 'decode', 'encode',
- 'expandtabs', 'ljust', 'lstrip', 'replace', 'rjust', 'rstrip',
- 'strip', 'swapcase', 'title', 'translate', 'upper', 'zfill']
- if name in ops:
- return getattr(self.data, name)
+ - Add support for modding 'None' so translation supports it
+ - Trim the modded object, which can be a large dictionary, to only
+ those keys that would actually be used in a translation
+ - Snapshot the object being modded, in case the message is
+ translated, it will be used as it was when the Message was created
+ """
+ if other is None:
+ params = (other,)
+ elif isinstance(other, dict):
+ # Merge the dictionaries
+ # Copy each item in case one does not support deep copy.
+ params = {}
+ if isinstance(self.params, dict):
+ for key, val in self.params.items():
+ params[key] = self._copy_param(val)
+ for key, val in other.items():
+ params[key] = self._copy_param(val)
else:
- return _userString.UserString.__getattribute__(self, name)
+ params = self._copy_param(other)
+ return params
+
+ def _copy_param(self, param):
+ try:
+ return copy.deepcopy(param)
+ except Exception:
+ # Fallback to casting to unicode this will handle the
+ # python code-like objects that can't be deep-copied
+ return six.text_type(param)
+
+ def __add__(self, other):
+ msg = _('Message objects do not support addition.')
+ raise TypeError(msg)
+
+ def __radd__(self, other):
+ return self.__add__(other)
+
+ def __str__(self):
+ # NOTE(luisg): Logging in python 2.6 tries to str() log records,
+ # and it expects specifically a UnicodeError in order to proceed.
+ msg = _('Message objects do not support str() because they may '
+ 'contain non-ascii characters. '
+ 'Please use unicode() or translate() instead.')
+ raise UnicodeError(msg)
def get_available_languages(domain):
@@ -319,53 +306,143 @@
list_identifiers = (getattr(localedata, 'list', None) or
getattr(localedata, 'locale_identifiers'))
locale_identifiers = list_identifiers()
+
for i in locale_identifiers:
if find(i) is not None:
language_list.append(i)
+
+ # NOTE(luisg): Babel>=1.0,<1.3 has a bug where some OpenStack supported
+ # locales (e.g. 'zh_CN', and 'zh_TW') aren't supported even though they
+ # are perfectly legitimate locales:
+ # https://github.com/mitsuhiko/babel/issues/37
+ # In Babel 1.3 they fixed the bug and they support these locales, but
+ # they are still not explicitly "listed" by locale_identifiers().
+ # That is why we add the locales here explicitly if necessary so that
+ # they are listed as supported.
+ aliases = {'zh': 'zh_CN',
+ 'zh_Hant_HK': 'zh_HK',
+ 'zh_Hant': 'zh_TW',
+ 'fil': 'tl_PH'}
+ for (locale, alias) in six.iteritems(aliases):
+ if locale in language_list and alias not in language_list:
+ language_list.append(alias)
+
_AVAILABLE_LANGUAGES[domain] = language_list
return copy.copy(language_list)
-def get_localized_message(message, user_locale):
- """Gets a localized version of the given message in the given locale.
+def translate(obj, desired_locale=None):
+ """Gets the translated unicode representation of the given object.
- If the message is not a Message object the message is returned as-is.
- If the locale is None the message is translated to the default locale.
+ If the object is not translatable it is returned as-is.
+ If the locale is None the object is translated to the system locale.
- :returns: the translated message in unicode, or the original message if
+ :param obj: the object to translate
+ :param desired_locale: the locale to translate the message to, if None the
+ default system locale will be used
+ :returns: the translated object in unicode, or the original object if
it could not be translated
"""
- translated = message
+ message = obj
+ if not isinstance(message, Message):
+ # If the object to translate is not already translatable,
+ # let's first get its unicode representation
+ message = six.text_type(obj)
if isinstance(message, Message):
- original_locale = message.locale
- message.locale = user_locale
- translated = six.text_type(message)
- message.locale = original_locale
- return translated
+ # Even after unicoding() we still need to check if we are
+ # running with translatable unicode before translating
+ return message.translate(desired_locale)
+ return obj
-class LocaleHandler(logging.Handler):
- """Handler that can have a locale associated to translate Messages.
+def _translate_args(args, desired_locale=None):
+ """Translates all the translatable elements of the given arguments object.
- A quick example of how to utilize the Message class above.
- LocaleHandler takes a locale and a target logging.Handler object
- to forward LogRecord objects to after translating the internal Message.
+ This method is used for translating the translatable values in method
+ arguments which include values of tuples or dictionaries.
+ If the object is not a tuple or a dictionary the object itself is
+ translated if it is translatable.
+
+ If the locale is None the object is translated to the system locale.
+
+ :param args: the args to translate
+ :param desired_locale: the locale to translate the args to, if None the
+ default system locale will be used
+ :returns: a new args object with the translated contents of the original
+ """
+ if isinstance(args, tuple):
+ return tuple(translate(v, desired_locale) for v in args)
+ if isinstance(args, dict):
+ translated_dict = {}
+ for (k, v) in six.iteritems(args):
+ translated_v = translate(v, desired_locale)
+ translated_dict[k] = translated_v
+ return translated_dict
+ return translate(args, desired_locale)
+
+
+class TranslationHandler(handlers.MemoryHandler):
+ """Handler that translates records before logging them.
+
+ The TranslationHandler takes a locale and a target logging.Handler object
+ to forward LogRecord objects to after translating them. This handler
+ depends on Message objects being logged, instead of regular strings.
+
+ The handler can be configured declaratively in the logging.conf as follows:
+
+ [handlers]
+ keys = translatedlog, translator
+
+ [handler_translatedlog]
+ class = handlers.WatchedFileHandler
+ args = ('/var/log/api-localized.log',)
+ formatter = context
+
+ [handler_translator]
+ class = openstack.common.log.TranslationHandler
+ target = translatedlog
+ args = ('zh_CN',)
+
+ If the specified locale is not available in the system, the handler will
+ log in the default locale.
"""
- def __init__(self, locale, target):
- """Initialize a LocaleHandler
+ def __init__(self, locale=None, target=None):
+ """Initialize a TranslationHandler
:param locale: locale to use for translating messages
:param target: logging.Handler object to forward
LogRecord objects to after translation
"""
- logging.Handler.__init__(self)
+ # NOTE(luisg): In order to allow this handler to be a wrapper for
+ # other handlers, such as a FileHandler, and still be able to
+ # configure it using logging.conf, this handler has to extend
+ # MemoryHandler because only the MemoryHandlers' logging.conf
+ # parsing is implemented such that it accepts a target handler.
+ handlers.MemoryHandler.__init__(self, capacity=0, target=target)
self.locale = locale
- self.target = target
+
+ def setFormatter(self, fmt):
+ self.target.setFormatter(fmt)
def emit(self, record):
- if isinstance(record.msg, Message):
- # set the locale and resolve to a string
- record.msg.locale = self.locale
+ # We save the message from the original record to restore it
+ # after translation, so other handlers are not affected by this
+ original_msg = record.msg
+ original_args = record.args
+
+ try:
+ self._translate_and_log_record(record)
+ finally:
+ record.msg = original_msg
+ record.args = original_args
+
+ def _translate_and_log_record(self, record):
+ record.msg = translate(record.msg, self.locale)
+
+ # In addition to translating the message, we also need to translate
+ # arguments that were passed to the log method that were not part
+ # of the main message e.g., log.info(_('Some message %s'), this_one))
+ record.args = _translate_args(record.args, self.locale)
self.target.emit(record)
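
In the rewritten module a Message behaves as ordinary unicode until translate() is asked for a specific locale, and '%' formatting records the parameters for later re-translation. A hedged usage sketch; it assumes the module is importable as shown and that, with no catalogs installed, gettext falls back to the msgid::

    from tempest.openstack.common import gettextutils

    msg = gettextutils.Message('Volume %(id)s failed to build') % {'id': 'vol-1'}

    # The object is still usable as unicode text; translate() re-renders it
    # for the requested locale, falling back to the original msgid here.
    print(msg.translate('en_US'))   # Volume vol-1 failed to build
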
diff --git a/tempest/openstack/common/importutils.py b/tempest/openstack/common/importutils.py
index 4fd9ae2..6c0d3b2 100644
--- a/tempest/openstack/common/importutils.py
+++ b/tempest/openstack/common/importutils.py
@@ -58,6 +58,13 @@
return sys.modules[import_str]
+def import_versioned_module(version, submodule=None):
+ module = 'tempest.v%s' % version
+ if submodule:
+ module = '.'.join((module, submodule))
+ return import_module(module)
+
+
def try_import(import_str, default=None):
"""Try to import a module and if it fails return default."""
try:
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index f06a850..0ef34c6 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -24,6 +24,7 @@
from novaclient import exceptions as nova_exceptions
from tempest.api.network import common as net_common
+from tempest import auth
from tempest import clients
from tempest.common import isolated_creds
from tempest.common.utils import data_utils
@@ -65,42 +66,42 @@
cls.__name__, tempest_client=False,
network_resources=cls.network_resources)
- username, password, tenant_name = cls.credentials()
-
cls.manager = clients.OfficialClientManager(
- username, password, tenant_name)
+ credentials=cls.credentials())
cls.compute_client = cls.manager.compute_client
cls.image_client = cls.manager.image_client
+ cls.baremetal_client = cls.manager.baremetal_client
cls.identity_client = cls.manager.identity_client
cls.network_client = cls.manager.network_client
cls.volume_client = cls.manager.volume_client
cls.object_storage_client = cls.manager.object_storage_client
cls.orchestration_client = cls.manager.orchestration_client
+ cls.data_processing_client = cls.manager.data_processing_client
cls.resource_keys = {}
cls.os_resources = []
@classmethod
- def _get_credentials(cls, get_creds, prefix):
+ def _get_credentials(cls, get_creds, ctype):
if CONF.compute.allow_tenant_isolation:
- username, tenant_name, password = get_creds()
+ creds = get_creds()
else:
- username = getattr(CONF.identity, prefix + 'username')
- password = getattr(CONF.identity, prefix + 'password')
- tenant_name = getattr(CONF.identity, prefix + 'tenant_name')
- return username, password, tenant_name
+ creds = auth.get_default_credentials(ctype)
+ return creds
@classmethod
def credentials(cls):
- return cls._get_credentials(cls.isolated_creds.get_primary_creds, '')
+ return cls._get_credentials(cls.isolated_creds.get_primary_creds,
+ 'user')
@classmethod
def alt_credentials(cls):
- return cls._get_credentials(cls.isolated_creds.get_alt_creds, 'alt_')
+ return cls._get_credentials(cls.isolated_creds.get_alt_creds,
+ 'alt_user')
@classmethod
def admin_credentials(cls):
return cls._get_credentials(cls.isolated_creds.get_admin_creds,
- 'admin_')
+ 'identity_admin')
@staticmethod
def cleanup_resource(resource, test_name):
@@ -283,7 +284,7 @@
return rules
def create_server(self, client=None, name=None, image=None, flavor=None,
- create_kwargs={}):
+ wait=True, create_kwargs={}):
if client is None:
client = self.compute_client
if name is None:
@@ -318,7 +319,8 @@
server = client.servers.create(name, image, flavor, **create_kwargs)
self.assertEqual(server.name, name)
self.set_resource(name, server)
- self.status_timeout(client.servers, server.id, 'ACTIVE')
+ if wait:
+ self.status_timeout(client.servers, server.id, 'ACTIVE')
# The instance retrieved on creation is missing network
# details, necessitating retrieval after it becomes active to
# ensure correct details.
@@ -439,6 +441,82 @@
LOG.debug("image:%s" % self.image)
+class BaremetalScenarioTest(OfficialClientTest):
+ @classmethod
+ def setUpClass(cls):
+ super(BaremetalScenarioTest, cls).setUpClass()
+
+ if (not CONF.service_available.ironic or
+ not CONF.baremetal.driver_enabled):
+ msg = 'Ironic not available or Ironic compute driver not enabled'
+ raise cls.skipException(msg)
+
+ # use an admin client manager for baremetal client
+ admin_creds = cls.admin_credentials()
+ manager = clients.OfficialClientManager(credentials=admin_creds)
+ cls.baremetal_client = manager.baremetal_client
+
+ # allow any issues obtaining the node list to raise early
+ cls.baremetal_client.node.list()
+
+ def _node_state_timeout(self, node_id, state_attr,
+ target_states, timeout=10, interval=1):
+ if not isinstance(target_states, list):
+ target_states = [target_states]
+
+ def check_state():
+ node = self.get_node(node_id=node_id)
+ if getattr(node, state_attr) in target_states:
+ return True
+ return False
+
+ if not tempest.test.call_until_true(
+ check_state, timeout, interval):
+ msg = ("Timed out waiting for node %s to reach %s state(s) %s" %
+ (node_id, state_attr, target_states))
+ raise exceptions.TimeoutException(msg)
+
+ def wait_provisioning_state(self, node_id, state, timeout):
+ self._node_state_timeout(
+ node_id=node_id, state_attr='provision_state',
+ target_states=state, timeout=timeout)
+
+ def wait_power_state(self, node_id, state):
+ self._node_state_timeout(
+ node_id=node_id, state_attr='power_state',
+ target_states=state, timeout=CONF.baremetal.power_timeout)
+
+ def wait_node(self, instance_id):
+ """Waits for a node to be associated with instance_id."""
+ from ironicclient import exc as ironic_exceptions
+
+ def _get_node():
+ node = None
+ try:
+ node = self.get_node(instance_id=instance_id)
+ except ironic_exceptions.HTTPNotFound:
+ pass
+ return node is not None
+
+ if not tempest.test.call_until_true(
+ _get_node, CONF.baremetal.association_timeout, 1):
+ msg = ('Timed out waiting to get Ironic node by instance id %s'
+ % instance_id)
+ raise exceptions.TimeoutException(msg)
+
+ def get_node(self, node_id=None, instance_id=None):
+ if node_id:
+ return self.baremetal_client.node.get(node_id)
+ elif instance_id:
+ return self.baremetal_client.node.get_by_instance_uuid(instance_id)
+
+ def get_ports(self, node_id):
+ ports = []
+ for port in self.baremetal_client.node.list_ports(node_id):
+ ports.append(self.baremetal_client.port.get(port.uuid))
+ return ports
+
+
class NetworkScenarioTest(OfficialClientTest):
"""
Base class for network scenario tests
@@ -462,13 +540,7 @@
@classmethod
def setUpClass(cls):
super(NetworkScenarioTest, cls).setUpClass()
- if CONF.compute.allow_tenant_isolation:
- cls.tenant_id = cls.isolated_creds.get_primary_tenant().id
- else:
- cls.tenant_id = cls.manager._get_identity_client(
- CONF.identity.username,
- CONF.identity.password,
- CONF.identity.tenant_name).tenant_id
+ cls.tenant_id = cls.manager.identity_client.tenant_id
def _create_network(self, tenant_id, namestart='network-smoke-'):
name = data_utils.rand_name(namestart)
@@ -974,10 +1046,10 @@
@classmethod
def credentials(cls):
- username = CONF.identity.admin_username
- password = CONF.identity.admin_password
- tenant_name = CONF.identity.tenant_name
- return username, password, tenant_name
+ admin_creds = auth.get_default_credentials('identity_admin')
+ creds = auth.get_default_credentials('user')
+ admin_creds.tenant_name = creds.tenant_name
+ return admin_creds
def _load_template(self, base_file, file_name):
filepath = os.path.join(os.path.dirname(os.path.realpath(base_file)),
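The node-state helpers added above lean on tempest.test.call_until_true; as a point of reference, a minimal sketch of that polling pattern, assuming the (func, duration, sleep_for) argument order used in the calls above:

    import time

    def call_until_true(func, duration, sleep_for):
        """Poll func() until it returns True or `duration` seconds pass."""
        deadline = time.time() + duration
        while time.time() < deadline:
            if func():
                return True
            time.sleep(sleep_for)
        return False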
diff --git a/tempest/scenario/test_baremetal_basic_ops.py b/tempest/scenario/test_baremetal_basic_ops.py
new file mode 100644
index 0000000..c53aa83
--- /dev/null
+++ b/tempest/scenario/test_baremetal_basic_ops.py
@@ -0,0 +1,147 @@
+#
+# Copyright 2014 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest import config
+from tempest.openstack.common import log as logging
+from tempest.scenario import manager
+from tempest import test
+
+CONF = config.CONF
+
+LOG = logging.getLogger(__name__)
+
+
+# power/provision states as of icehouse
+class PowerStates(object):
+ """Possible power states of an Ironic node."""
+ POWER_ON = 'power on'
+ POWER_OFF = 'power off'
+ REBOOT = 'rebooting'
+ SUSPEND = 'suspended'
+
+
+class ProvisionStates(object):
+ """Possible provision states of an Ironic node."""
+ NOSTATE = None
+ INIT = 'initializing'
+ ACTIVE = 'active'
+ BUILDING = 'building'
+ DEPLOYWAIT = 'wait call-back'
+ DEPLOYING = 'deploying'
+ DEPLOYFAIL = 'deploy failed'
+ DEPLOYDONE = 'deploy complete'
+ DELETING = 'deleting'
+ DELETED = 'deleted'
+ ERROR = 'error'
+
+
+class BaremetalBasicOptsPXESSH(manager.BaremetalScenarioTest):
+ """
+ This smoke test exercises the pxe_ssh Ironic driver. It follows this basic
+ set of operations:
+ * Creates a keypair
+ * Boots an instance using the keypair
+ * Monitors the associated Ironic node for power and
+ expected state transitions
+ * Validates Ironic node's driver_info has been properly
+ updated
+ * Validates Ironic node's port data has been properly updated
+ * Verifies SSH connectivity using created keypair via fixed IP
+ * Associates a floating ip
+ * Verifies SSH connectivity using created keypair via floating IP
+ * Deletes instance
+ * Monitors the associated Ironic node for power and
+ expected state transitions
+ """
+ def add_keypair(self):
+ self.keypair = self.create_keypair()
+
+ def add_floating_ip(self):
+ floating_ip = self.compute_client.floating_ips.create()
+ self.instance.add_floating_ip(floating_ip)
+ return floating_ip.ip
+
+ def verify_connectivity(self, ip=None):
+ if ip:
+ dest = self.get_remote_client(ip)
+ else:
+ dest = self.get_remote_client(self.instance)
+ dest.validate_authentication()
+
+ def validate_driver_info(self):
+ f_id = self.instance.flavor['id']
+ flavor_extra = self.compute_client.flavors.get(f_id).get_keys()
+ driver_info = self.node.driver_info
+ self.assertEqual(driver_info['pxe_deploy_kernel'],
+ flavor_extra['baremetal:deploy_kernel_id'])
+ self.assertEqual(driver_info['pxe_deploy_ramdisk'],
+ flavor_extra['baremetal:deploy_ramdisk_id'])
+ self.assertEqual(driver_info['pxe_image_source'],
+ self.instance.image['id'])
+
+ def validate_ports(self):
+ for port in self.get_ports(self.node.uuid):
+ n_port_id = port.extra['vif_port_id']
+ n_port = self.network_client.show_port(n_port_id)['port']
+ self.assertEqual(n_port['device_id'], self.instance.id)
+ self.assertEqual(n_port['mac_address'], port.address)
+
+ def boot_instance(self):
+ create_kwargs = {
+ 'key_name': self.keypair.id
+ }
+ self.instance = self.create_server(
+ wait=False, create_kwargs=create_kwargs)
+
+ self.set_resource('instance', self.instance)
+
+ self.wait_node(self.instance.id)
+ self.node = self.get_node(instance_id=self.instance.id)
+
+ self.wait_power_state(self.node.uuid, PowerStates.POWER_ON)
+
+ self.wait_provisioning_state(
+ self.node.uuid,
+ [ProvisionStates.DEPLOYWAIT, ProvisionStates.ACTIVE],
+ timeout=15)
+
+ self.wait_provisioning_state(self.node.uuid, ProvisionStates.ACTIVE,
+ timeout=CONF.baremetal.active_timeout)
+
+ self.status_timeout(
+ self.compute_client.servers, self.instance.id, 'ACTIVE')
+
+ self.node = self.get_node(instance_id=self.instance.id)
+ self.instance = self.compute_client.servers.get(self.instance.id)
+
+ def terminate_instance(self):
+ self.instance.delete()
+ self.remove_resource('instance')
+ self.wait_power_state(self.node.uuid, PowerStates.POWER_OFF)
+ self.wait_provisioning_state(
+ self.node.uuid,
+ ProvisionStates.NOSTATE,
+ timeout=CONF.baremetal.unprovision_timeout)
+
+ @test.services('baremetal', 'compute', 'image', 'network')
+ def test_baremetal_server_ops(self):
+ self.add_keypair()
+ self.boot_instance()
+ self.validate_driver_info()
+ self.validate_ports()
+ self.verify_connectivity()
+ floating_ip = self.add_floating_ip()
+ self.verify_connectivity(ip=floating_ip)
+ self.terminate_instance()
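A standalone sketch of the instance-to-node lookup that boot_instance() performs via wait_node()/get_node(); baremetal_client is assumed to be the same ironicclient handle exposed by BaremetalScenarioTest:

    from ironicclient import exc as ironic_exceptions

    def node_for_instance(baremetal_client, instance_id):
        """Return the Ironic node backing a Nova instance, or None if absent."""
        try:
            return baremetal_client.node.get_by_instance_uuid(instance_id)
        except ironic_exceptions.HTTPNotFound:
            return None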
diff --git a/tempest/scenario/test_large_ops.py b/tempest/scenario/test_large_ops.py
index b7a30f8..0210c56 100644
--- a/tempest/scenario/test_large_ops.py
+++ b/tempest/scenario/test_large_ops.py
@@ -31,7 +31,7 @@
Test large operations.
This test below:
- * Spin up multiple instances in one nova call
+ * Spin up multiple instances in one nova call, and repeat three times
* as a regular user
* TODO: same thing for cinder
@@ -69,3 +69,5 @@
return
self.glance_image_create()
self.nova_boot()
+ self.nova_boot()
+ self.nova_boot()
diff --git a/tempest/scenario/test_load_balancer_basic.py b/tempest/scenario/test_load_balancer_basic.py
index ce2c66f..d771aed 100644
--- a/tempest/scenario/test_load_balancer_basic.py
+++ b/tempest/scenario/test_load_balancer_basic.py
@@ -17,7 +17,6 @@
import urllib
from tempest.api.network import common as net_common
-from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
from tempest.scenario import manager
@@ -59,24 +58,45 @@
def setUpClass(cls):
super(TestLoadBalancerBasic, cls).setUpClass()
cls.check_preconditions()
- cls.security_groups = {}
cls.servers_keypairs = {}
cls.members = []
cls.floating_ips = {}
- cls.server_ip = None
- cls.vip_ip = None
+ cls.server_ips = {}
cls.port1 = 80
cls.port2 = 88
- def _create_security_groups(self):
- self.security_groups[self.tenant_id] =\
- self._create_security_group_neutron(tenant_id=self.tenant_id)
+ def setUp(self):
+ super(TestLoadBalancerBasic, self).setUp()
+ self.server_ips = {}
+ self._create_security_group()
- def _create_server(self):
- tenant_id = self.tenant_id
- name = data_utils.rand_name("smoke_server-")
+ def cleanup_wrapper(self, resource):
+ self.cleanup_resource(resource, self.__class__.__name__)
+
+ def _create_security_group(self):
+ self.security_group = self._create_security_group_neutron(
+ tenant_id=self.tenant_id)
+ self._create_security_group_rules_for_port(self.port1)
+ self._create_security_group_rules_for_port(self.port2)
+ self.addCleanup(self.cleanup_wrapper, self.security_group)
+
+ def _create_security_group_rules_for_port(self, port):
+ rule = {
+ 'direction': 'ingress',
+ 'protocol': 'tcp',
+ 'port_range_min': port,
+ 'port_range_max': port,
+ }
+ self._create_security_group_rule(
+ client=self.network_client,
+ secgroup=self.security_group,
+ tenant_id=self.tenant_id,
+ **rule)
+
+ def _create_server(self, name):
keypair = self.create_keypair(name='keypair-%s' % name)
- security_groups = [self.security_groups[tenant_id].name]
+ self.addCleanup(self.cleanup_wrapper, keypair)
+ security_groups = [self.security_group.name]
net = self._list_networks(tenant_id=self.tenant_id)[0]
create_kwargs = {
'nics': [
@@ -87,51 +107,106 @@
}
server = self.create_server(name=name,
create_kwargs=create_kwargs)
- self.servers_keypairs[server] = keypair
+ self.addCleanup(self.cleanup_wrapper, server)
+ self.servers_keypairs[server.id] = keypair
if (config.network.public_network_id and not
config.network.tenant_networks_reachable):
public_network_id = config.network.public_network_id
floating_ip = self._create_floating_ip(
server, public_network_id)
+ self.addCleanup(self.cleanup_wrapper, floating_ip)
self.floating_ips[floating_ip] = server
- self.server_ip = floating_ip.floating_ip_address
+ self.server_ips[server.id] = floating_ip.floating_ip_address
else:
- self.server_ip = server.networks[net['name']][0]
+ self.server_ips[server.id] = server.networks[net['name']][0]
self.assertTrue(self.servers_keypairs)
return server
- def _start_servers(self, server):
+ def _create_servers(self):
+ for count in range(2):
+ self._create_server(name=("server%s" % (count + 1)))
+ self.assertEqual(len(self.servers_keypairs), 2)
+
+ def _start_servers(self):
"""
+ Start two backends
+
1. SSH to the instance
2. Start two http backends listening on ports 80 and 88 respectively
+ In case there are two instances, each backend is created on a separate
+ instance.
+
+ The backends are inetd services. To start them we need to edit
+ /etc/inetd.conf in the following way:
+ www stream tcp nowait root /bin/sh sh /home/cirros/script_name
+
+ where /home/cirros/script_name is the path to a script which
+ echoes the response:
+ echo -e 'HTTP/1.0 200 OK\r\n\r\nserver_name'
+
+ If we want the server to listen on port 88, then we use
+ "kerberos" instead of "www".
"""
- private_key = self.servers_keypairs[server].private_key
- ssh_client = self.get_remote_client(
- server_or_ip=self.server_ip,
- private_key=private_key).ssh_client
- start_server = "while true; do echo -e 'HTTP/1.0 200 OK\r\n\r\n" \
- "%(server)s' | sudo nc -l -p %(port)s ; done &"
- cmd = start_server % {'server': 'server1',
- 'port': self.port1}
- ssh_client.exec_command(cmd)
- cmd = start_server % {'server': 'server2',
- 'port': self.port2}
- ssh_client.exec_command(cmd)
+ for server_id, ip in self.server_ips.iteritems():
+ private_key = self.servers_keypairs[server_id].private_key
+ server_name = self.compute_client.servers.get(server_id).name
+ ssh_client = self.get_remote_client(
+ server_or_ip=ip,
+ private_key=private_key)
+ ssh_client.validate_authentication()
+ # Create service for inetd
+ create_script = """sudo sh -c "echo -e \\"echo -e 'HTTP/1.0 """ \
+ """200 OK\\\\\\r\\\\\\n\\\\\\r\\\\\\n""" \
+ """%(server)s'\\" >>/home/cirros/%(script)s\""""
- def _check_connection(self, check_ip):
- def try_connect(ip):
+ cmd = create_script % {
+ 'server': server_name,
+ 'script': 'script1'}
+ ssh_client.exec_command(cmd)
+ # Configure inetd
+ configure_inetd = """sudo sh -c "echo -e \\"%(service)s """ \
+ """stream tcp nowait root /bin/sh sh """ \
+ """/home/cirros/%(script)s\\" >> """ \
+ """/etc/inetd.conf\""""
+ # "www" stands for port 80
+ cmd = configure_inetd % {'service': 'www',
+ 'script': 'script1'}
+ ssh_client.exec_command(cmd)
+
+ if len(self.server_ips) == 1:
+ cmd = create_script % {'server': 'server2',
+ 'script': 'script2'}
+ ssh_client.exec_command(cmd)
+ # "kerberos" stands for port 88
+ cmd = configure_inetd % {'service': 'kerberos',
+ 'script': 'script2'}
+ ssh_client.exec_command(cmd)
+
+ # Get PIDs of inetd
+ pids = ssh_client.get_pids('inetd')
+ if pids != ['']:
+ # If there are any inetd processes, reload them
+ kill_cmd = "sudo kill -HUP %s" % ' '.join(pids)
+ ssh_client.exec_command(kill_cmd)
+ else:
+ # In other case start inetd
+ start_inetd = "sudo /usr/sbin/inetd /etc/inetd.conf"
+ ssh_client.exec_command(start_inetd)
+
+ def _check_connection(self, check_ip, port=80):
+ def try_connect(ip, port):
try:
- urllib.urlopen("http://{0}/".format(ip))
- return True
+ resp = urllib.urlopen("http://{0}:{1}/".format(ip, port))
+ if resp.getcode() == 200:
+ return True
+ return False
except IOError:
return False
timeout = config.compute.ping_timeout
- timer = 0
- while not try_connect(check_ip):
- time.sleep(1)
- timer += 1
- if timer >= timeout:
+ start = time.time()
+ while not try_connect(check_ip, port):
+ if (time.time() - start) > timeout:
message = "Timed out trying to connect to %s" % check_ip
raise exceptions.TimeoutException(message)
@@ -142,30 +217,37 @@
self.subnet = net_common.DeletableSubnet(client=self.network_client,
**subnet)
self.pool = super(TestLoadBalancerBasic, self)._create_pool(
- 'ROUND_ROBIN',
- 'HTTP',
- self.subnet.id)
+ lb_method='ROUND_ROBIN',
+ protocol='HTTP',
+ subnet_id=self.subnet.id)
+ self.addCleanup(self.cleanup_wrapper, self.pool)
self.assertTrue(self.pool)
- def _create_members(self, server_ids):
+ def _create_members(self):
"""
Create two members.
In case there is only one server, create both members with the same ip
but with different ports to listen on.
"""
- servers = self.compute_client.servers.list()
- for server in servers:
- if server.id in server_ids:
- ip = self.server_ip
- pool_id = self.pool.id
- if len(set(server_ids)) == 1 or len(servers) == 1:
- member1 = self._create_member(ip, self.port1, pool_id)
- member2 = self._create_member(ip, self.port2, pool_id)
- self.members.extend([member1, member2])
- else:
- member = self._create_member(ip, self.port1, pool_id)
- self.members.append(member)
+
+ for server_id, ip in self.server_ips.iteritems():
+ if len(self.server_ips) == 1:
+ member1 = self._create_member(address=ip,
+ protocol_port=self.port1,
+ pool_id=self.pool.id)
+ self.addCleanup(self.cleanup_wrapper, member1)
+ member2 = self._create_member(address=ip,
+ protocol_port=self.port2,
+ pool_id=self.pool.id)
+ self.addCleanup(self.cleanup_wrapper, member2)
+ self.members.extend([member1, member2])
+ else:
+ member = self._create_member(address=ip,
+ protocol_port=self.port1,
+ pool_id=self.pool.id)
+ self.addCleanup(self.cleanup_wrapper, member)
+ self.members.append(member)
self.assertTrue(self.members)
def _assign_floating_ip_to_vip(self, vip):
@@ -173,22 +255,23 @@
port_id = vip.port_id
floating_ip = self._create_floating_ip(vip, public_network_id,
port_id=port_id)
+ self.addCleanup(self.cleanup_wrapper, floating_ip)
self.floating_ips.setdefault(vip.id, [])
self.floating_ips[vip.id].append(floating_ip)
def _create_load_balancer(self):
self._create_pool()
- self._create_members([self.servers_keypairs.keys()[0].id])
- subnet_id = self.subnet.id
- pool_id = self.pool.id
- self.vip = super(TestLoadBalancerBasic, self)._create_vip('HTTP', 80,
- subnet_id,
- pool_id)
- self._status_timeout(NeutronRetriever(self.network_client,
- self.network_client.vip_path,
- net_common.DeletableVip),
- self.vip.id,
- expected_status='ACTIVE')
+ self._create_members()
+ self.vip = self._create_vip(protocol='HTTP',
+ protocol_port=80,
+ subnet_id=self.subnet.id,
+ pool_id=self.pool.id)
+ self.addCleanup(self.cleanup_wrapper, self.vip)
+ self.status_timeout(NeutronRetriever(self.network_client,
+ self.network_client.vip_path,
+ net_common.DeletableVip),
+ self.vip.id,
+ expected_status='ACTIVE')
if (config.network.public_network_id and not
config.network.tenant_networks_reachable):
self._assign_floating_ip_to_vip(self.vip)
@@ -199,33 +282,50 @@
def _check_load_balancing(self):
"""
- 1. Send 10 requests on the floating ip associated with the VIP
+ 1. Send 100 requests on the floating ip associated with the VIP
2. Check that the requests are shared between
the two servers and that both of them get equal portions
of the requests
"""
self._check_connection(self.vip_ip)
+ resp = self._send_requests(self.vip_ip)
+ self.assertEqual(set(["server1\n", "server2\n"]), set(resp))
+ self.assertEqual(50, resp.count("server1\n"))
+ self.assertEqual(50, resp.count("server2\n"))
+
+ def _send_requests(self, vip_ip):
resp = []
- for count in range(10):
+ for count in range(100):
resp.append(
urllib.urlopen(
- "http://{0}/".format(self.vip_ip)).read())
- self.assertEqual(set(["server1\n", "server2\n"]), set(resp))
- self.assertEqual(5, resp.count("server1\n"))
- self.assertEqual(5, resp.count("server2\n"))
+ "http://{0}/".format(vip_ip)).read())
+ return resp
+ @test.skip_because(bug='1295165')
@test.attr(type='smoke')
@test.services('compute', 'network')
def test_load_balancer_basic(self):
- self._create_security_groups()
- server = self._create_server()
- self._start_servers(server)
+ self._create_server('server1')
+ self._start_servers()
self._create_load_balancer()
self._check_load_balancing()
class NeutronRetriever(object):
+ """
+ Helper class that makes it possible to handle neutron objects returned
+ by GET requests as attribute dicts.
+
+ When the get() method is called, the returned dictionary is wrapped into
+ a corresponding DeletableResource class which provides attribute access
+ to dictionary values.
+
+ Usage:
+ This retriever allows status_timeout from tempest.manager to be
+ used with Neutron objects.
+ """
+
def __init__(self, network_client, path, resource):
self.network_client = network_client
self.path = path
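A small sketch of the /etc/inetd.conf line that _start_servers() appends on the cirros guest, following the format quoted in its docstring ('www' maps to port 80, 'kerberos' to port 88):

    def inetd_line(service, script):
        return ('%s stream tcp nowait root /bin/sh sh /home/cirros/%s'
                % (service, script))

    print(inetd_line('www', 'script1'))
    # www stream tcp nowait root /bin/sh sh /home/cirros/script1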
diff --git a/tempest/scenario/test_minimum_basic.py b/tempest/scenario/test_minimum_basic.py
index 39b7760..24d2677 100644
--- a/tempest/scenario/test_minimum_basic.py
+++ b/tempest/scenario/test_minimum_basic.py
@@ -97,7 +97,7 @@
except Exception:
LOG.exception('ssh to server failed')
self._log_console_output()
- debug.log_ip_ns()
+ debug.log_net_debug()
raise
def check_partitions(self):
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
new file mode 100644
index 0000000..0ba65cf
--- /dev/null
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -0,0 +1,198 @@
+# Copyright 2014 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import testtools
+
+from tempest.common import debug
+from tempest.common.utils import data_utils
+from tempest import config
+from tempest.openstack.common import log as logging
+from tempest.scenario import manager
+from tempest.test import services
+
+CONF = config.CONF
+LOG = logging.getLogger(__name__)
+
+
+class TestNetworkAdvancedServerOps(manager.NetworkScenarioTest):
+
+ """
+ This test case checks VM connectivity after some advanced
+ instance operations executed:
+
+ * Stop/Start an instance
+ * Reboot an instance
+ * Rebuild an instance
+ * Pause/Unpause an instance
+ * Suspend/Resume an instance
+ * Resize an instance
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestNetworkAdvancedServerOps, cls).setUpClass()
+ cls.check_preconditions()
+ if not (CONF.network.tenant_networks_reachable
+ or CONF.network.public_network_id):
+ msg = ('Either tenant_networks_reachable must be "true", or '
+ 'public_network_id must be defined.')
+ cls.enabled = False
+ raise cls.skipException(msg)
+
+ def cleanup_wrapper(self, resource):
+ self.cleanup_resource(resource, self.__class__.__name__)
+
+ def setUp(self):
+ super(TestNetworkAdvancedServerOps, self).setUp()
+ key_name = data_utils.rand_name('keypair-smoke-')
+ self.keypair = self.create_keypair(name=key_name)
+ self.addCleanup(self.cleanup_wrapper, self.keypair)
+ security_group =\
+ self._create_security_group_neutron(tenant_id=self.tenant_id)
+ self.addCleanup(self.cleanup_wrapper, security_group)
+ network = self._create_network(self.tenant_id)
+ self.addCleanup(self.cleanup_wrapper, network)
+ router = self._get_router(self.tenant_id)
+ self.addCleanup(self.cleanup_wrapper, router)
+ subnet = self._create_subnet(network)
+ self.addCleanup(self.cleanup_wrapper, subnet)
+ subnet.add_to_router(router.id)
+ public_network_id = CONF.network.public_network_id
+ create_kwargs = {
+ 'nics': [
+ {'net-id': network.id},
+ ],
+ 'key_name': self.keypair.name,
+ 'security_groups': [security_group.name],
+ }
+ server_name = data_utils.rand_name('server-smoke-%d-')
+ self.server = self.create_server(name=server_name,
+ create_kwargs=create_kwargs)
+ self.addCleanup(self.cleanup_wrapper, self.server)
+ self.floating_ip = self._create_floating_ip(self.server,
+ public_network_id)
+ self.addCleanup(self.cleanup_wrapper, self.floating_ip)
+
+ def _check_tenant_network_connectivity(self, server,
+ username,
+ private_key,
+ should_connect=True):
+ if not CONF.network.tenant_networks_reachable:
+ msg = 'Tenant networks not configured to be reachable.'
+ LOG.info(msg)
+ return
+ # The target login is assumed to have been configured for
+ # key-based authentication by cloud-init.
+ try:
+ for net_name, ip_addresses in server.networks.iteritems():
+ for ip_address in ip_addresses:
+ self._check_vm_connectivity(ip_address,
+ username,
+ private_key,
+ should_connect=should_connect)
+ except Exception:
+ LOG.exception('Tenant network connectivity check failed')
+ self._log_console_output(servers=[server])
+ debug.log_ip_ns()
+ raise
+
+ def _check_public_network_connectivity(self, floating_ip,
+ username,
+ private_key,
+ should_connect=True):
+ # The target login is assumed to have been configured for
+ # key-based authentication by cloud-init.
+ try:
+ self._check_vm_connectivity(floating_ip, username, private_key,
+ should_connect=should_connect)
+ except Exception:
+ LOG.exception("Public network connectivity check failed")
+ debug.log_ip_ns()
+ raise
+
+ def _check_network_connectivity(self, should_connect=True):
+ username = CONF.compute.image_ssh_user
+ private_key = self.keypair.private_key
+ self._check_tenant_network_connectivity(self.server,
+ username,
+ private_key,
+ should_connect=should_connect)
+ floating_ip = self.floating_ip.floating_ip_address
+ self._check_public_network_connectivity(floating_ip,
+ username,
+ private_key,
+ should_connect=should_connect)
+
+ def _wait_server_status_and_check_network_connectivity(self):
+ self.status_timeout(self.compute_client.servers, self.server.id,
+ 'ACTIVE')
+ self._check_network_connectivity()
+
+ @services('compute', 'network')
+ def test_server_connectivity_stop_start(self):
+ self.server.stop()
+ self.status_timeout(self.compute_client.servers, self.server.id,
+ 'SHUTOFF')
+ self._check_network_connectivity(should_connect=False)
+ self.server.start()
+ self._wait_server_status_and_check_network_connectivity()
+
+ @services('compute', 'network')
+ def test_server_connectivity_reboot(self):
+ self.server.reboot()
+ self._wait_server_status_and_check_network_connectivity()
+
+ @services('compute', 'network')
+ def test_server_connectivity_rebuild(self):
+ image_ref_alt = CONF.compute.image_ref_alt
+ self.server.rebuild(image_ref_alt)
+ self._wait_server_status_and_check_network_connectivity()
+
+ @testtools.skipUnless(CONF.compute_feature_enabled.pause,
+ 'Pause is not available.')
+ @services('compute', 'network')
+ def test_server_connectivity_pause_unpause(self):
+ self.server.pause()
+ self.status_timeout(self.compute_client.servers, self.server.id,
+ 'PAUSED')
+ self._check_network_connectivity(should_connect=False)
+ self.server.unpause()
+ self._wait_server_status_and_check_network_connectivity()
+
+ @testtools.skipUnless(CONF.compute_feature_enabled.suspend,
+ 'Suspend is not available.')
+ @services('compute', 'network')
+ def test_server_connectivity_suspend_resume(self):
+ self.server.suspend()
+ self.status_timeout(self.compute_client.servers, self.server.id,
+ 'SUSPENDED')
+ self._check_network_connectivity(should_connect=False)
+ self.server.resume()
+ self._wait_server_status_and_check_network_connectivity()
+
+ @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+ 'Resize is not available.')
+ @services('compute', 'network')
+ def test_server_connectivity_resize(self):
+ resize_flavor = CONF.compute.flavor_ref_alt
+ if resize_flavor == CONF.compute.flavor_ref:
+ msg = "Skipping test - flavor_ref and flavor_ref_alt are identical"
+ raise self.skipException(msg)
+ self.server.resize(resize_flavor)
+ self.status_timeout(self.compute_client.servers, self.server.id,
+ 'VERIFY_RESIZE')
+ self.server.confirm_resize()
+ self._wait_server_status_and_check_network_connectivity()
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index 489b271..d5ab3d3 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -172,7 +172,7 @@
except Exception:
LOG.exception('Tenant connectivity check failed')
self._log_console_output(servers=self.servers.keys())
- debug.log_ip_ns()
+ debug.log_net_debug()
raise
def _create_and_associate_floating_ips(self):
@@ -204,7 +204,7 @@
ex_msg += ": " + msg
LOG.exception(ex_msg)
self._log_console_output(servers=self.servers.keys())
- debug.log_ip_ns()
+ debug.log_net_debug()
raise
def _disassociate_floating_ips(self):
diff --git a/tempest/scenario/test_security_groups_basic_ops.py b/tempest/scenario/test_security_groups_basic_ops.py
index d404dd1..4616b82 100644
--- a/tempest/scenario/test_security_groups_basic_ops.py
+++ b/tempest/scenario/test_security_groups_basic_ops.py
@@ -98,17 +98,10 @@
access point
"""
- def __init__(self, tenant_id, tenant_user, tenant_pass, tenant_name):
- self.manager = clients.OfficialClientManager(
- tenant_user,
- tenant_pass,
- tenant_name
- )
- self.keypair = None
- self.tenant_id = tenant_id
- self.tenant_name = tenant_name
- self.tenant_user = tenant_user
- self.tenant_pass = tenant_pass
+ def __init__(self, credentials):
+ self.manager = clients.OfficialClientManager(credentials)
+ # Credentials from manager are filled with both names and IDs
+ self.creds = self.manager.credentials
self.network = None
self.subnet = None
self.router = None
@@ -121,12 +114,14 @@
self.router = router
def _get_tenant_credentials(self):
- return self.tenant_user, self.tenant_pass, self.tenant_name
+ # FIXME(andreaf) Unused method
+ return self.creds
@classmethod
def check_preconditions(cls):
super(TestSecurityGroupsBasicOps, cls).check_preconditions()
- if (cls.alt_tenant_id is None) or (cls.tenant_id is cls.alt_tenant_id):
+ if (cls.alt_creds is None) or \
+ (cls.tenant_id is cls.alt_creds.tenant_id):
msg = 'No alt_tenant defined'
cls.enabled = False
raise cls.skipException(msg)
@@ -140,21 +135,20 @@
@classmethod
def setUpClass(cls):
super(TestSecurityGroupsBasicOps, cls).setUpClass()
- alt_creds = cls.alt_credentials()
- cls.alt_tenant_id = cls.manager._get_identity_client(
- *alt_creds
- ).tenant_id
+ cls.alt_creds = cls.alt_credentials()
+ cls.alt_manager = clients.OfficialClientManager(cls.alt_creds)
+ # Credentials from the manager are filled with both IDs and Names
+ cls.alt_creds = cls.alt_manager.credentials
cls.check_preconditions()
# TODO(mnewby) Consider looking up entities as needed instead
# of storing them as collections on the class.
cls.floating_ips = {}
cls.tenants = {}
- cls.primary_tenant = cls.TenantProperties(cls.tenant_id,
- *cls.credentials())
- cls.alt_tenant = cls.TenantProperties(cls.alt_tenant_id,
- *alt_creds)
+ creds = cls.credentials()
+ cls.primary_tenant = cls.TenantProperties(creds)
+ cls.alt_tenant = cls.TenantProperties(cls.alt_creds)
for tenant in [cls.primary_tenant, cls.alt_tenant]:
- cls.tenants[tenant.tenant_id] = tenant
+ cls.tenants[tenant.creds.tenant_id] = tenant
cls.floating_ip_access = not CONF.network.public_router_id
def cleanup_wrapper(self, resource):
@@ -175,14 +169,14 @@
def _create_tenant_security_groups(self, tenant):
access_sg = self._create_empty_security_group(
namestart='secgroup_access-',
- tenant_id=tenant.tenant_id
+ tenant_id=tenant.creds.tenant_id
)
self.addCleanup(self.cleanup_wrapper, access_sg)
# don't use default secgroup since it allows in-tenant traffic
def_sg = self._create_empty_security_group(
namestart='secgroup_general-',
- tenant_id=tenant.tenant_id
+ tenant_id=tenant.creds.tenant_id
)
self.addCleanup(self.cleanup_wrapper, def_sg)
tenant.security_groups.update(access=access_sg, default=def_sg)
@@ -239,7 +233,7 @@
],
'key_name': tenant.keypair.name,
'security_groups': security_groups,
- 'tenant_id': tenant.tenant_id
+ 'tenant_id': tenant.creds.tenant_id
}
server = self.create_server(name=name, create_kwargs=create_kwargs)
self.addCleanup(self.cleanup_wrapper, server)
@@ -248,7 +242,7 @@
def _create_tenant_servers(self, tenant, num=1):
for i in range(num):
name = 'server-{tenant}-gen-{num}-'.format(
- tenant=tenant.tenant_name,
+ tenant=tenant.creds.tenant_name,
num=i
)
name = data_utils.rand_name(name)
@@ -262,8 +256,8 @@
workaround ip namespace
"""
secgroups = [sg.name for sg in tenant.security_groups.values()]
- name = 'server-{tenant}-access_point-'.format(tenant=tenant.tenant_name
- )
+ name = 'server-{tenant}-access_point-'.format(
+ tenant=tenant.creds.tenant_name)
name = data_utils.rand_name(name)
server = self._create_server(name, tenant,
security_groups=secgroups)
@@ -277,7 +271,7 @@
self.floating_ips.setdefault(server, floating_ip)
def _create_tenant_network(self, tenant):
- network, subnet, router = self._create_networks(tenant.tenant_id)
+ network, subnet, router = self._create_networks(tenant.creds.tenant_id)
for r in [network, router, subnet]:
self.addCleanup(self.cleanup_wrapper, r)
tenant.set_network(network, subnet, router)
@@ -300,7 +294,7 @@
tenant_id = tenant_or_id
else:
tenant = tenant_or_id
- tenant_id = tenant.tenant_id
+ tenant_id = tenant.creds.tenant_id
self._set_compute_context(tenant)
self._create_tenant_keypairs(tenant_id)
self._create_tenant_network(tenant)
@@ -335,15 +329,13 @@
if should_succeed:
msg = "Timed out waiting for %s to become reachable" % ip
else:
- # todo(yfried): remove this line when bug 1252620 is fixed
- return True
msg = "%s is reachable" % ip
try:
self.assertTrue(self._check_remote_connectivity(access_point, ip,
should_succeed),
msg)
except Exception:
- debug.log_ip_ns()
+ debug.log_net_debug()
raise
def _test_in_tenant_block(self, tenant):
@@ -422,11 +414,15 @@
access_point_ssh = self._connect_to_access_point(tenant)
mac_addr = access_point_ssh.get_mac_address()
mac_addr = mac_addr.strip().lower()
- port_list = self.network_client.list_ports()['ports']
+ # Get the fixed_ips and mac_address fields of all ports. Select
+ # only those two columns to reduce the size of the response.
+ port_list = self.network_client.list_ports(
+ fields=['fixed_ips', 'mac_address'])['ports']
port_detail_list = [
(port['fixed_ips'][0]['subnet_id'],
port['fixed_ips'][0]['ip_address'],
- port['mac_address'].lower()) for port in port_list
+ port['mac_address'].lower())
+ for port in port_list if port['fixed_ips']
]
server_ip = self._get_server_ip(tenant.access_point)
subnet_id = tenant.subnet.id
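For clarity, the fields filter added above asks Neutron to return only the two columns consumed here; an illustrative reduced response (values made up) and the resulting tuples:

    port_list = [
        {'fixed_ips': [{'subnet_id': 'SUBNET_UUID',
                        'ip_address': '10.0.0.4'}],
         'mac_address': 'FA:16:3E:00:00:01'},
    ]
    port_detail_list = [
        (p['fixed_ips'][0]['subnet_id'],
         p['fixed_ips'][0]['ip_address'],
         p['mac_address'].lower())
        for p in port_list if p['fixed_ips']
    ]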
diff --git a/tempest/scenario/test_server_advanced_ops.py b/tempest/scenario/test_server_advanced_ops.py
index c0eb6e7..5a1dc04 100644
--- a/tempest/scenario/test_server_advanced_ops.py
+++ b/tempest/scenario/test_server_advanced_ops.py
@@ -13,6 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import testtools
+
from tempest import config
from tempest.openstack.common import log as logging
from tempest.scenario import manager
@@ -37,16 +39,12 @@
cls.set_network_resources()
super(TestServerAdvancedOps, cls).setUpClass()
- if not CONF.compute_feature_enabled.resize:
- msg = "Skipping test - resize not available on this host"
- raise cls.skipException(msg)
-
- resize_flavor = CONF.compute.flavor_ref_alt
-
- if resize_flavor == CONF.compute.flavor_ref:
+ if CONF.compute.flavor_ref_alt == CONF.compute.flavor_ref:
msg = "Skipping test - flavor_ref and flavor_ref_alt are identical"
raise cls.skipException(msg)
+ @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+ 'Resize is not available.')
@test.services('compute')
def test_resize_server_confirm(self):
# We create an instance for use in this test
@@ -65,6 +63,8 @@
self.status_timeout(
self.compute_client.servers, instance_id, 'ACTIVE')
+ @testtools.skipUnless(CONF.compute_feature_enabled.suspend,
+ 'Suspend is not available.')
@test.services('compute')
def test_server_sequence_suspend_resume(self):
# We create an instance for use in this test
diff --git a/tempest/scenario/test_server_basic_ops.py b/tempest/scenario/test_server_basic_ops.py
index d369f12..13e00a5 100644
--- a/tempest/scenario/test_server_basic_ops.py
+++ b/tempest/scenario/test_server_basic_ops.py
@@ -14,19 +14,17 @@
# under the License.
from tempest.common.utils import data_utils
-from tempest.common.utils import test_utils
from tempest import config
from tempest.openstack.common import log as logging
from tempest.scenario import manager
+from tempest.scenario import utils as test_utils
from tempest import test
-import testscenarios
-
CONF = config.CONF
LOG = logging.getLogger(__name__)
-load_tests = testscenarios.load_tests_apply_scenarios
+load_tests = test_utils.load_tests_input_scenario_utils
class TestServerBasicOps(manager.OfficialClientTest):
@@ -43,13 +41,6 @@
* Terminate the instance
"""
- scenario_utils = test_utils.InputScenarioUtils()
- scenario_flavor = scenario_utils.scenario_flavors
- scenario_image = scenario_utils.scenario_images
-
- scenarios = testscenarios.multiply_scenarios(scenario_image,
- scenario_flavor)
-
def setUp(self):
super(TestServerBasicOps, self).setUp()
# Setup image and flavor the test instance
@@ -99,42 +90,6 @@
create_kwargs=create_kwargs)
self.set_resource('instance', instance)
- def pause_server(self):
- instance = self.get_resource('instance')
- instance_id = instance.id
- LOG.debug("Pausing instance %s. Current status: %s",
- instance_id, instance.status)
- instance.pause()
- self.status_timeout(
- self.compute_client.servers, instance_id, 'PAUSED')
-
- def unpause_server(self):
- instance = self.get_resource('instance')
- instance_id = instance.id
- LOG.debug("Unpausing instance %s. Current status: %s",
- instance_id, instance.status)
- instance.unpause()
- self.status_timeout(
- self.compute_client.servers, instance_id, 'ACTIVE')
-
- def suspend_server(self):
- instance = self.get_resource('instance')
- instance_id = instance.id
- LOG.debug("Suspending instance %s. Current status: %s",
- instance_id, instance.status)
- instance.suspend()
- self.status_timeout(self.compute_client.servers,
- instance_id, 'SUSPENDED')
-
- def resume_server(self):
- instance = self.get_resource('instance')
- instance_id = instance.id
- LOG.debug("Resuming instance %s. Current status: %s",
- instance_id, instance.status)
- instance.resume()
- self.status_timeout(
- self.compute_client.servers, instance_id, 'ACTIVE')
-
def terminate_instance(self):
instance = self.get_resource('instance')
instance.delete()
@@ -149,10 +104,11 @@
instance.add_floating_ip(floating_ip)
# Check ssh
try:
- self.get_remote_client(
+ linux_client = self.get_remote_client(
server_or_ip=floating_ip.ip,
username=self.image_utils.ssh_user(self.image_ref),
- private_key=self.keypair.private)
+ private_key=self.keypair.private_key)
+ linux_client.validate_authentication()
except Exception:
LOG.exception('ssh to server failed')
self._log_console_output()
@@ -163,9 +119,5 @@
self.add_keypair()
self.create_security_group()
self.boot_instance()
- self.pause_server()
- self.unpause_server()
- self.suspend_server()
- self.resume_server()
self.verify_ssh()
self.terminate_instance()
diff --git a/tempest/scenario/test_stamp_pattern.py b/tempest/scenario/test_stamp_pattern.py
index 128ec17..5235871 100644
--- a/tempest/scenario/test_stamp_pattern.py
+++ b/tempest/scenario/test_stamp_pattern.py
@@ -50,6 +50,13 @@
14. Check the existence of a file which created at 6. in volume2
"""
+ @classmethod
+ def setUpClass(cls):
+ super(TestStampPattern, cls).setUpClass()
+
+ if not CONF.volume_feature_enabled.snapshot:
+ raise cls.skipException("Cinder volume snapshots are disabled")
+
def _wait_for_volume_snapshot_status(self, volume_snapshot, status):
self.status_timeout(self.volume_client.volume_snapshots,
volume_snapshot.id, status)
diff --git a/tempest/scenario/test_volume_boot_pattern.py b/tempest/scenario/test_volume_boot_pattern.py
index 9803664..faca31f 100644
--- a/tempest/scenario/test_volume_boot_pattern.py
+++ b/tempest/scenario/test_volume_boot_pattern.py
@@ -35,6 +35,12 @@
* Boot an additional instance from the new snapshot based volume
* Check written content in the instance booted from snapshot
"""
+ @classmethod
+ def setUpClass(cls):
+ super(TestVolumeBootPattern, cls).setUpClass()
+
+ if not CONF.volume_feature_enabled.snapshot:
+ raise cls.skipException("Cinder volume snapshots are disabled")
def _create_volume_from_image(self):
img_uuid = CONF.compute.image_ref
@@ -53,7 +59,7 @@
'block_device_mapping': bd_map,
'key_name': keypair.name
}
- return self.create_server(create_kwargs=create_kwargs)
+ return self.create_server(image='', create_kwargs=create_kwargs)
def _create_snapshot_from_volume(self, vol_id):
volume_snapshots = self.volume_client.volume_snapshots
@@ -169,3 +175,15 @@
# deletion operations to succeed
self._stop_instances([instance_2nd, instance_from_snapshot])
self._detach_volumes([volume_origin, volume])
+
+
+class TestVolumeBootPatternV2(TestVolumeBootPattern):
+ def _boot_instance_from_volume(self, vol_id, keypair):
+ bdms = [{'uuid': vol_id, 'source_type': 'volume',
+ 'destination_type': 'volume', 'boot_index': 0,
+ 'delete_on_termination': False}]
+ create_kwargs = {
+ 'block_device_mapping_v2': bdms,
+ 'key_name': keypair.name
+ }
+ return self.create_server(image='', create_kwargs=create_kwargs)
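For comparison, a hedged sketch of the two boot-from-volume request shapes: the block_device_mapping_v2 list used by TestVolumeBootPatternV2 above, and the legacy v1 mapping string format ('<volume_id>:<type>:<size>:<delete_on_termination>'), which is assumed from the standard Nova API rather than shown in this diff:

    vol_id = 'VOLUME_UUID'  # placeholder

    # legacy block_device_mapping (v1): device name -> mapping string (assumed)
    bd_map_v1 = {'vda': vol_id + ':::0'}

    # block_device_mapping_v2: list of dicts, as used above
    bdms_v2 = [{'uuid': vol_id, 'source_type': 'volume',
                'destination_type': 'volume', 'boot_index': 0,
                'delete_on_termination': False}]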
diff --git a/tempest/common/utils/test_utils.py b/tempest/scenario/utils.py
similarity index 80%
rename from tempest/common/utils/test_utils.py
rename to tempest/scenario/utils.py
index cc0d831..e2adb34 100644
--- a/tempest/common/utils/test_utils.py
+++ b/tempest/scenario/utils.py
@@ -12,15 +12,20 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest import clients
-from tempest.common.utils import misc
-from tempest import config
import json
import re
import string
import unicodedata
+import testscenarios
+import testtools
+
+from tempest import auth
+from tempest import clients
+from tempest.common.utils import misc
+from tempest import config
+
CONF = config.CONF
@@ -35,9 +40,8 @@
self.non_ssh_image_pattern = \
CONF.input_scenario.non_ssh_image_regex
# Setup clients
- ocm = clients.OfficialClientManager(CONF.identity.username,
- CONF.identity.password,
- CONF.identity.tenant_name)
+ ocm = clients.OfficialClientManager(
+ auth.get_default_credentials('user'))
self.client = ocm.compute_client
def ssh_user(self, image_id):
@@ -79,7 +83,7 @@
class TestInputScenario(manager.OfficialClientTest):
- scenario_utils = test_utils.InputScenarioUtils()
+ scenario_utils = utils.InputScenarioUtils()
scenario_flavor = scenario_utils.scenario_flavors
scenario_image = scenario_utils.scenario_images
scenarios = testscenarios.multiply_scenarios(scenario_image,
@@ -95,9 +99,8 @@
digit=string.digits)
def __init__(self):
- ocm = clients.OfficialClientManager(CONF.identity.username,
- CONF.identity.password,
- CONF.identity.tenant_name)
+ ocm = clients.OfficialClientManager(
+ auth.get_default_credentials('user', fill_in=False))
self.client = ocm.compute_client
self.image_pattern = CONF.input_scenario.image_regex
self.flavor_pattern = CONF.input_scenario.flavor_regex
@@ -134,3 +137,22 @@
for f in flavors if re.search(self.flavor_pattern, str(f.name))
]
return self._scenario_flavors
+
+
+def load_tests_input_scenario_utils(*args):
+ """
+ Wrapper for testscenarios that sets the scenarios, avoiding a getattr
+ on the CONF object at import time.
+ """
+ if getattr(args[0], 'suiteClass', None) is not None:
+ loader, standard_tests, pattern = args
+ else:
+ standard_tests, module, loader = args
+ scenario_utils = InputScenarioUtils()
+ scenario_flavor = scenario_utils.scenario_flavors
+ scenario_image = scenario_utils.scenario_images
+ for test in testtools.iterate_tests(standard_tests):
+ setattr(test, 'scenarios', testscenarios.multiply_scenarios(
+ scenario_image,
+ scenario_flavor))
+ return testscenarios.load_tests_apply_scenarios(*args)
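A minimal sketch of how a scenario module opts in to the generated scenarios, mirroring the load_tests assignment added to test_server_basic_ops.py above; the test body and the attributes injected per scenario are illustrative only:

    from tempest.scenario import manager
    from tempest.scenario import utils as test_utils

    load_tests = test_utils.load_tests_input_scenario_utils


    class TestWithInputScenarios(manager.OfficialClientTest):

        def test_something(self):
            # per-scenario attributes (e.g. image/flavor) are assumed to be
            # applied by testscenarios before this method runs
            pass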
diff --git a/tempest/services/baremetal/base.py b/tempest/services/baremetal/base.py
index 5f6b513..2af287f 100644
--- a/tempest/services/baremetal/base.py
+++ b/tempest/services/baremetal/base.py
@@ -12,6 +12,7 @@
import functools
import json
+import urllib
import six
@@ -103,16 +104,19 @@
return patch
- def _list_request(self, resource, permanent=False):
+ def _list_request(self, resource, permanent=False, **kwargs):
"""
Get the list of objects of the specified type.
:param resource: The name of the REST resource, e.g., 'nodes'.
:param **kwargs: Parameters for the request.
:return: A tuple with the server response and deserialized JSON list
of objects
"""
uri = self._get_uri(resource, permanent=permanent)
+ if kwargs:
+ uri += "?%s" % urllib.urlencode(kwargs)
resp, body = self.get(uri)
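A quick illustration of how the new **kwargs are serialized onto the URI by _list_request(); the address value is a made-up example:

    import urllib

    params = {'address': 'aa:bb:cc:dd:ee:ff'}
    print('ports' + '?%s' % urllib.urlencode(params))
    # ports?address=aa%3Abb%3Acc%3Add%3Aee%3Aff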
diff --git a/tempest/services/baremetal/v1/base_v1.py b/tempest/services/baremetal/v1/base_v1.py
index 3f4c509..296a199 100644
--- a/tempest/services/baremetal/v1/base_v1.py
+++ b/tempest/services/baremetal/v1/base_v1.py
@@ -37,9 +37,24 @@
return self._list_request('chassis')
@base.handle_errors
- def list_ports(self):
+ def list_ports(self, **kwargs):
"""List all existing ports."""
- return self._list_request('ports')
+ return self._list_request('ports', **kwargs)
+
+ @base.handle_errors
+ def list_nodestates(self, uuid):
+ """List all existing states."""
+ return self._list_request('/nodes/%s/states' % uuid)
+
+ @base.handle_errors
+ def list_ports_detail(self):
+ """Details list all existing ports."""
+ return self._list_request('/ports/detail')
+
+ @base.handle_errors
+ def list_drivers(self):
+ """List all existing drivers."""
+ return self._list_request('drivers')
@base.handle_errors
def show_node(self, uuid):
@@ -116,12 +131,20 @@
Create a port with the specified parameters.
:param node_id: The ID of the node which owns the port.
- :param address: MAC address of the port. Default: 01:23:45:67:89:0A.
+ :param address: MAC address of the port.
+ :param extra: Metadata of the port. Default: {'foo': 'bar'}.
+ :param uuid: UUID of the port.
:return: A tuple with the server response and the created port.
"""
- port = {'address': kwargs.get('address', '01:23:45:67:89:0A'),
- 'node_uuid': node_id}
+ port = {'extra': kwargs.get('extra', {'foo': 'bar'}),
+ 'uuid': kwargs['uuid']}
+
+ if node_id is not None:
+ port['node_uuid'] = node_id
+
+ if kwargs['address'] is not None:
+ port['address'] = kwargs['address']
return self._create_request('ports', 'port', port)
@@ -192,15 +215,14 @@
return self._patch_request('chassis', uuid, patch)
@base.handle_errors
- def update_port(self, uuid, **kwargs):
+ def update_port(self, uuid, patch):
"""
Update the specified port.
:param uuid: The unique identifier of the port.
+ :param patch: List of dicts representing json patches.
:return: A tuple with the server response and the updated port.
"""
- port_attributes = ('address',)
- patch = self._make_patch(port_attributes, **kwargs)
return self._patch_request('ports', uuid, patch)
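A hedged example of the `patch` argument now expected by update_port(): a list of JSON-patch style dicts in the shape _make_patch builds; the port UUID and MAC value are placeholders:

    patch = [{'op': 'replace', 'path': '/address',
              'value': '11:22:33:44:55:66'}]
    # resp, port = baremetal_client.update_port('<port-uuid>', patch)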
diff --git a/tempest/services/botoclients.py b/tempest/services/botoclients.py
index b52d48c..7616a99 100644
--- a/tempest/services/botoclients.py
+++ b/tempest/services/botoclients.py
@@ -179,19 +179,6 @@
'revoke_security_group',
'revoke_security_group_egress'))
- def get_good_zone(self):
- """
- :rtype: BaseString
- :return: Returns with the first available zone name
- """
- for zone in self.get_all_zones():
- # NOTE(afazekas): zone.region_name was None
- if (zone.state == "available" and
- zone.region.name == self.connection_data["region"].name):
- return zone.name
- else:
- raise IndexError("Don't have a good zone")
-
class ObjectClientS3(BotoClientBase):
diff --git a/tempest/services/compute/json/agents_client.py b/tempest/services/compute/json/agents_client.py
new file mode 100644
index 0000000..98d8896
--- /dev/null
+++ b/tempest/services/compute/json/agents_client.py
@@ -0,0 +1,61 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import urllib
+
+from tempest.api_schema.compute import agents as common_schema
+from tempest.api_schema.compute.v2 import agents as schema
+from tempest.common import rest_client
+from tempest import config
+
+CONF = config.CONF
+
+
+class AgentsClientJSON(rest_client.RestClient):
+ """
+ Tests Agents API
+ """
+
+ def __init__(self, auth_provider):
+ super(AgentsClientJSON, self).__init__(auth_provider)
+ self.service = CONF.compute.catalog_type
+
+ def list_agents(self, params=None):
+ """List all agent builds."""
+ url = 'os-agents'
+ if params:
+ url += '?%s' % urllib.urlencode(params)
+ resp, body = self.get(url)
+ body = json.loads(body)
+ self.validate_response(common_schema.list_agents, resp, body)
+ return resp, body['agents']
+
+ def create_agent(self, **kwargs):
+ """Create an agent build."""
+ post_body = json.dumps({'agent': kwargs})
+ resp, body = self.post('os-agents', post_body)
+ return resp, self._parse_resp(body)
+
+ def delete_agent(self, agent_id):
+ """Delete an existing agent build."""
+ resp, body = self.delete("os-agents/%s" % str(agent_id))
+ self.validate_response(schema.delete_agent, resp, body)
+ return resp, body
+
+ def update_agent(self, agent_id, **kwargs):
+ """Update an agent build."""
+ put_body = json.dumps({'para': kwargs})
+ resp, body = self.put('os-agents/%s' % str(agent_id), put_body)
+ return resp, self._parse_resp(body)
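A hypothetical usage sketch for the new AgentsClientJSON; the keyword arguments follow the Nova os-agents API and are not defined by this diff:

    # client is an AgentsClientJSON built with an auth provider
    resp, agent = client.create_agent(hypervisor='kvm', os='linux',
                                      architecture='x86_64', version='7.0',
                                      url='http://example.com/agent',
                                      md5hash='add6bb58e139be103324d04d82d8f545')
    resp, agents = client.list_agents()
    resp, body = client.delete_agent(agent['agent_id'])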
diff --git a/tempest/services/compute/json/aggregates_client.py b/tempest/services/compute/json/aggregates_client.py
index 700a29b..71d6f63 100644
--- a/tempest/services/compute/json/aggregates_client.py
+++ b/tempest/services/compute/json/aggregates_client.py
@@ -15,6 +15,8 @@
import json
+from tempest.api_schema.compute import aggregates as schema
+from tempest.api_schema.compute.v2 import aggregates as v2_schema
from tempest.common import rest_client
from tempest import config
from tempest import exceptions
@@ -32,12 +34,14 @@
"""Get aggregate list."""
resp, body = self.get("os-aggregates")
body = json.loads(body)
+ self.validate_response(schema.list_aggregates, resp, body)
return resp, body['aggregates']
def get_aggregate(self, aggregate_id):
"""Get details of the given aggregate."""
resp, body = self.get("os-aggregates/%s" % str(aggregate_id))
body = json.loads(body)
+ self.validate_response(schema.get_aggregate, resp, body)
return resp, body['aggregate']
def create_aggregate(self, **kwargs):
@@ -46,6 +50,7 @@
resp, body = self.post('os-aggregates', post_body)
body = json.loads(body)
+ self.validate_response(v2_schema.create_aggregate, resp, body)
return resp, body['aggregate']
def update_aggregate(self, aggregate_id, name, availability_zone=None):
@@ -58,11 +63,14 @@
resp, body = self.put('os-aggregates/%s' % str(aggregate_id), put_body)
body = json.loads(body)
+ self.validate_response(schema.update_aggregate, resp, body)
return resp, body['aggregate']
def delete_aggregate(self, aggregate_id):
"""Deletes the given aggregate."""
- return self.delete("os-aggregates/%s" % str(aggregate_id))
+ resp, body = self.delete("os-aggregates/%s" % str(aggregate_id))
+ self.validate_response(v2_schema.delete_aggregate, resp, body)
+ return resp, body
def is_resource_deleted(self, id):
try:
@@ -80,6 +88,7 @@
resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
post_body)
body = json.loads(body)
+ self.validate_response(schema.aggregate_add_remove_host, resp, body)
return resp, body['aggregate']
def remove_host(self, aggregate_id, host):
@@ -91,6 +100,7 @@
resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
post_body)
body = json.loads(body)
+ self.validate_response(schema.aggregate_add_remove_host, resp, body)
return resp, body['aggregate']
def set_metadata(self, aggregate_id, meta):
@@ -102,4 +112,5 @@
resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
post_body)
body = json.loads(body)
+ self.validate_response(schema.aggregate_set_metadata, resp, body)
return resp, body['aggregate']
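The schemas referenced by validate_response() live under tempest/api_schema/; an illustrative (not verbatim) example of their shape, assuming the usual status-code-plus-body-schema convention:

    # illustrative only -- the real definitions are in
    # tempest/api_schema/compute/aggregates.py
    list_aggregates = {
        'status_code': [200],
        'response_body': {
            'type': 'object',
            'properties': {
                'aggregates': {'type': 'array'},
            },
            'required': ['aggregates'],
        },
    }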
diff --git a/tempest/services/compute/json/availability_zone_client.py b/tempest/services/compute/json/availability_zone_client.py
index 9278d5b..1c067e8 100644
--- a/tempest/services/compute/json/availability_zone_client.py
+++ b/tempest/services/compute/json/availability_zone_client.py
@@ -15,6 +15,7 @@
import json
+from tempest.api_schema.compute.v2 import availability_zone as schema
from tempest.common import rest_client
from tempest import config
@@ -31,9 +32,12 @@
def get_availability_zone_list(self):
resp, body = self.get('os-availability-zone')
body = json.loads(body)
+ self.validate_response(schema.get_availability_zone_list, resp, body)
return resp, body['availabilityZoneInfo']
def get_availability_zone_list_detail(self):
resp, body = self.get('os-availability-zone/detail')
body = json.loads(body)
+ self.validate_response(schema.get_availability_zone_list_detail, resp,
+ body)
return resp, body['availabilityZoneInfo']
diff --git a/tempest/services/compute/json/certificates_client.py b/tempest/services/compute/json/certificates_client.py
index c05e352..1d04628 100644
--- a/tempest/services/compute/json/certificates_client.py
+++ b/tempest/services/compute/json/certificates_client.py
@@ -15,6 +15,8 @@
import json
+from tempest.api_schema.compute import certificates as schema
+from tempest.api_schema.compute.v2 import certificates as v2schema
from tempest.common import rest_client
from tempest import config
@@ -31,6 +33,7 @@
url = "os-certificates/%s" % (id)
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.get_certificate, resp, body)
return resp, body['certificate']
def create_certificate(self):
@@ -38,4 +41,5 @@
url = "os-certificates"
resp, body = self.post(url, None)
body = json.loads(body)
+ self.validate_response(v2schema.create_certificate, resp, body)
return resp, body['certificate']
diff --git a/tempest/services/compute/json/extensions_client.py b/tempest/services/compute/json/extensions_client.py
index 5ad8b98..ed2b14d 100644
--- a/tempest/services/compute/json/extensions_client.py
+++ b/tempest/services/compute/json/extensions_client.py
@@ -15,6 +15,7 @@
import json
+from tempest.api_schema.compute.v2 import extensions as schema
from tempest.common import rest_client
from tempest import config
@@ -31,6 +32,7 @@
url = 'extensions'
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.list_extensions, resp, body)
return resp, body['extensions']
def is_enabled(self, extension):
diff --git a/tempest/services/compute/json/fixed_ips_client.py b/tempest/services/compute/json/fixed_ips_client.py
index 8b2c6c9..f2d5cbe 100644
--- a/tempest/services/compute/json/fixed_ips_client.py
+++ b/tempest/services/compute/json/fixed_ips_client.py
@@ -15,6 +15,7 @@
import json
+from tempest.api_schema.compute.v2 import fixed_ips as schema
from tempest.common import rest_client
from tempest import config
@@ -31,10 +32,12 @@
url = "os-fixed-ips/%s" % (fixed_ip)
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.fixed_ips, resp, body)
return resp, body['fixed_ip']
def reserve_fixed_ip(self, ip, body):
"""This reserves and unreserves fixed ips."""
url = "os-fixed-ips/%s/action" % (ip)
resp, body = self.post(url, json.dumps(body))
+ self.validate_response(schema.fixed_ip_action, resp, body)
return resp, body
diff --git a/tempest/services/compute/json/flavors_client.py b/tempest/services/compute/json/flavors_client.py
index a8111af..89cbe1d 100644
--- a/tempest/services/compute/json/flavors_client.py
+++ b/tempest/services/compute/json/flavors_client.py
@@ -16,6 +16,11 @@
import json
import urllib
+from tempest.api_schema.compute import flavors as common_schema
+from tempest.api_schema.compute import flavors_access as schema_access
+from tempest.api_schema.compute import flavors_extra_specs \
+ as schema_extra_specs
+from tempest.api_schema.compute.v2 import flavors as v2schema
from tempest.common import rest_client
from tempest import config
@@ -35,6 +40,7 @@
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(common_schema.list_flavors, resp, body)
return resp, body['flavors']
def list_flavors_with_detail(self, params=None):
@@ -44,11 +50,13 @@
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(v2schema.list_flavors_details, resp, body)
return resp, body['flavors']
def get_flavor_details(self, flavor_id):
resp, body = self.get("flavors/%s" % str(flavor_id))
body = json.loads(body)
+ self.validate_response(v2schema.create_get_flavor_details, resp, body)
return resp, body['flavor']
def create_flavor(self, name, ram, vcpus, disk, flavor_id, **kwargs):
@@ -72,11 +80,14 @@
resp, body = self.post('flavors', post_body)
body = json.loads(body)
+ self.validate_response(v2schema.create_get_flavor_details, resp, body)
return resp, body['flavor']
def delete_flavor(self, flavor_id):
"""Deletes the given flavor."""
- return self.delete("flavors/%s" % str(flavor_id))
+ resp, body = self.delete("flavors/{0}".format(flavor_id))
+ self.validate_response(v2schema.delete_flavor, resp, body)
+ return resp, body
def is_resource_deleted(self, id):
# Did not use get_flavor_details(id) for verification as it gives
@@ -94,12 +105,16 @@
resp, body = self.post('flavors/%s/os-extra_specs' % flavor_id,
post_body)
body = json.loads(body)
+ self.validate_response(schema_extra_specs.flavor_extra_specs,
+ resp, body)
return resp, body['extra_specs']
def get_flavor_extra_spec(self, flavor_id):
"""Gets extra Specs details of the mentioned flavor."""
resp, body = self.get('flavors/%s/os-extra_specs' % flavor_id)
body = json.loads(body)
+ self.validate_response(schema_extra_specs.flavor_extra_specs,
+ resp, body)
return resp, body['extra_specs']
def get_flavor_extra_spec_with_key(self, flavor_id, key):
@@ -107,6 +122,8 @@
resp, body = self.get('flavors/%s/os-extra_specs/%s' % (str(flavor_id),
key))
body = json.loads(body)
+ self.validate_response(schema_extra_specs.flavor_extra_specs_key,
+ resp, body)
return resp, body
def update_flavor_extra_spec(self, flavor_id, key, **kwargs):
@@ -114,17 +131,23 @@
resp, body = self.put('flavors/%s/os-extra_specs/%s' %
(flavor_id, key), json.dumps(kwargs))
body = json.loads(body)
+ self.validate_response(schema_extra_specs.flavor_extra_specs_key,
+ resp, body)
return resp, body
def unset_flavor_extra_spec(self, flavor_id, key):
"""Unsets extra Specs from the mentioned flavor."""
- return self.delete('flavors/%s/os-extra_specs/%s' % (str(flavor_id),
- key))
+ resp, body = self.delete('flavors/%s/os-extra_specs/%s' %
+ (str(flavor_id), key))
+ self.validate_response(v2schema.unset_flavor_extra_specs, resp, body)
+ return resp, body
def list_flavor_access(self, flavor_id):
"""Gets flavor access information given the flavor id."""
resp, body = self.get('flavors/%s/os-flavor-access' % flavor_id)
body = json.loads(body)
+ self.validate_response(schema_access.add_remove_list_flavor_access,
+ resp, body)
return resp, body['flavor_access']
def add_flavor_access(self, flavor_id, tenant_id):
@@ -137,6 +160,8 @@
post_body = json.dumps(post_body)
resp, body = self.post('flavors/%s/action' % flavor_id, post_body)
body = json.loads(body)
+ self.validate_response(schema_access.add_remove_list_flavor_access,
+ resp, body)
return resp, body['flavor_access']
def remove_flavor_access(self, flavor_id, tenant_id):
@@ -149,4 +174,6 @@
post_body = json.dumps(post_body)
resp, body = self.post('flavors/%s/action' % flavor_id, post_body)
body = json.loads(body)
+ self.validate_response(schema_access.add_remove_list_flavor_access,
+ resp, body)
return resp, body['flavor_access']
diff --git a/tempest/services/compute/json/floating_ips_client.py b/tempest/services/compute/json/floating_ips_client.py
index 42487c3..e2e12d5 100644
--- a/tempest/services/compute/json/floating_ips_client.py
+++ b/tempest/services/compute/json/floating_ips_client.py
@@ -16,6 +16,7 @@
import json
import urllib
+from tempest.api_schema.compute.v2 import floating_ips as schema
from tempest.common import rest_client
from tempest import config
from tempest import exceptions
@@ -36,6 +37,7 @@
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.list_floating_ips, resp, body)
return resp, body['floating_ips']
def get_floating_ip_details(self, floating_ip_id):
@@ -45,6 +47,7 @@
body = json.loads(body)
if resp.status == 404:
raise exceptions.NotFound(body)
+ self.validate_response(schema.floating_ip, resp, body)
return resp, body['floating_ip']
def create_floating_ip(self, pool_name=None):
@@ -54,12 +57,14 @@
post_body = json.dumps(post_body)
resp, body = self.post(url, post_body)
body = json.loads(body)
+ self.validate_response(schema.floating_ip, resp, body)
return resp, body['floating_ip']
def delete_floating_ip(self, floating_ip_id):
"""Deletes the provided floating IP from the project."""
url = "os-floating-ips/%s" % str(floating_ip_id)
resp, body = self.delete(url)
+ self.validate_response(schema.add_remove_floating_ip, resp, body)
return resp, body
def associate_floating_ip_to_server(self, floating_ip, server_id):
@@ -73,6 +78,7 @@
post_body = json.dumps(post_body)
resp, body = self.post(url, post_body)
+ self.validate_response(schema.add_remove_floating_ip, resp, body)
return resp, body
def disassociate_floating_ip_from_server(self, floating_ip, server_id):
@@ -86,6 +92,7 @@
post_body = json.dumps(post_body)
resp, body = self.post(url, post_body)
+ self.validate_response(schema.add_remove_floating_ip, resp, body)
return resp, body
def is_resource_deleted(self, id):
@@ -103,4 +110,5 @@
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.floating_ip_pools, resp, body)
return resp, body['floating_ip_pools']
diff --git a/tempest/services/compute/json/hosts_client.py b/tempest/services/compute/json/hosts_client.py
index fb45997..342f946 100644
--- a/tempest/services/compute/json/hosts_client.py
+++ b/tempest/services/compute/json/hosts_client.py
@@ -15,6 +15,8 @@
import json
import urllib
+from tempest.api_schema.compute import hosts as schema
+from tempest.api_schema.compute.v2 import hosts as v2_schema
from tempest.common import rest_client
from tempest import config
@@ -36,6 +38,7 @@
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.list_hosts, resp, body)
return resp, body['hosts']
def show_host_detail(self, hostname):
@@ -43,6 +46,7 @@
resp, body = self.get("os-hosts/%s" % str(hostname))
body = json.loads(body)
+ self.validate_response(schema.show_host_detail, resp, body)
return resp, body['host']
def update_host(self, hostname, **kwargs):
@@ -57,6 +61,7 @@
resp, body = self.put("os-hosts/%s" % str(hostname), request_body)
body = json.loads(body)
+ self.validate_response(v2_schema.update_host, resp, body)
return resp, body
def startup_host(self, hostname):
@@ -64,6 +69,7 @@
resp, body = self.get("os-hosts/%s/startup" % str(hostname))
body = json.loads(body)
+ self.validate_response(v2_schema.startup_host, resp, body)
return resp, body['host']
def shutdown_host(self, hostname):
@@ -71,6 +77,7 @@
resp, body = self.get("os-hosts/%s/shutdown" % str(hostname))
body = json.loads(body)
+ self.validate_response(v2_schema.shutdown_host, resp, body)
return resp, body['host']
def reboot_host(self, hostname):
@@ -78,4 +85,5 @@
resp, body = self.get("os-hosts/%s/reboot" % str(hostname))
body = json.loads(body)
+ self.validate_response(v2_schema.reboot_host, resp, body)
return resp, body['host']
diff --git a/tempest/services/compute/json/hypervisor_client.py b/tempest/services/compute/json/hypervisor_client.py
index c6b13b0..30228b3 100644
--- a/tempest/services/compute/json/hypervisor_client.py
+++ b/tempest/services/compute/json/hypervisor_client.py
@@ -15,6 +15,8 @@
import json
+from tempest.api_schema.compute import hypervisors as common_schema
+from tempest.api_schema.compute.v2 import hypervisors as v2schema
from tempest.common import rest_client
from tempest import config
@@ -31,40 +33,51 @@
"""List hypervisors information."""
resp, body = self.get('os-hypervisors')
body = json.loads(body)
+ self.validate_response(common_schema.common_hypervisors_detail,
+ resp, body)
return resp, body['hypervisors']
def get_hypervisor_list_details(self):
"""Show detailed hypervisors information."""
resp, body = self.get('os-hypervisors/detail')
body = json.loads(body)
+ self.validate_response(common_schema.common_list_hypervisors_detail,
+ resp, body)
return resp, body['hypervisors']
def get_hypervisor_show_details(self, hyper_id):
"""Display the details of the specified hypervisor."""
resp, body = self.get('os-hypervisors/%s' % hyper_id)
body = json.loads(body)
+ self.validate_response(common_schema.common_show_hypervisor,
+ resp, body)
return resp, body['hypervisor']
def get_hypervisor_servers(self, hyper_name):
"""List instances belonging to the specified hypervisor."""
resp, body = self.get('os-hypervisors/%s/servers' % hyper_name)
body = json.loads(body)
+ self.validate_response(v2schema.hypervisors_servers, resp, body)
return resp, body['hypervisors']
def get_hypervisor_stats(self):
"""Get hypervisor statistics over all compute nodes."""
resp, body = self.get('os-hypervisors/statistics')
body = json.loads(body)
+ self.validate_response(common_schema.hypervisor_statistics, resp, body)
return resp, body['hypervisor_statistics']
def get_hypervisor_uptime(self, hyper_id):
"""Display the uptime of the specified hypervisor."""
resp, body = self.get('os-hypervisors/%s/uptime' % hyper_id)
body = json.loads(body)
+ self.validate_response(common_schema.hypervisor_uptime, resp, body)
return resp, body['hypervisor']
def search_hypervisor(self, hyper_name):
"""Search specified hypervisor."""
resp, body = self.get('os-hypervisors/%s/search' % hyper_name)
body = json.loads(body)
+ self.validate_response(common_schema.common_hypervisors_detail,
+ resp, body)
return resp, body['hypervisors']
diff --git a/tempest/services/compute/json/images_client.py b/tempest/services/compute/json/images_client.py
index 5a79a29..af7752a 100644
--- a/tempest/services/compute/json/images_client.py
+++ b/tempest/services/compute/json/images_client.py
@@ -16,6 +16,7 @@
import json
import urllib
+from tempest.api_schema.compute.v2 import images as schema
from tempest.common import rest_client
from tempest.common import waiters
from tempest import config
@@ -47,6 +48,7 @@
post_body = json.dumps(post_body)
resp, body = self.post('servers/%s/action' % str(server_id),
post_body)
+ self.validate_response(schema.create_image, resp, body)
return resp, body
def list_images(self, params=None):
@@ -57,6 +59,7 @@
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.list_images, resp, body)
return resp, body['images']
def list_images_with_detail(self, params=None):
@@ -67,6 +70,7 @@
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.list_images_details, resp, body)
return resp, body['images']
def get_image(self, image_id):
@@ -74,11 +78,14 @@
resp, body = self.get("images/%s" % str(image_id))
self.expected_success(200, resp)
body = json.loads(body)
+ self.validate_response(schema.get_image, resp, body)
return resp, body['image']
def delete_image(self, image_id):
"""Deletes the provided image."""
- return self.delete("images/%s" % str(image_id))
+ resp, body = self.delete("images/%s" % str(image_id))
+ self.validate_response(schema.delete, resp, body)
+ return resp, body
def wait_for_image_status(self, image_id, status):
"""Waits for an image to reach a given status."""
@@ -88,6 +95,7 @@
"""Lists all metadata items for an image."""
resp, body = self.get("images/%s/metadata" % str(image_id))
body = json.loads(body)
+ self.validate_response(schema.image_metadata, resp, body)
return resp, body['metadata']
def set_image_metadata(self, image_id, meta):
@@ -95,6 +103,7 @@
post_body = json.dumps({'metadata': meta})
resp, body = self.put('images/%s/metadata' % str(image_id), post_body)
body = json.loads(body)
+ self.validate_response(schema.image_metadata, resp, body)
return resp, body['metadata']
def update_image_metadata(self, image_id, meta):
@@ -102,12 +111,14 @@
post_body = json.dumps({'metadata': meta})
resp, body = self.post('images/%s/metadata' % str(image_id), post_body)
body = json.loads(body)
+ self.validate_response(schema.image_metadata, resp, body)
return resp, body['metadata']
def get_image_metadata_item(self, image_id, key):
"""Returns the value for a specific image metadata key."""
resp, body = self.get("images/%s/metadata/%s" % (str(image_id), key))
body = json.loads(body)
+ self.validate_response(schema.image_meta_item, resp, body)
return resp, body['meta']
def set_image_metadata_item(self, image_id, key, meta):
@@ -116,12 +127,14 @@
resp, body = self.put('images/%s/metadata/%s' % (str(image_id), key),
post_body)
body = json.loads(body)
+ self.validate_response(schema.image_meta_item, resp, body)
return resp, body['meta']
def delete_image_metadata_item(self, image_id, key):
"""Deletes a single image metadata key/value pair."""
resp, body = self.delete("images/%s/metadata/%s" %
(str(image_id), key))
+ self.validate_response(schema.delete, resp, body)
return resp, body
def is_resource_deleted(self, id):
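
Several ``delete_*`` methods above now unpack the response and validate it
instead of returning the tuple from ``self.delete()`` directly. The schemas
they reference are typically status-only, along these lines (illustrative,
not the exact definitions)::

    # Delete calls usually return 202 or 204 with an empty body, so the
    # schema only pins down the expected status code(s).
    delete = {
        'status_code': [204],
    }
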
diff --git a/tempest/services/compute/json/instance_usage_audit_log_client.py b/tempest/services/compute/json/instance_usage_audit_log_client.py
index 1f6e988..4700ca7 100644
--- a/tempest/services/compute/json/instance_usage_audit_log_client.py
+++ b/tempest/services/compute/json/instance_usage_audit_log_client.py
@@ -15,6 +15,8 @@
import json
+from tempest.api_schema.compute.v2 import instance_usage_audit_logs \
+ as schema
from tempest.common import rest_client
from tempest import config
@@ -32,10 +34,13 @@
url = 'os-instance_usage_audit_log'
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.list_instance_usage_audit_log,
+ resp, body)
return resp, body["instance_usage_audit_logs"]
def get_instance_usage_audit_log(self, time_before):
url = 'os-instance_usage_audit_log/%s' % time_before
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.get_instance_usage_audit_log, resp, body)
return resp, body["instance_usage_audit_log"]
diff --git a/tempest/services/compute/json/interfaces_client.py b/tempest/services/compute/json/interfaces_client.py
index 9928b94..8d51123 100644
--- a/tempest/services/compute/json/interfaces_client.py
+++ b/tempest/services/compute/json/interfaces_client.py
@@ -16,6 +16,8 @@
import json
import time
+from tempest.api_schema.compute import interfaces as common_schema
+from tempest.api_schema.compute.v2 import interfaces as schema
from tempest.common import rest_client
from tempest import config
from tempest import exceptions
@@ -32,6 +34,7 @@
def list_interfaces(self, server):
resp, body = self.get('servers/%s/os-interface' % server)
body = json.loads(body)
+ self.validate_response(schema.list_interfaces, resp, body)
return resp, body['interfaceAttachments']
def create_interface(self, server, port_id=None, network_id=None,
@@ -58,6 +61,7 @@
def delete_interface(self, server, port_id):
resp, body = self.delete('servers/%s/os-interface/%s' % (server,
port_id))
+ self.validate_response(common_schema.delete_interface, resp, body)
return resp, body
def wait_for_interface_status(self, server, port_id, status):
diff --git a/tempest/services/compute/json/keypairs_client.py b/tempest/services/compute/json/keypairs_client.py
index 28f3c31..be93789 100644
--- a/tempest/services/compute/json/keypairs_client.py
+++ b/tempest/services/compute/json/keypairs_client.py
@@ -15,6 +15,8 @@
import json
+from tempest.api_schema.compute import keypairs as common_schema
+from tempest.api_schema.compute.v2 import keypairs as schema
from tempest.common import rest_client
from tempest import config
@@ -35,11 +37,13 @@
# servers, etc. A bug?
# For now we shall adhere to the spec, but the spec for keypairs
# is yet to be found
+ self.validate_response(common_schema.list_keypairs, resp, body)
return resp, body['keypairs']
def get_keypair(self, key_name):
resp, body = self.get("os-keypairs/%s" % str(key_name))
body = json.loads(body)
+ self.validate_response(schema.get_keypair, resp, body)
return resp, body['keypair']
def create_keypair(self, name, pub_key=None):
@@ -49,7 +53,10 @@
post_body = json.dumps(post_body)
resp, body = self.post("os-keypairs", body=post_body)
body = json.loads(body)
+ self.validate_response(schema.create_keypair, resp, body)
return resp, body['keypair']
def delete_keypair(self, key_name):
- return self.delete("os-keypairs/%s" % str(key_name))
+ resp, body = self.delete("os-keypairs/%s" % str(key_name))
+ self.validate_response(schema.delete_keypair, resp, body)
+ return resp, body
diff --git a/tempest/services/compute/json/limits_client.py b/tempest/services/compute/json/limits_client.py
index 1493718..e503bef 100644
--- a/tempest/services/compute/json/limits_client.py
+++ b/tempest/services/compute/json/limits_client.py
@@ -15,6 +15,7 @@
import json
+from tempest.api_schema.compute.v2 import limits as schema
from tempest.common import rest_client
from tempest import config
@@ -30,11 +31,13 @@
def get_absolute_limits(self):
resp, body = self.get("limits")
body = json.loads(body)
+ self.validate_response(schema.get_limit, resp, body)
return resp, body['limits']['absolute']
def get_specific_absolute_limit(self, absolute_limit):
resp, body = self.get("limits")
body = json.loads(body)
+ self.validate_response(schema.get_limit, resp, body)
if absolute_limit not in body['limits']['absolute']:
return None
else:
diff --git a/tempest/services/compute/json/migrations_client.py b/tempest/services/compute/json/migrations_client.py
new file mode 100644
index 0000000..beef5d2
--- /dev/null
+++ b/tempest/services/compute/json/migrations_client.py
@@ -0,0 +1,41 @@
+# Copyright 2014 NEC Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import urllib
+
+from tempest.api_schema.compute import migrations as schema
+from tempest.common import rest_client
+from tempest import config
+
+CONF = config.CONF
+
+
+class MigrationsClientJSON(rest_client.RestClient):
+
+ def __init__(self, auth_provider):
+ super(MigrationsClientJSON, self).__init__(auth_provider)
+ self.service = CONF.compute.catalog_type
+
+ def list_migrations(self, params=None):
+ """Lists all migrations."""
+
+ url = 'os-migrations'
+ if params:
+ url += '?%s' % urllib.urlencode(params)
+
+ resp, body = self.get(url)
+ body = json.loads(body)
+ self.validate_response(schema.list_migrations, resp, body)
+ return resp, body['migrations']
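
``list_migrations()`` builds its query string with ``urllib.urlencode``, so any
supported filters can be passed as a plain dict. A hypothetical call (the
filter names are examples, not taken from this change)::

    # migrations_client is assumed to be a MigrationsClientJSON instance.
    resp, migrations = migrations_client.list_migrations(
        {'status': 'finished', 'host': 'compute-1'})
    # Issues GET os-migrations?status=finished&host=compute-1
    # (query-parameter order may vary) and returns body['migrations'].
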
diff --git a/tempest/services/compute/json/quotas_client.py b/tempest/services/compute/json/quotas_client.py
index 459ab6d..7e828d8 100644
--- a/tempest/services/compute/json/quotas_client.py
+++ b/tempest/services/compute/json/quotas_client.py
@@ -15,6 +15,7 @@
import json
+from tempest.api_schema.compute.v2 import quotas as schema
from tempest.common import rest_client
from tempest import config
@@ -27,12 +28,15 @@
super(QuotasClientJSON, self).__init__(auth_provider)
self.service = CONF.compute.catalog_type
- def get_quota_set(self, tenant_id):
+ def get_quota_set(self, tenant_id, user_id=None):
"""List the quota set for a tenant."""
url = 'os-quota-sets/%s' % str(tenant_id)
+ if user_id:
+ url += '?user_id=%s' % str(user_id)
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.quota_set, resp, body)
return resp, body['quota_set']
def get_default_quota_set(self, tenant_id):
@@ -41,10 +45,11 @@
url = 'os-quota-sets/%s/defaults' % str(tenant_id)
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.quota_set, resp, body)
return resp, body['quota_set']
- def update_quota_set(self, tenant_id, force=None,
- injected_file_content_bytes=None,
+ def update_quota_set(self, tenant_id, user_id=None,
+ force=None, injected_file_content_bytes=None,
metadata_items=None, ram=None, floating_ips=None,
fixed_ips=None, key_pairs=None, instances=None,
security_group_rules=None, injected_files=None,
@@ -96,7 +101,20 @@
post_body['security_groups'] = security_groups
post_body = json.dumps({'quota_set': post_body})
- resp, body = self.put('os-quota-sets/%s' % str(tenant_id), post_body)
+
+ if user_id:
+ resp, body = self.put('os-quota-sets/%s?user_id=%s' %
+ (str(tenant_id), str(user_id)), post_body)
+ else:
+ resp, body = self.put('os-quota-sets/%s' % str(tenant_id),
+ post_body)
body = json.loads(body)
+ self.validate_response(schema.quota_set_update, resp, body)
return resp, body['quota_set']
+
+ def delete_quota_set(self, tenant_id):
+ """Delete the tenant's quota set."""
+ resp, body = self.delete('os-quota-sets/%s' % str(tenant_id))
+ self.validate_response(schema.delete_quota, resp, body)
+ return resp, body
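
With the new ``user_id`` argument the quota calls can target a single user
within a tenant, and ``delete_quota_set()`` removes the tenant's quota set.
A hypothetical usage sketch (the ids are placeholders)::

    # quotas_client is assumed to be a QuotasClientJSON instance.
    tenant_id = 'demo-tenant-id'
    user_id = 'demo-user-id'

    # GET os-quota-sets/<tenant_id>?user_id=<user_id>
    resp, quota_set = quotas_client.get_quota_set(tenant_id, user_id=user_id)

    # PUT os-quota-sets/<tenant_id>?user_id=<user_id>
    resp, quota_set = quotas_client.update_quota_set(
        tenant_id, user_id=user_id, ram=20480, instances=20)

    # DELETE os-quota-sets/<tenant_id>
    resp, body = quotas_client.delete_quota_set(tenant_id)
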
diff --git a/tempest/services/compute/json/security_groups_client.py b/tempest/services/compute/json/security_groups_client.py
index 899d4ef..c19baf3 100644
--- a/tempest/services/compute/json/security_groups_client.py
+++ b/tempest/services/compute/json/security_groups_client.py
@@ -16,6 +16,7 @@
import json
import urllib
+from tempest.api_schema.compute.v2 import security_groups as schema
from tempest.common import rest_client
from tempest import config
from tempest import exceptions
@@ -38,6 +39,7 @@
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.list_security_groups, resp, body)
return resp, body['security_groups']
def get_security_group(self, security_group_id):
@@ -45,6 +47,7 @@
url = "os-security-groups/%s" % str(security_group_id)
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.get_security_group, resp, body)
return resp, body['security_group']
def create_security_group(self, name, description):
@@ -60,6 +63,7 @@
post_body = json.dumps({'security_group': post_body})
resp, body = self.post('os-security-groups', post_body)
body = json.loads(body)
+ self.validate_response(schema.get_security_group, resp, body)
return resp, body['security_group']
def update_security_group(self, security_group_id, name=None,
@@ -79,6 +83,7 @@
resp, body = self.put('os-security-groups/%s' % str(security_group_id),
post_body)
body = json.loads(body)
+ self.validate_response(schema.update_security_group, resp, body)
return resp, body['security_group']
def delete_security_group(self, security_group_id):
@@ -109,16 +114,21 @@
url = 'os-security-group-rules'
resp, body = self.post(url, post_body)
body = json.loads(body)
+ self.validate_response(schema.create_security_group_rule, resp, body)
return resp, body['security_group_rule']
def delete_security_group_rule(self, group_rule_id):
"""Deletes the provided Security Group rule."""
- return self.delete('os-security-group-rules/%s' % str(group_rule_id))
+ resp, body = self.delete('os-security-group-rules/%s' %
+ str(group_rule_id))
+ self.validate_response(schema.delete_security_group_rule, resp, body)
+ return resp, body
def list_security_group_rules(self, security_group_id):
"""List all rules for a security group."""
resp, body = self.get('os-security-groups')
body = json.loads(body)
+ self.validate_response(schema.list_security_groups, resp, body)
for sg in body['security_groups']:
if sg['id'] == security_group_id:
return resp, sg['rules']
diff --git a/tempest/services/compute/json/servers_client.py b/tempest/services/compute/json/servers_client.py
index 70d075a..92cfc8e 100644
--- a/tempest/services/compute/json/servers_client.py
+++ b/tempest/services/compute/json/servers_client.py
@@ -18,6 +18,8 @@
import time
import urllib
+from tempest.api_schema.compute import servers as common_schema
+from tempest.api_schema.compute.v2 import servers as schema
from tempest.common import rest_client
from tempest.common import waiters
from tempest import config
@@ -77,7 +79,12 @@
value = kwargs.get(key)
if value is not None:
post_body[post_param] = value
- post_body = json.dumps({'server': post_body})
+ post_body = {'server': post_body}
+
+ if 'sched_hints' in kwargs:
+ hints = {'os:scheduler_hints': kwargs.get('sched_hints')}
+ post_body = dict(post_body.items() + hints.items())
+ post_body = json.dumps(post_body)
resp, body = self.post('servers', post_body)
body = json.loads(body)
@@ -85,6 +92,7 @@
# with return reservation id set True
if 'reservation_id' in body:
return resp, body
+ self.validate_response(schema.create_server, resp, body)
return resp, body['server']
def update_server(self, server_id, name=None, meta=None, accessIPv4=None,
@@ -118,6 +126,7 @@
post_body = json.dumps({'server': post_body})
resp, body = self.put("servers/%s" % str(server_id), post_body)
body = json.loads(body)
+ self.validate_response(schema.update_server, resp, body)
return resp, body['server']
def get_server(self, server_id):
@@ -128,7 +137,9 @@
def delete_server(self, server_id):
"""Deletes the given server."""
- return self.delete("servers/%s" % str(server_id))
+ resp, body = self.delete("servers/%s" % str(server_id))
+ self.validate_response(common_schema.delete_server, resp, body)
+ return resp, body
def list_servers(self, params=None):
"""Lists all servers for a user."""
@@ -139,6 +150,7 @@
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(common_schema.list_servers, resp, body)
return resp, body
def list_servers_with_detail(self, params=None):
@@ -188,14 +200,24 @@
resp, body = self.get("servers/%s/ips/%s" %
(str(server_id), network_id))
body = json.loads(body)
+ self.validate_response(schema.list_addresses_by_network, resp, body)
return resp, body
- def action(self, server_id, action_name, response_key, **kwargs):
+ def action(self, server_id, action_name, response_key,
+ schema=None, **kwargs):
post_body = json.dumps({action_name: kwargs})
resp, body = self.post('servers/%s/action' % str(server_id),
post_body)
if response_key is not None:
- body = json.loads(body)[response_key]
+ body = json.loads(body)
+            # Check for schema being 'None' because if we do not have a
+            # schema implemented for a given server action yet, the caller
+            # can pass 'None' to skip the validation. Once every server
+            # action has its schema implemented, this check can be removed
+            # so that all actions validate their response.
+ if schema is not None:
+ self.validate_response(schema, resp, body)
+ body = body[response_key]
return resp, body
def create_backup(self, server_id, backup_type, rotation, name):
@@ -214,6 +236,7 @@
resp, body = self.get("servers/%s/os-server-password" %
str(server_id))
body = json.loads(body)
+ self.validate_response(common_schema.get_password, resp, body)
return resp, body
def delete_password(self, server_id):
@@ -253,13 +276,10 @@
"""Reverts a server back to its original flavor."""
return self.action(server_id, 'revertResize', None, **kwargs)
- def create_image(self, server_id, name):
- """Creates an image of the given server."""
- return self.action(server_id, 'createImage', None, name=name)
-
def list_server_metadata(self, server_id):
resp, body = self.get("servers/%s/metadata" % str(server_id))
body = json.loads(body)
+ self.validate_response(common_schema.list_server_metadata, resp, body)
return resp, body['metadata']
def set_server_metadata(self, server_id, meta, no_metadata_field=False):
@@ -270,6 +290,7 @@
resp, body = self.put('servers/%s/metadata' % str(server_id),
post_body)
body = json.loads(body)
+ self.validate_response(common_schema.set_server_metadata, resp, body)
return resp, body['metadata']
def update_server_metadata(self, server_id, meta):
@@ -282,6 +303,8 @@
def get_server_metadata_item(self, server_id, key):
resp, body = self.get("servers/%s/metadata/%s" % (str(server_id), key))
body = json.loads(body)
+ self.validate_response(schema.set_get_server_metadata_item,
+ resp, body)
return resp, body['meta']
def set_server_metadata_item(self, server_id, key, meta):
@@ -289,11 +312,15 @@
resp, body = self.put('servers/%s/metadata/%s' % (str(server_id), key),
post_body)
body = json.loads(body)
+ self.validate_response(schema.set_get_server_metadata_item,
+ resp, body)
return resp, body['meta']
def delete_server_metadata_item(self, server_id, key):
resp, body = self.delete("servers/%s/metadata/%s" %
(str(server_id), key))
+ self.validate_response(common_schema.delete_server_metadata_item,
+ resp, body)
return resp, body
def stop(self, server_id, **kwargs):
@@ -312,12 +339,15 @@
})
resp, body = self.post('servers/%s/os-volume_attachments' % server_id,
post_body)
+ body = json.loads(body)
+ self.validate_response(schema.attach_volume, resp, body)
return resp, body
def detach_volume(self, server_id, volume_id):
"""Detaches a volume from a server instance."""
resp, body = self.delete('servers/%s/os-volume_attachments/%s' %
(server_id, volume_id))
+ self.validate_response(schema.detach_volume, resp, body)
return resp, body
def add_security_group(self, server_id, name):
@@ -396,7 +426,9 @@
"""
resp, body = self.get('/'.join(['servers', server_id,
'os-virtual-interfaces']))
- return resp, json.loads(body)
+ body = json.loads(body)
+ self.validate_response(schema.list_virtual_interfaces, resp, body)
+ return resp, body
def rescue_server(self, server_id, **kwargs):
"""Rescue the provided server."""
@@ -440,3 +472,9 @@
def inject_network_info(self, server_id, **kwargs):
"""Inject the Network Info into server"""
return self.action(server_id, 'injectNetworkInfo', None, **kwargs)
+
+ def get_vnc_console(self, server_id, console_type):
+ """Get URL of VNC console."""
+ return self.action(server_id, "os-getVNCConsole",
+ "console", common_schema.get_vnc_console,
+ type=console_type)
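
The optional ``schema`` argument added to ``action()`` lets individual server
actions opt in to response validation one at a time; actions without a schema
yet simply pass ``None``. A hypothetical usage sketch against the patched v2
servers client (``server_id`` and the client instance are placeholders)::

    # servers_client is assumed to be an instance of the v2 servers client.
    server_id = 'my-server-id'

    # get_vnc_console() passes common_schema.get_vnc_console, so both the
    # status code and the returned body are validated:
    resp, console = servers_client.get_vnc_console(server_id, 'novnc')
    # console is body['console'], e.g. {'type': 'novnc', 'url': 'http://...'}

    # An action that has no schema yet skips validation:
    resp, output = servers_client.action(server_id, 'os-getConsoleOutput',
                                         'output', schema=None, length=10)
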
diff --git a/tempest/services/compute/json/services_client.py b/tempest/services/compute/json/services_client.py
index 1ab25ec..d58ca6f 100644
--- a/tempest/services/compute/json/services_client.py
+++ b/tempest/services/compute/json/services_client.py
@@ -17,6 +17,7 @@
import json
import urllib
+from tempest.api_schema.compute import services as schema
from tempest.common import rest_client
from tempest import config
@@ -36,6 +37,7 @@
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.list_services, resp, body)
return resp, body['services']
def enable_service(self, host_name, binary):
@@ -47,6 +49,7 @@
post_body = json.dumps({'binary': binary, 'host': host_name})
resp, body = self.put('os-services/enable', post_body)
body = json.loads(body)
+ self.validate_response(schema.enable_service, resp, body)
return resp, body['service']
def disable_service(self, host_name, binary):
diff --git a/tempest/services/compute/json/tenant_usages_client.py b/tempest/services/compute/json/tenant_usages_client.py
index f3a67dd..f8adae7 100644
--- a/tempest/services/compute/json/tenant_usages_client.py
+++ b/tempest/services/compute/json/tenant_usages_client.py
@@ -16,6 +16,7 @@
import json
import urllib
+from tempest.api_schema.compute.v2 import tenant_usages as schema
from tempest.common import rest_client
from tempest import config
@@ -35,6 +36,7 @@
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.list_tenant, resp, body)
return resp, body['tenant_usages'][0]
def get_tenant_usage(self, tenant_id, params=None):
@@ -44,4 +46,5 @@
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.get_tenant, resp, body)
return resp, body['tenant_usage']
diff --git a/tempest/services/compute/json/volumes_extensions_client.py b/tempest/services/compute/json/volumes_extensions_client.py
index 5ef11ed..d1014af 100644
--- a/tempest/services/compute/json/volumes_extensions_client.py
+++ b/tempest/services/compute/json/volumes_extensions_client.py
@@ -17,6 +17,7 @@
import time
import urllib
+from tempest.api_schema.compute.v2 import volumes as schema
from tempest.common import rest_client
from tempest import config
from tempest import exceptions
@@ -41,6 +42,7 @@
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.list_volumes, resp, body)
return resp, body['volumes']
def list_volumes_with_detail(self, params=None):
@@ -51,6 +53,7 @@
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.list_volumes, resp, body)
return resp, body['volumes']
def get_volume(self, volume_id):
@@ -58,6 +61,7 @@
url = "os-volumes/%s" % str(volume_id)
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.create_get_volume, resp, body)
return resp, body['volume']
def create_volume(self, size, **kwargs):
@@ -77,11 +81,14 @@
post_body = json.dumps({'volume': post_body})
resp, body = self.post('os-volumes', post_body)
body = json.loads(body)
+ self.validate_response(schema.create_get_volume, resp, body)
return resp, body['volume']
def delete_volume(self, volume_id):
"""Deletes the Specified Volume."""
- return self.delete("os-volumes/%s" % str(volume_id))
+ resp, body = self.delete("os-volumes/%s" % str(volume_id))
+ self.validate_response(schema.delete_volume, resp, body)
+ return resp, body
def wait_for_volume_status(self, volume_id, status):
"""Waits for a Volume to reach a given status."""
diff --git a/tempest/services/compute/v3/json/agents_client.py b/tempest/services/compute/v3/json/agents_client.py
index 6893af2..48be54c 100644
--- a/tempest/services/compute/v3/json/agents_client.py
+++ b/tempest/services/compute/v3/json/agents_client.py
@@ -15,6 +15,8 @@
import json
import urllib
+from tempest.api_schema.compute import agents as common_schema
+from tempest.api_schema.compute.v3 import agents as schema
from tempest.common import rest_client
from tempest import config
@@ -33,7 +35,9 @@
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
- return resp, self._parse_resp(body)
+ body = json.loads(body)
+ self.validate_response(common_schema.list_agents, resp, body)
+ return resp, body['agents']
def create_agent(self, **kwargs):
"""Create an agent build."""
@@ -43,7 +47,9 @@
def delete_agent(self, agent_id):
"""Delete an existing agent build."""
- return self.delete('os-agents/%s' % str(agent_id))
+ resp, body = self.delete("os-agents/%s" % str(agent_id))
+ self.validate_response(schema.delete_agent, resp, body)
+ return resp, body
def update_agent(self, agent_id, **kwargs):
"""Update an agent build."""
diff --git a/tempest/services/compute/v3/json/aggregates_client.py b/tempest/services/compute/v3/json/aggregates_client.py
index fddf5df..d9b7930 100644
--- a/tempest/services/compute/v3/json/aggregates_client.py
+++ b/tempest/services/compute/v3/json/aggregates_client.py
@@ -15,6 +15,8 @@
import json
+from tempest.api_schema.compute import aggregates as schema
+from tempest.api_schema.compute.v3 import aggregates as v3_schema
from tempest.common import rest_client
from tempest import config
from tempest import exceptions
@@ -32,12 +34,14 @@
"""Get aggregate list."""
resp, body = self.get("os-aggregates")
body = json.loads(body)
+ self.validate_response(schema.list_aggregates, resp, body)
return resp, body['aggregates']
def get_aggregate(self, aggregate_id):
"""Get details of the given aggregate."""
resp, body = self.get("os-aggregates/%s" % str(aggregate_id))
body = json.loads(body)
+ self.validate_response(schema.get_aggregate, resp, body)
return resp, body['aggregate']
def create_aggregate(self, **kwargs):
@@ -46,6 +50,7 @@
resp, body = self.post('os-aggregates', post_body)
body = json.loads(body)
+ self.validate_response(v3_schema.create_aggregate, resp, body)
return resp, body['aggregate']
def update_aggregate(self, aggregate_id, name, availability_zone=None):
@@ -58,11 +63,14 @@
resp, body = self.put('os-aggregates/%s' % str(aggregate_id), put_body)
body = json.loads(body)
+ self.validate_response(schema.update_aggregate, resp, body)
return resp, body['aggregate']
def delete_aggregate(self, aggregate_id):
"""Deletes the given aggregate."""
- return self.delete("os-aggregates/%s" % str(aggregate_id))
+ resp, body = self.delete("os-aggregates/%s" % str(aggregate_id))
+ self.validate_response(v3_schema.delete_aggregate, resp, body)
+ return resp, body
def is_resource_deleted(self, id):
try:
@@ -80,6 +88,7 @@
resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
post_body)
body = json.loads(body)
+ self.validate_response(v3_schema.aggregate_add_remove_host, resp, body)
return resp, body['aggregate']
def remove_host(self, aggregate_id, host):
@@ -91,6 +100,7 @@
resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
post_body)
body = json.loads(body)
+ self.validate_response(v3_schema.aggregate_add_remove_host, resp, body)
return resp, body['aggregate']
def set_metadata(self, aggregate_id, meta):
@@ -102,4 +112,5 @@
resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
post_body)
body = json.loads(body)
+ self.validate_response(schema.aggregate_set_metadata, resp, body)
return resp, body['aggregate']
diff --git a/tempest/services/compute/v3/json/availability_zone_client.py b/tempest/services/compute/v3/json/availability_zone_client.py
index bad2de9..bf74e68 100644
--- a/tempest/services/compute/v3/json/availability_zone_client.py
+++ b/tempest/services/compute/v3/json/availability_zone_client.py
@@ -15,6 +15,7 @@
import json
+from tempest.api_schema.compute.v3 import availability_zone as schema
from tempest.common import rest_client
from tempest import config
@@ -31,9 +32,12 @@
def get_availability_zone_list(self):
resp, body = self.get('os-availability-zone')
body = json.loads(body)
+ self.validate_response(schema.get_availability_zone_list, resp, body)
return resp, body['availability_zone_info']
def get_availability_zone_list_detail(self):
resp, body = self.get('os-availability-zone/detail')
body = json.loads(body)
+ self.validate_response(schema.get_availability_zone_list_detail, resp,
+ body)
return resp, body['availability_zone_info']
diff --git a/tempest/services/compute/v3/json/certificates_client.py b/tempest/services/compute/v3/json/certificates_client.py
index f8beeb9..be9b3c3 100644
--- a/tempest/services/compute/v3/json/certificates_client.py
+++ b/tempest/services/compute/v3/json/certificates_client.py
@@ -15,6 +15,8 @@
import json
+from tempest.api_schema.compute import certificates as schema
+from tempest.api_schema.compute.v3 import certificates as v3schema
from tempest.common import rest_client
from tempest import config
@@ -31,6 +33,7 @@
url = "os-certificates/%s" % (id)
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.get_certificate, resp, body)
return resp, body['certificate']
def create_certificate(self):
@@ -38,4 +41,5 @@
url = "os-certificates"
resp, body = self.post(url, None)
body = json.loads(body)
+ self.validate_response(v3schema.create_certificate, resp, body)
return resp, body['certificate']
diff --git a/tempest/services/compute/v3/json/extensions_client.py b/tempest/services/compute/v3/json/extensions_client.py
index 46f17a4..13292db 100644
--- a/tempest/services/compute/v3/json/extensions_client.py
+++ b/tempest/services/compute/v3/json/extensions_client.py
@@ -15,6 +15,7 @@
import json
+from tempest.api_schema.compute.v3 import extensions as schema
from tempest.common import rest_client
from tempest import config
@@ -31,6 +32,7 @@
url = 'extensions'
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.list_extensions, resp, body)
return resp, body['extensions']
def is_enabled(self, extension):
diff --git a/tempest/services/compute/v3/json/flavors_client.py b/tempest/services/compute/v3/json/flavors_client.py
index 656bd84..5afab5a 100644
--- a/tempest/services/compute/v3/json/flavors_client.py
+++ b/tempest/services/compute/v3/json/flavors_client.py
@@ -16,6 +16,11 @@
import json
import urllib
+from tempest.api_schema.compute import flavors as common_schema
+from tempest.api_schema.compute import flavors_access as schema_access
+from tempest.api_schema.compute import flavors_extra_specs \
+ as schema_extra_specs
+from tempest.api_schema.compute.v3 import flavors as v3schema
from tempest.common import rest_client
from tempest import config
@@ -35,6 +40,7 @@
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(common_schema.list_flavors, resp, body)
return resp, body['flavors']
def list_flavors_with_detail(self, params=None):
@@ -44,11 +50,13 @@
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(v3schema.list_flavors_details, resp, body)
return resp, body['flavors']
def get_flavor_details(self, flavor_id):
resp, body = self.get("flavors/%s" % str(flavor_id))
body = json.loads(body)
+ self.validate_response(v3schema.get_flavor_details, resp, body)
return resp, body['flavor']
def create_flavor(self, name, ram, vcpus, disk, flavor_id, **kwargs):
@@ -72,11 +80,14 @@
resp, body = self.post('flavors', post_body)
body = json.loads(body)
+ self.validate_response(v3schema.create_flavor_details, resp, body)
return resp, body['flavor']
def delete_flavor(self, flavor_id):
"""Deletes the given flavor."""
- return self.delete("flavors/%s" % str(flavor_id))
+ resp, body = self.delete("flavors/{0}".format(flavor_id))
+ self.validate_response(v3schema.delete_flavor, resp, body)
+ return resp, body
def is_resource_deleted(self, id):
# Did not use get_flavor_details(id) for verification as it gives
@@ -94,12 +105,15 @@
resp, body = self.post('flavors/%s/flavor-extra-specs' % flavor_id,
post_body)
body = json.loads(body)
+ self.validate_response(v3schema.set_flavor_extra_specs, resp, body)
return resp, body['extra_specs']
def get_flavor_extra_spec(self, flavor_id):
"""Gets extra Specs details of the mentioned flavor."""
resp, body = self.get('flavors/%s/flavor-extra-specs' % flavor_id)
body = json.loads(body)
+ self.validate_response(schema_extra_specs.flavor_extra_specs,
+ resp, body)
return resp, body['extra_specs']
def get_flavor_extra_spec_with_key(self, flavor_id, key):
@@ -107,6 +121,8 @@
resp, body = self.get('flavors/%s/flavor-extra-specs/%s' %
(str(flavor_id), key))
body = json.loads(body)
+ self.validate_response(schema_extra_specs.flavor_extra_specs_key,
+ resp, body)
return resp, body
def update_flavor_extra_spec(self, flavor_id, key, **kwargs):
@@ -114,17 +130,23 @@
resp, body = self.put('flavors/%s/flavor-extra-specs/%s' %
(flavor_id, key), json.dumps(kwargs))
body = json.loads(body)
+ self.validate_response(schema_extra_specs.flavor_extra_specs_key,
+ resp, body)
return resp, body
def unset_flavor_extra_spec(self, flavor_id, key):
"""Unsets extra Specs from the mentioned flavor."""
- return self.delete('flavors/%s/flavor-extra-specs/%s' %
- (str(flavor_id), key))
+ resp, body = self.delete('flavors/%s/flavor-extra-specs/%s' %
+ (str(flavor_id), key))
+ self.validate_response(v3schema.unset_flavor_extra_specs, resp, body)
+ return resp, body
def list_flavor_access(self, flavor_id):
"""Gets flavor access information given the flavor id."""
resp, body = self.get('flavors/%s/flavor-access' % flavor_id)
body = json.loads(body)
+ self.validate_response(schema_access.add_remove_list_flavor_access,
+ resp, body)
return resp, body['flavor_access']
def add_flavor_access(self, flavor_id, tenant_id):
@@ -137,6 +159,8 @@
post_body = json.dumps(post_body)
resp, body = self.post('flavors/%s/action' % flavor_id, post_body)
body = json.loads(body)
+ self.validate_response(schema_access.add_remove_list_flavor_access,
+ resp, body)
return resp, body['flavor_access']
def remove_flavor_access(self, flavor_id, tenant_id):
@@ -149,4 +173,6 @@
post_body = json.dumps(post_body)
resp, body = self.post('flavors/%s/action' % flavor_id, post_body)
body = json.loads(body)
+ self.validate_response(schema_access.add_remove_list_flavor_access,
+ resp, body)
return resp, body['flavor_access']
diff --git a/tempest/services/compute/v3/json/hosts_client.py b/tempest/services/compute/v3/json/hosts_client.py
index e27c7c6..d2eb43d 100644
--- a/tempest/services/compute/v3/json/hosts_client.py
+++ b/tempest/services/compute/v3/json/hosts_client.py
@@ -15,6 +15,8 @@
import json
import urllib
+from tempest.api_schema.compute import hosts as schema
+from tempest.api_schema.compute.v3 import hosts as v3_schema
from tempest.common import rest_client
from tempest import config
@@ -36,6 +38,7 @@
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.list_hosts, resp, body)
return resp, body['hosts']
def show_host_detail(self, hostname):
@@ -43,6 +46,7 @@
resp, body = self.get("os-hosts/%s" % str(hostname))
body = json.loads(body)
+ self.validate_response(schema.show_host_detail, resp, body)
return resp, body['host']
def update_host(self, hostname, **kwargs):
@@ -57,6 +61,7 @@
resp, body = self.put("os-hosts/%s" % str(hostname), request_body)
body = json.loads(body)
+ self.validate_response(v3_schema.update_host, resp, body)
return resp, body
def startup_host(self, hostname):
@@ -64,6 +69,7 @@
resp, body = self.get("os-hosts/%s/startup" % str(hostname))
body = json.loads(body)
+ self.validate_response(v3_schema.startup_host, resp, body)
return resp, body['host']
def shutdown_host(self, hostname):
@@ -71,6 +77,7 @@
resp, body = self.get("os-hosts/%s/shutdown" % str(hostname))
body = json.loads(body)
+ self.validate_response(v3_schema.shutdown_host, resp, body)
return resp, body['host']
def reboot_host(self, hostname):
@@ -78,4 +85,5 @@
resp, body = self.get("os-hosts/%s/reboot" % str(hostname))
body = json.loads(body)
+ self.validate_response(v3_schema.reboot_host, resp, body)
return resp, body['host']
diff --git a/tempest/services/compute/v3/json/hypervisor_client.py b/tempest/services/compute/v3/json/hypervisor_client.py
index 30e391f..51468c9 100644
--- a/tempest/services/compute/v3/json/hypervisor_client.py
+++ b/tempest/services/compute/v3/json/hypervisor_client.py
@@ -15,6 +15,8 @@
import json
+from tempest.api_schema.compute import hypervisors as common_schema
+from tempest.api_schema.compute.v3 import hypervisors as v3schema
from tempest.common import rest_client
from tempest import config
@@ -31,40 +33,49 @@
"""List hypervisors information."""
resp, body = self.get('os-hypervisors')
body = json.loads(body)
+ self.validate_response(common_schema.common_hypervisors_detail,
+ resp, body)
return resp, body['hypervisors']
def get_hypervisor_list_details(self):
"""Show detailed hypervisors information."""
resp, body = self.get('os-hypervisors/detail')
body = json.loads(body)
+ self.validate_response(v3schema.list_hypervisors_detail, resp, body)
return resp, body['hypervisors']
def get_hypervisor_show_details(self, hyper_id):
"""Display the details of the specified hypervisor."""
resp, body = self.get('os-hypervisors/%s' % hyper_id)
body = json.loads(body)
+ self.validate_response(v3schema.show_hypervisor, resp, body)
return resp, body['hypervisor']
def get_hypervisor_servers(self, hyper_name):
"""List instances belonging to the specified hypervisor."""
resp, body = self.get('os-hypervisors/%s/servers' % hyper_name)
body = json.loads(body)
+ self.validate_response(v3schema.hypervisors_servers, resp, body)
return resp, body['hypervisor']
def get_hypervisor_stats(self):
"""Get hypervisor statistics over all compute nodes."""
resp, body = self.get('os-hypervisors/statistics')
body = json.loads(body)
+ self.validate_response(common_schema.hypervisor_statistics, resp, body)
return resp, body['hypervisor_statistics']
def get_hypervisor_uptime(self, hyper_id):
"""Display the uptime of the specified hypervisor."""
resp, body = self.get('os-hypervisors/%s/uptime' % hyper_id)
body = json.loads(body)
+ self.validate_response(common_schema.hypervisor_uptime, resp, body)
return resp, body['hypervisor']
def search_hypervisor(self, hyper_name):
"""Search specified hypervisor."""
resp, body = self.get('os-hypervisors/search?query=%s' % hyper_name)
body = json.loads(body)
+ self.validate_response(common_schema.common_hypervisors_detail,
+ resp, body)
return resp, body['hypervisors']
diff --git a/tempest/services/compute/v3/json/interfaces_client.py b/tempest/services/compute/v3/json/interfaces_client.py
index b45426c..77b3179 100644
--- a/tempest/services/compute/v3/json/interfaces_client.py
+++ b/tempest/services/compute/v3/json/interfaces_client.py
@@ -16,6 +16,8 @@
import json
import time
+from tempest.api_schema.compute import interfaces as common_schema
+from tempest.api_schema.compute.v3 import interfaces as schema
from tempest.common import rest_client
from tempest import config
from tempest import exceptions
@@ -32,6 +34,7 @@
def list_interfaces(self, server):
resp, body = self.get('servers/%s/os-attach-interfaces' % server)
body = json.loads(body)
+ self.validate_response(schema.list_interfaces, resp, body)
return resp, body['interface_attachments']
def create_interface(self, server, port_id=None, network_id=None,
@@ -59,6 +62,7 @@
resp, body =\
self.delete('servers/%s/os-attach-interfaces/%s' % (server,
port_id))
+ self.validate_response(common_schema.delete_interface, resp, body)
return resp, body
def wait_for_interface_status(self, server, port_id, status):
diff --git a/tempest/services/compute/v3/json/keypairs_client.py b/tempest/services/compute/v3/json/keypairs_client.py
index 9ca4885..f090d7d 100644
--- a/tempest/services/compute/v3/json/keypairs_client.py
+++ b/tempest/services/compute/v3/json/keypairs_client.py
@@ -15,6 +15,8 @@
import json
+from tempest.api_schema.compute import keypairs as common_schema
+from tempest.api_schema.compute.v3 import keypairs as schema
from tempest.common import rest_client
from tempest import config
@@ -35,11 +37,13 @@
# servers, etc. A bug?
# For now we shall adhere to the spec, but the spec for keypairs
# is yet to be found
+ self.validate_response(common_schema.list_keypairs, resp, body)
return resp, body['keypairs']
def get_keypair(self, key_name):
resp, body = self.get("keypairs/%s" % str(key_name))
body = json.loads(body)
+ self.validate_response(schema.get_keypair, resp, body)
return resp, body['keypair']
def create_keypair(self, name, pub_key=None):
@@ -49,7 +53,10 @@
post_body = json.dumps(post_body)
resp, body = self.post("keypairs", body=post_body)
body = json.loads(body)
+ self.validate_response(schema.create_keypair, resp, body)
return resp, body['keypair']
def delete_keypair(self, key_name):
- return self.delete("keypairs/%s" % str(key_name))
+ resp, body = self.delete("keypairs/%s" % str(key_name))
+ self.validate_response(schema.delete_keypair, resp, body)
+ return resp, body
diff --git a/tempest/services/compute/v3/json/migration_client.py b/tempest/services/compute/v3/json/migration_client.py
new file mode 100644
index 0000000..c821567
--- /dev/null
+++ b/tempest/services/compute/v3/json/migration_client.py
@@ -0,0 +1,41 @@
+# Copyright 2014 NEC Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import urllib
+
+from tempest.api_schema.compute import migrations as schema
+from tempest.common import rest_client
+from tempest import config
+
+CONF = config.CONF
+
+
+class MigrationsV3ClientJSON(rest_client.RestClient):
+
+ def __init__(self, auth_provider):
+ super(MigrationsV3ClientJSON, self).__init__(auth_provider)
+ self.service = CONF.compute.catalog_v3_type
+
+ def list_migrations(self, params=None):
+ """Lists all migrations."""
+
+ url = 'os-migrations'
+ if params:
+ url += '?%s' % urllib.urlencode(params)
+
+ resp, body = self.get(url)
+ body = json.loads(body)
+ self.validate_response(schema.list_migrations, resp, body)
+ return resp, body['migrations']
diff --git a/tempest/services/compute/v3/json/quotas_client.py b/tempest/services/compute/v3/json/quotas_client.py
index 32e31a3..37a8906 100644
--- a/tempest/services/compute/v3/json/quotas_client.py
+++ b/tempest/services/compute/v3/json/quotas_client.py
@@ -15,6 +15,7 @@
import json
+from tempest.api_schema.compute.v3 import quotas as schema
from tempest.common import rest_client
from tempest import config
@@ -27,12 +28,15 @@
super(QuotasV3ClientJSON, self).__init__(auth_provider)
self.service = CONF.compute.catalog_v3_type
- def get_quota_set(self, tenant_id):
+ def get_quota_set(self, tenant_id, user_id=None):
"""List the quota set for a tenant."""
url = 'os-quota-sets/%s' % str(tenant_id)
+ if user_id:
+ url += '?user_id=%s' % str(user_id)
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.quota_set, resp, body)
return resp, body['quota_set']
def get_quota_set_detail(self, tenant_id):
@@ -41,6 +45,7 @@
url = 'os-quota-sets/%s/detail' % str(tenant_id)
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.quota_set_detail, resp, body)
return resp, body['quota_set']
def get_default_quota_set(self, tenant_id):
@@ -49,9 +54,10 @@
url = 'os-quota-sets/%s/defaults' % str(tenant_id)
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.quota_set, resp, body)
return resp, body['quota_set']
- def update_quota_set(self, tenant_id, force=None,
+ def update_quota_set(self, tenant_id, user_id=None, force=None,
metadata_items=None, ram=None, floating_ips=None,
fixed_ips=None, key_pairs=None, instances=None,
security_group_rules=None, cores=None,
@@ -92,7 +98,20 @@
post_body['security_groups'] = security_groups
post_body = json.dumps({'quota_set': post_body})
- resp, body = self.put('os-quota-sets/%s' % str(tenant_id), post_body)
+
+ if user_id:
+ resp, body = self.put('os-quota-sets/%s?user_id=%s' %
+ (str(tenant_id), str(user_id)), post_body)
+ else:
+ resp, body = self.put('os-quota-sets/%s' % str(tenant_id),
+ post_body)
body = json.loads(body)
+ self.validate_response(schema.quota_set, resp, body)
return resp, body['quota_set']
+
+ def delete_quota_set(self, tenant_id):
+ """Delete the tenant's quota set."""
+ resp, body = self.delete('os-quota-sets/%s' % str(tenant_id))
+ self.validate_response(schema.delete_quota, resp, body)
+ return resp, body
diff --git a/tempest/services/compute/v3/json/servers_client.py b/tempest/services/compute/v3/json/servers_client.py
index 92eb09b..1990d39 100644
--- a/tempest/services/compute/v3/json/servers_client.py
+++ b/tempest/services/compute/v3/json/servers_client.py
@@ -19,6 +19,8 @@
import time
import urllib
+from tempest.api_schema.compute import servers as common_schema
+from tempest.api_schema.compute.v3 import servers as schema
from tempest.common import rest_client
from tempest.common import waiters
from tempest import config
@@ -91,6 +93,7 @@
# with return reservation id set True
if 'servers_reservation' in body:
return resp, body['servers_reservation']
+ self.validate_response(schema.create_server, resp, body)
return resp, body['server']
def update_server(self, server_id, name=None, meta=None, access_ip_v4=None,
@@ -123,6 +126,7 @@
post_body = json.dumps({'server': post_body})
resp, body = self.put("servers/%s" % str(server_id), post_body)
body = json.loads(body)
+ self.validate_response(schema.update_server, resp, body)
return resp, body['server']
def get_server(self, server_id):
@@ -133,7 +137,9 @@
def delete_server(self, server_id):
"""Deletes the given server."""
- return self.delete("servers/%s" % str(server_id))
+ resp, body = self.delete("servers/%s" % str(server_id))
+ self.validate_response(common_schema.delete_server, resp, body)
+ return resp, body
def list_servers(self, params=None):
"""Lists all servers for a user."""
@@ -144,6 +150,7 @@
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(common_schema.list_servers, resp, body)
return resp, body
def list_servers_with_detail(self, params=None):
@@ -193,6 +200,7 @@
resp, body = self.get("servers/%s/ips/%s" %
(str(server_id), network_id))
body = json.loads(body)
+ self.validate_response(schema.list_addresses_by_network, resp, body)
return resp, body
def action(self, server_id, action_name, response_key, **kwargs):
@@ -219,6 +227,7 @@
resp, body = self.get("servers/%s/os-server-password" %
str(server_id))
body = json.loads(body)
+ self.validate_response(common_schema.get_password, resp, body)
return resp, body
def delete_password(self, server_id):
@@ -278,6 +287,7 @@
def list_server_metadata(self, server_id):
resp, body = self.get("servers/%s/metadata" % str(server_id))
body = json.loads(body)
+ self.validate_response(common_schema.list_server_metadata, resp, body)
return resp, body['metadata']
def set_server_metadata(self, server_id, meta, no_metadata_field=False):
@@ -288,6 +298,7 @@
resp, body = self.put('servers/%s/metadata' % str(server_id),
post_body)
body = json.loads(body)
+ self.validate_response(common_schema.set_server_metadata, resp, body)
return resp, body['metadata']
def update_server_metadata(self, server_id, meta):
@@ -300,6 +311,8 @@
def get_server_metadata_item(self, server_id, key):
resp, body = self.get("servers/%s/metadata/%s" % (str(server_id), key))
body = json.loads(body)
+ self.validate_response(schema.set_get_server_metadata_item,
+ resp, body)
return resp, body['metadata']
def set_server_metadata_item(self, server_id, key, meta):
@@ -307,11 +320,15 @@
resp, body = self.put('servers/%s/metadata/%s' % (str(server_id), key),
post_body)
body = json.loads(body)
+ self.validate_response(schema.set_get_server_metadata_item,
+ resp, body)
return resp, body['metadata']
def delete_server_metadata_item(self, server_id, key):
resp, body = self.delete("servers/%s/metadata/%s" %
(str(server_id), key))
+ self.validate_response(common_schema.delete_server_metadata_item,
+ resp, body)
return resp, body
def stop(self, server_id, **kwargs):
@@ -322,12 +339,17 @@
def attach_volume(self, server_id, volume_id, device='/dev/vdz'):
"""Attaches a volume to a server instance."""
- return self.action(server_id, 'attach', None, volume_id=volume_id,
- device=device)
+ resp, body = self.action(server_id, 'attach', None,
+ volume_id=volume_id, device=device)
+ self.validate_response(schema.attach_detach_volume, resp, body)
+ return resp, body
def detach_volume(self, server_id, volume_id):
"""Detaches a volume from a server instance."""
- return self.action(server_id, 'detach', None, volume_id=volume_id)
+ resp, body = self.action(server_id, 'detach', None,
+ volume_id=volume_id)
+ self.validate_response(schema.attach_detach_volume, resp, body)
+ return resp, body
def live_migrate_server(self, server_id, dest_host, use_block_migration):
"""This should be called with administrator privileges ."""
@@ -406,19 +428,19 @@
str(server_id))
return resp, json.loads(body)
- def list_instance_actions(self, server_id):
+ def list_server_actions(self, server_id):
"""List the provided server action."""
- resp, body = self.get("servers/%s/os-instance-actions" %
+ resp, body = self.get("servers/%s/os-server-actions" %
str(server_id))
body = json.loads(body)
- return resp, body['instance_actions']
+ return resp, body['server_actions']
- def get_instance_action(self, server_id, request_id):
+ def get_server_action(self, server_id, request_id):
"""Returns the action details of the provided server."""
- resp, body = self.get("servers/%s/os-instance-actions/%s" %
+ resp, body = self.get("servers/%s/os-server-actions/%s" %
(str(server_id), str(request_id)))
body = json.loads(body)
- return resp, body['instance_action']
+ return resp, body['server_action']
def force_delete_server(self, server_id, **kwargs):
"""Force delete a server."""
@@ -438,6 +460,7 @@
resp, body = self.post('servers/%s/action' % str(server_id),
post_body)
body = json.loads(body)
+ self.validate_response(common_schema.get_vnc_console, resp, body)
return resp, body['console']
def reset_network(self, server_id, **kwargs):
@@ -447,3 +470,13 @@
def inject_network_info(self, server_id, **kwargs):
"""Inject the Network Info into server"""
return self.action(server_id, 'inject_network_info', None, **kwargs)
+
+ def get_spice_console(self, server_id, console_type):
+ """Get URL of Spice console."""
+ return self.action(server_id, "get_spice_console"
+ "console", type=console_type)
+
+ def get_rdp_console(self, server_id, console_type):
+ """Get URL of RDP console."""
+ return self.action(server_id, "get_rdp_console"
+ "console", type=console_type)
diff --git a/tempest/services/compute/v3/json/services_client.py b/tempest/services/compute/v3/json/services_client.py
index b4e65a0..96ff580 100644
--- a/tempest/services/compute/v3/json/services_client.py
+++ b/tempest/services/compute/v3/json/services_client.py
@@ -17,6 +17,7 @@
import json
import urllib
+from tempest.api_schema.compute import services as schema
from tempest.common import rest_client
from tempest import config
@@ -36,6 +37,7 @@
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.list_services, resp, body)
return resp, body['services']
def enable_service(self, host_name, binary):
@@ -52,6 +54,7 @@
})
resp, body = self.put('os-services/enable', post_body)
body = json.loads(body)
+ self.validate_response(schema.enable_service, resp, body)
return resp, body['service']
def disable_service(self, host_name, binary):
diff --git a/tempest/services/compute/v3/json/version_client.py b/tempest/services/compute/v3/json/version_client.py
index b560c58..568678d 100644
--- a/tempest/services/compute/v3/json/version_client.py
+++ b/tempest/services/compute/v3/json/version_client.py
@@ -15,6 +15,7 @@
import json
+from tempest.api_schema.compute import version as schema
from tempest.common import rest_client
from tempest import config
@@ -30,4 +31,5 @@
def get_version(self):
resp, body = self.get('')
body = json.loads(body)
+ self.validate_response(schema.version, resp, body)
return resp, body['version']
diff --git a/tempest/services/compute/xml/aggregates_client.py b/tempest/services/compute/xml/aggregates_client.py
index 5b250ee..9c2d4aa 100644
--- a/tempest/services/compute/xml/aggregates_client.py
+++ b/tempest/services/compute/xml/aggregates_client.py
@@ -16,12 +16,9 @@
from lxml import etree
from tempest.common import rest_client
+from tempest.common import xml_utils
from tempest import config
from tempest import exceptions
-from tempest.services.compute.xml.common import Document
-from tempest.services.compute.xml.common import Element
-from tempest.services.compute.xml.common import Text
-from tempest.services.compute.xml.common import xml_to_json
CONF = config.CONF
@@ -34,7 +31,7 @@
self.service = CONF.compute.catalog_type
def _format_aggregate(self, g):
- agg = xml_to_json(g)
+ agg = xml_utils.xml_to_json(g)
aggregate = {}
for key, value in agg.items():
if key == 'hosts':
@@ -64,21 +61,25 @@
def create_aggregate(self, name, availability_zone=None):
"""Creates a new aggregate."""
- post_body = Element("aggregate",
- name=name,
- availability_zone=availability_zone)
+ if availability_zone is not None:
+ post_body = xml_utils.Element("aggregate", name=name,
+ availability_zone=availability_zone)
+ else:
+ post_body = xml_utils.Element("aggregate", name=name)
resp, body = self.post('os-aggregates',
- str(Document(post_body)))
+ str(xml_utils.Document(post_body)))
aggregate = self._format_aggregate(etree.fromstring(body))
return resp, aggregate
def update_aggregate(self, aggregate_id, name, availability_zone=None):
"""Update a aggregate."""
- put_body = Element("aggregate",
- name=name,
- availability_zone=availability_zone)
+ if availability_zone is not None:
+ put_body = xml_utils.Element("aggregate", name=name,
+ availability_zone=availability_zone)
+ else:
+ put_body = xml_utils.Element("aggregate", name=name)
resp, body = self.put('os-aggregates/%s' % str(aggregate_id),
- str(Document(put_body)))
+ str(xml_utils.Document(put_body)))
aggregate = self._format_aggregate(etree.fromstring(body))
return resp, aggregate
@@ -95,30 +96,30 @@
def add_host(self, aggregate_id, host):
"""Adds a host to the given aggregate."""
- post_body = Element("add_host", host=host)
+ post_body = xml_utils.Element("add_host", host=host)
resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
- str(Document(post_body)))
+ str(xml_utils.Document(post_body)))
aggregate = self._format_aggregate(etree.fromstring(body))
return resp, aggregate
def remove_host(self, aggregate_id, host):
"""Removes a host from the given aggregate."""
- post_body = Element("remove_host", host=host)
+ post_body = xml_utils.Element("remove_host", host=host)
resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
- str(Document(post_body)))
+ str(xml_utils.Document(post_body)))
aggregate = self._format_aggregate(etree.fromstring(body))
return resp, aggregate
def set_metadata(self, aggregate_id, meta):
"""Replaces the aggregate's existing metadata with new metadata."""
- post_body = Element("set_metadata")
- metadata = Element("metadata")
+ post_body = xml_utils.Element("set_metadata")
+ metadata = xml_utils.Element("metadata")
post_body.append(metadata)
for k, v in meta.items():
- meta = Element(k)
- meta.append(Text(v))
+ meta = xml_utils.Element(k)
+ meta.append(xml_utils.Text(v))
metadata.append(meta)
resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
- str(Document(post_body)))
+ str(xml_utils.Document(post_body)))
aggregate = self._format_aggregate(etree.fromstring(body))
return resp, aggregate
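The xml_utils helpers keep the same Element/Text/Document API as the old tempest.services.compute.xml.common module, so the call sites only change their import path. A short usage sketch (mirroring the create_aggregate and set_metadata calls above; attribute names and values are illustrative)::

    from tempest.common import xml_utils

    # Build <aggregate name="agg1" availability_zone="nova"> ... </aggregate>
    post_body = xml_utils.Element("aggregate", name="agg1",
                                  availability_zone="nova")
    metadata = xml_utils.Element("metadata")
    key = xml_utils.Element("ssd")
    key.append(xml_utils.Text("true"))
    metadata.append(key)
    post_body.append(metadata)

    # str(Document(...)) yields the XML payload that self.post() sends.
    payload = str(xml_utils.Document(post_body))
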
diff --git a/tempest/services/compute/xml/availability_zone_client.py b/tempest/services/compute/xml/availability_zone_client.py
index 4d71186..38446b8 100644
--- a/tempest/services/compute/xml/availability_zone_client.py
+++ b/tempest/services/compute/xml/availability_zone_client.py
@@ -16,8 +16,8 @@
from lxml import etree
from tempest.common import rest_client
+from tempest.common import xml_utils
from tempest import config
-from tempest.services.compute.xml.common import xml_to_json
CONF = config.CONF
@@ -31,7 +31,7 @@
self.service = CONF.compute.catalog_type
def _parse_array(self, node):
- return [xml_to_json(x) for x in node]
+ return [xml_utils.xml_to_json(x) for x in node]
def get_availability_zone_list(self):
resp, body = self.get('os-availability-zone')
diff --git a/tempest/services/compute/xml/extensions_client.py b/tempest/services/compute/xml/extensions_client.py
index 3e8254c..d924dff 100644
--- a/tempest/services/compute/xml/extensions_client.py
+++ b/tempest/services/compute/xml/extensions_client.py
@@ -16,8 +16,8 @@
from lxml import etree
from tempest.common import rest_client
+from tempest.common import xml_utils
from tempest import config
-from tempest.services.compute.xml.common import xml_to_json
CONF = config.CONF
@@ -32,7 +32,7 @@
def _parse_array(self, node):
array = []
for child in node:
- array.append(xml_to_json(child))
+ array.append(xml_utils.xml_to_json(child))
return array
def list_extensions(self):
@@ -48,5 +48,5 @@
def get_extension(self, extension_alias):
resp, body = self.get('extensions/%s' % extension_alias)
- body = xml_to_json(etree.fromstring(body))
+ body = xml_utils.xml_to_json(etree.fromstring(body))
return resp, body
diff --git a/tempest/services/compute/xml/fixed_ips_client.py b/tempest/services/compute/xml/fixed_ips_client.py
index 0475530..e14ced6 100644
--- a/tempest/services/compute/xml/fixed_ips_client.py
+++ b/tempest/services/compute/xml/fixed_ips_client.py
@@ -15,10 +15,8 @@
from tempest.common import rest_client
+from tempest.common import xml_utils
from tempest import config
-from tempest.services.compute.xml.common import Document
-from tempest.services.compute.xml.common import Element
-from tempest.services.compute.xml.common import Text
CONF = config.CONF
@@ -43,7 +41,7 @@
# accept any action key value here to permit tests to cover cases with
# invalid actions raising badrequest.
key, value = body.popitem()
- xml_body = Element(key)
- xml_body.append(Text(value))
- resp, body = self.post(url, str(Document(xml_body)))
+ xml_body = xml_utils.Element(key)
+ xml_body.append(xml_utils.Text(value))
+ resp, body = self.post(url, str(xml_utils.Document(xml_body)))
return resp, body
diff --git a/tempest/services/compute/xml/flavors_client.py b/tempest/services/compute/xml/flavors_client.py
index 68a27c9..68ef323 100644
--- a/tempest/services/compute/xml/flavors_client.py
+++ b/tempest/services/compute/xml/flavors_client.py
@@ -18,12 +18,8 @@
from lxml import etree
from tempest.common import rest_client
+from tempest.common import xml_utils
from tempest import config
-from tempest.services.compute.xml.common import Document
-from tempest.services.compute.xml.common import Element
-from tempest.services.compute.xml.common import Text
-from tempest.services.compute.xml.common import xml_to_json
-from tempest.services.compute.xml.common import XMLNS_11
CONF = config.CONF
@@ -76,7 +72,7 @@
return flavor
def _parse_array(self, node):
- return [self._format_flavor(xml_to_json(x)) for x in node]
+ return [self._format_flavor(xml_utils.xml_to_json(x)) for x in node]
def _list_flavors(self, url, params):
if params:
@@ -96,19 +92,19 @@
def get_flavor_details(self, flavor_id):
resp, body = self.get("flavors/%s" % str(flavor_id))
- body = xml_to_json(etree.fromstring(body))
+ body = xml_utils.xml_to_json(etree.fromstring(body))
flavor = self._format_flavor(body)
return resp, flavor
def create_flavor(self, name, ram, vcpus, disk, flavor_id, **kwargs):
"""Creates a new flavor or instance type."""
- flavor = Element("flavor",
- xmlns=XMLNS_11,
- ram=ram,
- vcpus=vcpus,
- disk=disk,
- id=flavor_id,
- name=name)
+ flavor = xml_utils.Element("flavor",
+ xmlns=xml_utils.XMLNS_11,
+ ram=ram,
+ vcpus=vcpus,
+ disk=disk,
+ id=flavor_id,
+ name=name)
if kwargs.get('rxtx'):
flavor.add_attr('rxtx_factor', kwargs.get('rxtx'))
if kwargs.get('swap'):
@@ -121,8 +117,8 @@
kwargs.get('is_public'))
flavor.add_attr('xmlns:OS-FLV-EXT-DATA', XMLNS_OS_FLV_EXT_DATA)
flavor.add_attr('xmlns:os-flavor-access', XMLNS_OS_FLV_ACCESS)
- resp, body = self.post('flavors', str(Document(flavor)))
- body = xml_to_json(etree.fromstring(body))
+ resp, body = self.post('flavors', str(xml_utils.Document(flavor)))
+ body = xml_utils.xml_to_json(etree.fromstring(body))
flavor = self._format_flavor(body)
return resp, flavor
@@ -142,18 +138,18 @@
def set_flavor_extra_spec(self, flavor_id, specs):
"""Sets extra Specs to the mentioned flavor."""
- extra_specs = Element("extra_specs")
+ extra_specs = xml_utils.Element("extra_specs")
for key in specs.keys():
extra_specs.add_attr(key, specs[key])
resp, body = self.post('flavors/%s/os-extra_specs' % flavor_id,
- str(Document(extra_specs)))
- body = xml_to_json(etree.fromstring(body))
+ str(xml_utils.Document(extra_specs)))
+ body = xml_utils.xml_to_json(etree.fromstring(body))
return resp, body
def get_flavor_extra_spec(self, flavor_id):
"""Gets extra Specs of the mentioned flavor."""
resp, body = self.get('flavors/%s/os-extra_specs' % flavor_id)
- body = xml_to_json(etree.fromstring(body))
+ body = xml_utils.xml_to_json(etree.fromstring(body))
return resp, body
def get_flavor_extra_spec_with_key(self, flavor_id, key):
@@ -163,21 +159,21 @@
body = {}
element = etree.fromstring(xml_body)
key = element.get('key')
- body[key] = xml_to_json(element)
+ body[key] = xml_utils.xml_to_json(element)
return resp, body
def update_flavor_extra_spec(self, flavor_id, key, **kwargs):
"""Update extra Specs details of the mentioned flavor and key."""
- doc = Document()
+ doc = xml_utils.Document()
for (k, v) in kwargs.items():
- element = Element(k)
+ element = xml_utils.Element(k)
doc.append(element)
- value = Text(v)
+ value = xml_utils.Text(v)
element.append(value)
resp, body = self.put('flavors/%s/os-extra_specs/%s' %
(flavor_id, key), str(doc))
- body = xml_to_json(etree.fromstring(body))
+ body = xml_utils.xml_to_json(etree.fromstring(body))
return resp, {key: body}
def unset_flavor_extra_spec(self, flavor_id, key):
@@ -186,7 +182,7 @@
key))
def _parse_array_access(self, node):
- return [xml_to_json(x) for x in node]
+ return [xml_utils.xml_to_json(x) for x in node]
def list_flavor_access(self, flavor_id):
"""Gets flavor access information given the flavor id."""
@@ -196,8 +192,8 @@
def add_flavor_access(self, flavor_id, tenant_id):
"""Add flavor access for the specified tenant."""
- doc = Document()
- server = Element("addTenantAccess")
+ doc = xml_utils.Document()
+ server = xml_utils.Element("addTenantAccess")
doc.append(server)
server.add_attr("tenant", tenant_id)
resp, body = self.post('flavors/%s/action' % str(flavor_id), str(doc))
@@ -206,8 +202,8 @@
def remove_flavor_access(self, flavor_id, tenant_id):
"""Remove flavor access from the specified tenant."""
- doc = Document()
- server = Element("removeTenantAccess")
+ doc = xml_utils.Document()
+ server = xml_utils.Element("removeTenantAccess")
doc.append(server)
server.add_attr("tenant", tenant_id)
resp, body = self.post('flavors/%s/action' % str(flavor_id), str(doc))
diff --git a/tempest/services/compute/xml/floating_ips_client.py b/tempest/services/compute/xml/floating_ips_client.py
index be54753..fa4aa07 100644
--- a/tempest/services/compute/xml/floating_ips_client.py
+++ b/tempest/services/compute/xml/floating_ips_client.py
@@ -17,12 +17,9 @@
import urllib
from tempest.common import rest_client
+from tempest.common import xml_utils
from tempest import config
from tempest import exceptions
-from tempest.services.compute.xml.common import Document
-from tempest.services.compute.xml.common import Element
-from tempest.services.compute.xml.common import Text
-from tempest.services.compute.xml.common import xml_to_json
CONF = config.CONF
@@ -37,11 +34,11 @@
def _parse_array(self, node):
array = []
for child in node.getchildren():
- array.append(xml_to_json(child))
+ array.append(xml_utils.xml_to_json(child))
return array
def _parse_floating_ip(self, body):
- json = xml_to_json(body)
+ json = xml_utils.xml_to_json(body)
return json
def list_floating_ips(self, params=None):
@@ -67,9 +64,9 @@
"""Allocate a floating IP to the project."""
url = 'os-floating-ips'
if pool_name:
- doc = Document()
- pool = Element("pool")
- pool.append(Text(pool_name))
+ doc = xml_utils.Document()
+ pool = xml_utils.Element("pool")
+ pool.append(xml_utils.Text(pool_name))
doc.append(pool)
resp, body = self.post(url, str(doc))
else:
@@ -86,8 +83,8 @@
def associate_floating_ip_to_server(self, floating_ip, server_id):
"""Associate the provided floating IP to a specific server."""
url = "servers/%s/action" % str(server_id)
- doc = Document()
- server = Element("addFloatingIp")
+ doc = xml_utils.Document()
+ server = xml_utils.Element("addFloatingIp")
doc.append(server)
server.add_attr("address", floating_ip)
resp, body = self.post(url, str(doc))
@@ -96,8 +93,8 @@
def disassociate_floating_ip_from_server(self, floating_ip, server_id):
"""Disassociate the provided floating IP from a specific server."""
url = "servers/%s/action" % str(server_id)
- doc = Document()
- server = Element("removeFloatingIp")
+ doc = xml_utils.Document()
+ server = xml_utils.Element("removeFloatingIp")
doc.append(server)
server.add_attr("address", floating_ip)
resp, body = self.post(url, str(doc))
diff --git a/tempest/services/compute/xml/hosts_client.py b/tempest/services/compute/xml/hosts_client.py
index b74cd04..23a7dd6 100644
--- a/tempest/services/compute/xml/hosts_client.py
+++ b/tempest/services/compute/xml/hosts_client.py
@@ -16,10 +16,8 @@
from lxml import etree
from tempest.common import rest_client
+from tempest.common import xml_utils
from tempest import config
-from tempest.services.compute.xml.common import Document
-from tempest.services.compute.xml.common import Element
-from tempest.services.compute.xml.common import xml_to_json
CONF = config.CONF
@@ -40,7 +38,7 @@
resp, body = self.get(url)
node = etree.fromstring(body)
- body = [xml_to_json(x) for x in node.getchildren()]
+ body = [xml_utils.xml_to_json(x) for x in node.getchildren()]
return resp, body
def show_host_detail(self, hostname):
@@ -48,20 +46,20 @@
resp, body = self.get("os-hosts/%s" % str(hostname))
node = etree.fromstring(body)
- body = [xml_to_json(node)]
+ body = [xml_utils.xml_to_json(node)]
return resp, body
def update_host(self, hostname, **kwargs):
"""Update a host."""
- request_body = Element("updates")
+ request_body = xml_utils.Element("updates")
if kwargs:
for k, v in kwargs.iteritems():
- request_body.append(Element(k, v))
+ request_body.append(xml_utils.Element(k, v))
resp, body = self.put("os-hosts/%s" % str(hostname),
- str(Document(request_body)))
+ str(xml_utils.Document(request_body)))
node = etree.fromstring(body)
- body = [xml_to_json(x) for x in node.getchildren()]
+ body = [xml_utils.xml_to_json(x) for x in node.getchildren()]
return resp, body
def startup_host(self, hostname):
@@ -69,7 +67,7 @@
resp, body = self.get("os-hosts/%s/startup" % str(hostname))
node = etree.fromstring(body)
- body = [xml_to_json(x) for x in node.getchildren()]
+ body = [xml_utils.xml_to_json(x) for x in node.getchildren()]
return resp, body
def shutdown_host(self, hostname):
@@ -77,7 +75,7 @@
resp, body = self.get("os-hosts/%s/shutdown" % str(hostname))
node = etree.fromstring(body)
- body = [xml_to_json(x) for x in node.getchildren()]
+ body = [xml_utils.xml_to_json(x) for x in node.getchildren()]
return resp, body
def reboot_host(self, hostname):
@@ -85,5 +83,5 @@
resp, body = self.get("os-hosts/%s/reboot" % str(hostname))
node = etree.fromstring(body)
- body = [xml_to_json(x) for x in node.getchildren()]
+ body = [xml_utils.xml_to_json(x) for x in node.getchildren()]
return resp, body
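All of these XML clients funnel responses through xml_utils.xml_to_json, which flattens an etree node into a plain dict so the XML and JSON clients can return the same shape. A rough, self-contained approximation of that behaviour, written only to illustrate the data the clients hand back (this is not the Tempest implementation)::

    from lxml import etree


    def xml_to_json_sketch(node):
        # Attributes map straight onto dict keys; children either recurse
        # (when they carry attributes or children of their own) or
        # collapse to their text content.
        result = dict(node.attrib)
        for child in node:
            tag = child.tag.split('}')[-1]  # drop any XML namespace prefix
            if len(child) or child.attrib:
                result[tag] = xml_to_json_sketch(child)
            else:
                result[tag] = child.text
        return result


    host = etree.fromstring('<host host_name="c1"><service>compute</service></host>')
    print(xml_to_json_sketch(host))  # {'host_name': 'c1', 'service': 'compute'}
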
diff --git a/tempest/services/compute/xml/hypervisor_client.py b/tempest/services/compute/xml/hypervisor_client.py
index ecd7541..1452708 100644
--- a/tempest/services/compute/xml/hypervisor_client.py
+++ b/tempest/services/compute/xml/hypervisor_client.py
@@ -16,8 +16,8 @@
from lxml import etree
from tempest.common import rest_client
+from tempest.common import xml_utils
from tempest import config
-from tempest.services.compute.xml.common import xml_to_json
CONF = config.CONF
@@ -30,7 +30,7 @@
self.service = CONF.compute.catalog_type
def _parse_array(self, node):
- return [xml_to_json(x) for x in node]
+ return [xml_utils.xml_to_json(x) for x in node]
def get_hypervisor_list(self):
"""List hypervisors information."""
@@ -47,7 +47,7 @@
def get_hypervisor_show_details(self, hyper_id):
"""Display the details of the specified hypervisor."""
resp, body = self.get('os-hypervisors/%s' % hyper_id)
- hypervisor = xml_to_json(etree.fromstring(body))
+ hypervisor = xml_utils.xml_to_json(etree.fromstring(body))
return resp, hypervisor
def get_hypervisor_servers(self, hyper_name):
@@ -59,13 +59,13 @@
def get_hypervisor_stats(self):
"""Get hypervisor statistics over all compute nodes."""
resp, body = self.get('os-hypervisors/statistics')
- stats = xml_to_json(etree.fromstring(body))
+ stats = xml_utils.xml_to_json(etree.fromstring(body))
return resp, stats
def get_hypervisor_uptime(self, hyper_id):
"""Display the uptime of the specified hypervisor."""
resp, body = self.get('os-hypervisors/%s/uptime' % hyper_id)
- uptime = xml_to_json(etree.fromstring(body))
+ uptime = xml_utils.xml_to_json(etree.fromstring(body))
return resp, uptime
def search_hypervisor(self, hyper_name):
diff --git a/tempest/services/compute/xml/images_client.py b/tempest/services/compute/xml/images_client.py
index 9d529be..6b15404 100644
--- a/tempest/services/compute/xml/images_client.py
+++ b/tempest/services/compute/xml/images_client.py
@@ -19,13 +19,9 @@
from tempest.common import rest_client
from tempest.common import waiters
+from tempest.common import xml_utils
from tempest import config
from tempest import exceptions
-from tempest.services.compute.xml.common import Document
-from tempest.services.compute.xml.common import Element
-from tempest.services.compute.xml.common import Text
-from tempest.services.compute.xml.common import xml_to_json
-from tempest.services.compute.xml.common import XMLNS_11
CONF = config.CONF
@@ -40,24 +36,24 @@
self.build_timeout = CONF.compute.build_timeout
def _parse_server(self, node):
- data = xml_to_json(node)
+ data = xml_utils.xml_to_json(node)
return self._parse_links(node, data)
def _parse_image(self, node):
"""Parses detailed XML image information into dictionary."""
- data = xml_to_json(node)
+ data = xml_utils.xml_to_json(node)
self._parse_links(node, data)
# parse all metadata
if 'metadata' in data:
- tag = node.find('{%s}metadata' % XMLNS_11)
+ tag = node.find('{%s}metadata' % xml_utils.XMLNS_11)
data['metadata'] = dict((x.get('key'), x.text)
for x in tag.getchildren())
# parse server information
if 'server' in data:
- tag = node.find('{%s}server' % XMLNS_11)
+ tag = node.find('{%s}server' % xml_utils.XMLNS_11)
data['server'] = self._parse_server(tag)
return data
@@ -67,7 +63,7 @@
if 'link' in data:
# remove single link element
del data['link']
- data['links'] = [xml_to_json(x) for x in
+ data['links'] = [xml_utils.xml_to_json(x) for x in
node.findall('{http://www.w3.org/2005/Atom}link')]
return data
@@ -93,17 +89,17 @@
def create_image(self, server_id, name, meta=None):
"""Creates an image of the original server."""
- post_body = Element('createImage', name=name)
+ post_body = xml_utils.Element('createImage', name=name)
if meta:
- metadata = Element('metadata')
+ metadata = xml_utils.Element('metadata')
post_body.append(metadata)
for k, v in meta.items():
- data = Element('meta', key=k)
- data.append(Text(v))
+ data = xml_utils.Element('meta', key=k)
+ data.append(xml_utils.Text(v))
metadata.append(data)
resp, body = self.post('servers/%s/action' % str(server_id),
- str(Document(post_body)))
+ str(xml_utils.Document(post_body)))
return resp, body
def list_images(self, params=None):
@@ -144,10 +140,10 @@
waiters.wait_for_image_status(self, image_id, status)
def _metadata_body(self, meta):
- post_body = Element('metadata')
+ post_body = xml_utils.Element('metadata')
for k, v in meta.items():
- data = Element('meta', key=k)
- data.append(Text(v))
+ data = xml_utils.Element('meta', key=k)
+ data.append(xml_utils.Text(v))
post_body.append(data)
return post_body
@@ -161,7 +157,7 @@
"""Sets the metadata for an image."""
post_body = self._metadata_body(meta)
resp, body = self.put('images/%s/metadata' % image_id,
- str(Document(post_body)))
+ str(xml_utils.Document(post_body)))
body = self._parse_key_value(etree.fromstring(body))
return resp, body
@@ -169,7 +165,7 @@
"""Updates the metadata for an image."""
post_body = self._metadata_body(meta)
resp, body = self.post('images/%s/metadata' % str(image_id),
- str(Document(post_body)))
+ str(xml_utils.Document(post_body)))
body = self._parse_key_value(etree.fromstring(body))
return resp, body
@@ -183,19 +179,19 @@
def set_image_metadata_item(self, image_id, key, meta):
"""Sets the value for a specific image metadata key."""
for k, v in meta.items():
- post_body = Element('meta', key=key)
- post_body.append(Text(v))
+ post_body = xml_utils.Element('meta', key=key)
+ post_body.append(xml_utils.Text(v))
resp, body = self.put('images/%s/metadata/%s' % (str(image_id), key),
- str(Document(post_body)))
- body = xml_to_json(etree.fromstring(body))
+ str(xml_utils.Document(post_body)))
+ body = xml_utils.xml_to_json(etree.fromstring(body))
return resp, body
def update_image_metadata_item(self, image_id, key, meta):
"""Sets the value for a specific image metadata key."""
- post_body = Document('meta', Text(meta), key=key)
+ post_body = xml_utils.Document('meta', xml_utils.Text(meta), key=key)
resp, body = self.put('images/%s/metadata/%s' % (str(image_id), key),
post_body)
- body = xml_to_json(etree.fromstring(body))
+ body = xml_utils.xml_to_json(etree.fromstring(body))
return resp, body['meta']
def delete_image_metadata_item(self, image_id, key):
diff --git a/tempest/services/compute/xml/instance_usage_audit_log_client.py b/tempest/services/compute/xml/instance_usage_audit_log_client.py
index 1cd8c07..b139db1 100644
--- a/tempest/services/compute/xml/instance_usage_audit_log_client.py
+++ b/tempest/services/compute/xml/instance_usage_audit_log_client.py
@@ -16,8 +16,8 @@
from lxml import etree
from tempest.common import rest_client
+from tempest.common import xml_utils
from tempest import config
-from tempest.services.compute.xml.common import xml_to_json
CONF = config.CONF
@@ -33,11 +33,13 @@
def list_instance_usage_audit_logs(self):
url = 'os-instance_usage_audit_log'
resp, body = self.get(url)
- instance_usage_audit_logs = xml_to_json(etree.fromstring(body))
+ instance_usage_audit_logs = xml_utils.xml_to_json(
+ etree.fromstring(body))
return resp, instance_usage_audit_logs
def get_instance_usage_audit_log(self, time_before):
url = 'os-instance_usage_audit_log/%s' % time_before
resp, body = self.get(url)
- instance_usage_audit_log = xml_to_json(etree.fromstring(body))
+ instance_usage_audit_log = xml_utils.xml_to_json(
+ etree.fromstring(body))
return resp, instance_usage_audit_log
diff --git a/tempest/services/compute/xml/interfaces_client.py b/tempest/services/compute/xml/interfaces_client.py
index 8d4bfcc..e30a97c 100644
--- a/tempest/services/compute/xml/interfaces_client.py
+++ b/tempest/services/compute/xml/interfaces_client.py
@@ -18,13 +18,9 @@
from lxml import etree
from tempest.common import rest_client
+from tempest.common import xml_utils
from tempest import config
from tempest import exceptions
-from tempest.services.compute.xml.common import Document
-from tempest.services.compute.xml.common import Element
-from tempest.services.compute.xml.common import Text
-from tempest.services.compute.xml.common import xml_to_json
-from tempest.services.compute.xml.common import XMLNS_11
CONF = config.CONF
@@ -37,9 +33,9 @@
self.service = CONF.compute.catalog_type
def _process_xml_interface(self, node):
- iface = xml_to_json(node)
+ iface = xml_utils.xml_to_json(node)
# NOTE(danms): if multiple addresses per interface is ever required,
- # xml_to_json will need to be fixed or replaced in this case
+ # xml_utils.xml_to_json will need to be fixed or replaced in this case
iface['fixed_ips'] = [dict(iface['fixed_ips']['fixed_ip'].items())]
return iface
@@ -52,21 +48,21 @@
def create_interface(self, server, port_id=None, network_id=None,
fixed_ip=None):
- doc = Document()
- iface = Element('interfaceAttachment')
+ doc = xml_utils.Document()
+ iface = xml_utils.Element('interfaceAttachment')
if port_id:
- _port_id = Element('port_id')
- _port_id.append(Text(port_id))
+ _port_id = xml_utils.Element('port_id')
+ _port_id.append(xml_utils.Text(port_id))
iface.append(_port_id)
if network_id:
- _network_id = Element('net_id')
- _network_id.append(Text(network_id))
+ _network_id = xml_utils.Element('net_id')
+ _network_id.append(xml_utils.Text(network_id))
iface.append(_network_id)
if fixed_ip:
- _fixed_ips = Element('fixed_ips')
- _fixed_ip = Element('fixed_ip')
- _ip_address = Element('ip_address')
- _ip_address.append(Text(fixed_ip))
+ _fixed_ips = xml_utils.Element('fixed_ips')
+ _fixed_ip = xml_utils.Element('fixed_ip')
+ _ip_address = xml_utils.Element('ip_address')
+ _ip_address.append(xml_utils.Text(fixed_ip))
_fixed_ip.append(_ip_address)
_fixed_ips.append(_fixed_ip)
iface.append(_fixed_ips)
@@ -108,18 +104,18 @@
def add_fixed_ip(self, server_id, network_id):
"""Add a fixed IP to input server instance."""
- post_body = Element("addFixedIp",
- xmlns=XMLNS_11,
- networkId=network_id)
+ post_body = xml_utils.Element("addFixedIp",
+ xmlns=xml_utils.XMLNS_11,
+ networkId=network_id)
resp, body = self.post('servers/%s/action' % str(server_id),
- str(Document(post_body)))
+ str(xml_utils.Document(post_body)))
return resp, body
def remove_fixed_ip(self, server_id, ip_address):
"""Remove input fixed IP from input server instance."""
- post_body = Element("removeFixedIp",
- xmlns=XMLNS_11,
- address=ip_address)
+ post_body = xml_utils.Element("removeFixedIp",
+ xmlns=xml_utils.XMLNS_11,
+ address=ip_address)
resp, body = self.post('servers/%s/action' % str(server_id),
- str(Document(post_body)))
+ str(xml_utils.Document(post_body)))
return resp, body
diff --git a/tempest/services/compute/xml/keypairs_client.py b/tempest/services/compute/xml/keypairs_client.py
index fb498c0..8ff37ac 100644
--- a/tempest/services/compute/xml/keypairs_client.py
+++ b/tempest/services/compute/xml/keypairs_client.py
@@ -17,11 +17,8 @@
from lxml import etree
from tempest.common import rest_client
+from tempest.common import xml_utils
from tempest import config
-from tempest.services.compute.xml.common import Document
-from tempest.services.compute.xml.common import Element
-from tempest.services.compute.xml.common import Text
-from tempest.services.compute.xml.common import xml_to_json
CONF = config.CONF
@@ -36,34 +33,35 @@
def list_keypairs(self):
resp, body = self.get("os-keypairs")
node = etree.fromstring(body)
- body = [{'keypair': xml_to_json(x)} for x in node.getchildren()]
+ body = [{'keypair': xml_utils.xml_to_json(x)} for x in
+ node.getchildren()]
return resp, body
def get_keypair(self, key_name):
resp, body = self.get("os-keypairs/%s" % str(key_name))
- body = xml_to_json(etree.fromstring(body))
+ body = xml_utils.xml_to_json(etree.fromstring(body))
return resp, body
def create_keypair(self, name, pub_key=None):
- doc = Document()
+ doc = xml_utils.Document()
- keypair_element = Element("keypair")
+ keypair_element = xml_utils.Element("keypair")
if pub_key:
- public_key_element = Element("public_key")
- public_key_text = Text(pub_key)
+ public_key_element = xml_utils.Element("public_key")
+ public_key_text = xml_utils.Text(pub_key)
public_key_element.append(public_key_text)
keypair_element.append(public_key_element)
- name_element = Element("name")
- name_text = Text(name)
+ name_element = xml_utils.Element("name")
+ name_text = xml_utils.Text(name)
name_element.append(name_text)
keypair_element.append(name_element)
doc.append(keypair_element)
resp, body = self.post("os-keypairs", body=str(doc))
- body = xml_to_json(etree.fromstring(body))
+ body = xml_utils.xml_to_json(etree.fromstring(body))
return resp, body
def delete_keypair(self, key_name):
diff --git a/tempest/services/compute/xml/quotas_client.py b/tempest/services/compute/xml/quotas_client.py
index b8b759f..5502fcc 100644
--- a/tempest/services/compute/xml/quotas_client.py
+++ b/tempest/services/compute/xml/quotas_client.py
@@ -16,11 +16,8 @@
from lxml import etree
from tempest.common import rest_client
+from tempest.common import xml_utils
from tempest import config
-from tempest.services.compute.xml.common import Document
-from tempest.services.compute.xml.common import Element
-from tempest.services.compute.xml.common import xml_to_json
-from tempest.services.compute.xml.common import XMLNS_11
CONF = config.CONF
@@ -44,12 +41,14 @@
return quota
- def get_quota_set(self, tenant_id):
+ def get_quota_set(self, tenant_id, user_id=None):
"""List the quota set for a tenant."""
url = 'os-quota-sets/%s' % str(tenant_id)
+ if user_id:
+ url += '?user_id=%s' % str(user_id)
resp, body = self.get(url)
- body = xml_to_json(etree.fromstring(body))
+ body = xml_utils.xml_to_json(etree.fromstring(body))
body = self._format_quota(body)
return resp, body
@@ -58,12 +57,12 @@
url = 'os-quota-sets/%s/defaults' % str(tenant_id)
resp, body = self.get(url)
- body = xml_to_json(etree.fromstring(body))
+ body = xml_utils.xml_to_json(etree.fromstring(body))
body = self._format_quota(body)
return resp, body
- def update_quota_set(self, tenant_id, force=None,
- injected_file_content_bytes=None,
+ def update_quota_set(self, tenant_id, user_id=None,
+ force=None, injected_file_content_bytes=None,
metadata_items=None, ram=None, floating_ips=None,
fixed_ips=None, key_pairs=None, instances=None,
security_group_rules=None, injected_files=None,
@@ -72,8 +71,8 @@
"""
Updates the tenant's quota limits for one or more resources
"""
- post_body = Element("quota_set",
- xmlns=XMLNS_11)
+ post_body = xml_utils.Element("quota_set",
+ xmlns=xml_utils.XMLNS_11)
if force is not None:
post_body.add_attr('force', force)
@@ -116,8 +115,18 @@
if security_groups is not None:
post_body.add_attr('security_groups', security_groups)
- resp, body = self.put('os-quota-sets/%s' % str(tenant_id),
- str(Document(post_body)))
- body = xml_to_json(etree.fromstring(body))
+ if user_id:
+ resp, body = self.put('os-quota-sets/%s?user_id=%s' %
+ (str(tenant_id), str(user_id)),
+ str(xml_utils.Document(post_body)))
+ else:
+ resp, body = self.put('os-quota-sets/%s' % str(tenant_id),
+ str(xml_utils.Document(post_body)))
+
+ body = xml_utils.xml_to_json(etree.fromstring(body))
body = self._format_quota(body)
return resp, body
+
+ def delete_quota_set(self, tenant_id):
+ """Delete the tenant's quota set."""
+ return self.delete('os-quota-sets/%s' % str(tenant_id))
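With the new user_id parameter, both the JSON and XML quota clients can address per-user quotas by appending a query string to the tenant URL. Hypothetical usage, where quotas_client, tenant_id and user_id are assumed bindings::

    # Tenant-level quota set
    resp, quota = quotas_client.get_quota_set(tenant_id)

    # Per-user quota set and update; the client appends ?user_id=<id>
    resp, quota = quotas_client.get_quota_set(tenant_id, user_id=user_id)
    resp, quota = quotas_client.update_quota_set(tenant_id, user_id=user_id,
                                                 ram=2048, instances=5)

    # Drop the customised quota set so the tenant falls back to defaults
    quotas_client.delete_quota_set(tenant_id)
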
diff --git a/tempest/services/compute/xml/security_groups_client.py b/tempest/services/compute/xml/security_groups_client.py
index d53e8da..9eccb90 100644
--- a/tempest/services/compute/xml/security_groups_client.py
+++ b/tempest/services/compute/xml/security_groups_client.py
@@ -17,13 +17,9 @@
import urllib
from tempest.common import rest_client
+from tempest.common import xml_utils
from tempest import config
from tempest import exceptions
-from tempest.services.compute.xml.common import Document
-from tempest.services.compute.xml.common import Element
-from tempest.services.compute.xml.common import Text
-from tempest.services.compute.xml.common import xml_to_json
-from tempest.services.compute.xml.common import XMLNS_11
CONF = config.CONF
@@ -38,11 +34,11 @@
def _parse_array(self, node):
array = []
for child in node.getchildren():
- array.append(xml_to_json(child))
+ array.append(xml_utils.xml_to_json(child))
return array
def _parse_body(self, body):
- json = xml_to_json(body)
+ json = xml_utils.xml_to_json(body)
return json
def list_security_groups(self, params=None):
@@ -69,12 +65,12 @@
name (Required): Name of security group.
description (Required): Description of security group.
"""
- security_group = Element("security_group", name=name)
- des = Element("description")
- des.append(Text(content=description))
+ security_group = xml_utils.Element("security_group", name=name)
+ des = xml_utils.Element("description")
+ des.append(xml_utils.Text(content=description))
security_group.append(des)
resp, body = self.post('os-security-groups',
- str(Document(security_group)))
+ str(xml_utils.Document(security_group)))
body = self._parse_body(etree.fromstring(body))
return resp, body
@@ -86,18 +82,18 @@
name: new name of security group
description: new description of security group
"""
- security_group = Element("security_group")
+ security_group = xml_utils.Element("security_group")
if name:
- sg_name = Element("name")
- sg_name.append(Text(content=name))
+ sg_name = xml_utils.Element("name")
+ sg_name.append(xml_utils.Text(content=name))
security_group.append(sg_name)
if description:
- des = Element("description")
- des.append(Text(content=description))
+ des = xml_utils.Element("description")
+ des.append(xml_utils.Text(content=description))
security_group.append(des)
resp, body = self.put('os-security-groups/%s' %
str(security_group_id),
- str(Document(security_group)))
+ str(xml_utils.Document(security_group)))
body = self._parse_body(etree.fromstring(body))
return resp, body
@@ -117,7 +113,7 @@
cidr : CIDR for address range.
group_id : ID of the Source group
"""
- group_rule = Element("security_group_rule")
+ group_rule = xml_utils.Element("security_group_rule")
elements = dict()
elements['cidr'] = kwargs.get('cidr')
@@ -129,12 +125,12 @@
for k, v in elements.items():
if v is not None:
- element = Element(k)
- element.append(Text(content=str(v)))
+ element = xml_utils.Element(k)
+ element.append(xml_utils.Text(content=str(v)))
group_rule.append(element)
url = 'os-security-group-rules'
- resp, body = self.post(url, str(Document(group_rule)))
+ resp, body = self.post(url, str(xml_utils.Document(group_rule)))
body = self._parse_body(etree.fromstring(body))
return resp, body
@@ -151,8 +147,8 @@
secgroups = body.getchildren()
for secgroup in secgroups:
if secgroup.get('id') == security_group_id:
- node = secgroup.find('{%s}rules' % XMLNS_11)
- rules = [xml_to_json(x) for x in node.getchildren()]
+ node = secgroup.find('{%s}rules' % xml_utils.XMLNS_11)
+ rules = [xml_utils.xml_to_json(x) for x in node.getchildren()]
return resp, rules
raise exceptions.NotFound('No such Security Group')
diff --git a/tempest/services/compute/xml/servers_client.py b/tempest/services/compute/xml/servers_client.py
index 4d3646c..c1105f9 100644
--- a/tempest/services/compute/xml/servers_client.py
+++ b/tempest/services/compute/xml/servers_client.py
@@ -21,14 +21,10 @@
from tempest.common import rest_client
from tempest.common import waiters
+from tempest.common import xml_utils
from tempest import config
from tempest import exceptions
from tempest.openstack.common import log as logging
-from tempest.services.compute.xml.common import Document
-from tempest.services.compute.xml.common import Element
-from tempest.services.compute.xml.common import Text
-from tempest.services.compute.xml.common import xml_to_json
-from tempest.services.compute.xml.common import XMLNS_11
CONF = config.CONF
@@ -60,12 +56,13 @@
def _translate_network_xml_to_json(network):
return [_translate_ip_xml_json(ip.attrib)
- for ip in network.findall('{%s}ip' % XMLNS_11)]
+ for ip in network.findall('{%s}ip' % xml_utils.XMLNS_11)]
def _translate_addresses_xml_to_json(xml_addresses):
return dict((network.attrib['id'], _translate_network_xml_to_json(network))
- for network in xml_addresses.findall('{%s}network' % XMLNS_11))
+ for network in xml_addresses.findall('{%s}network' %
+ xml_utils.XMLNS_11))
def _translate_server_xml_to_json(xml_dom):
@@ -97,16 +94,16 @@
'version': 6}],
'foo_novanetwork': [{'addr': '192.168.0.4', 'version': 4}]}}
"""
- nsmap = {'api': XMLNS_11}
+ nsmap = {'api': xml_utils.XMLNS_11}
addresses = xml_dom.xpath('/api:server/api:addresses', namespaces=nsmap)
if addresses:
if len(addresses) > 1:
raise ValueError('Expected only single `addresses` element.')
json_addresses = _translate_addresses_xml_to_json(addresses[0])
- json = xml_to_json(xml_dom)
+ json = xml_utils.xml_to_json(xml_dom)
json['addresses'] = json_addresses
else:
- json = xml_to_json(xml_dom)
+ json = xml_utils.xml_to_json(xml_dom)
diskConfig = ('{http://docs.openstack.org'
'/compute/ext/disk_config/api/v1.1}diskConfig')
terminated_at = ('{http://docs.openstack.org/'
@@ -122,6 +119,10 @@
'/compute/ext/extended_status/api/v1.1}vm_state')
task_state = ('{http://docs.openstack.org'
'/compute/ext/extended_status/api/v1.1}task_state')
+ if 'tenantId' in json:
+ json['tenant_id'] = json.pop('tenantId')
+ if 'userId' in json:
+ json['user_id'] = json.pop('userId')
if diskConfig in json:
json['OS-DCF:diskConfig'] = json.pop(diskConfig)
if terminated_at in json:
@@ -157,7 +158,7 @@
del json['link']
json['links'] = []
for linknode in node.findall('{http://www.w3.org/2005/Atom}link'):
- json['links'].append(xml_to_json(linknode))
+ json['links'].append(xml_utils.xml_to_json(linknode))
def _parse_server(self, body):
json = _translate_server_xml_to_json(body)
@@ -165,7 +166,7 @@
if 'metadata' in json and json['metadata']:
# NOTE(danms): if there was metadata, we need to re-parse
# that as a special type
- metadata_tag = body.find('{%s}metadata' % XMLNS_11)
+ metadata_tag = body.find('{%s}metadata' % xml_utils.XMLNS_11)
json["metadata"] = self._parse_key_value(metadata_tag)
if 'link' in json:
self._parse_links(body, json)
@@ -242,7 +243,13 @@
def _parse_array(self, node):
array = []
for child in node.getchildren():
- array.append(xml_to_json(child))
+ array.append(xml_utils.xml_to_json(child))
+ return array
+
+ def _parse_server_array(self, node):
+ array = []
+ for child in node.getchildren():
+ array.append(self._parse_server(child))
return array
def list_servers(self, params=None):
@@ -251,7 +258,7 @@
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
- servers = self._parse_array(etree.fromstring(body))
+ servers = self._parse_server_array(etree.fromstring(body))
return resp, {"servers": servers}
def list_servers_with_detail(self, params=None):
@@ -260,13 +267,13 @@
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
- servers = self._parse_array(etree.fromstring(body))
+ servers = self._parse_server_array(etree.fromstring(body))
return resp, {"servers": servers}
def update_server(self, server_id, name=None, meta=None, accessIPv4=None,
accessIPv6=None, disk_config=None):
- doc = Document()
- server = Element("server")
+ doc = xml_utils.Document()
+ server = xml_utils.Element("server")
doc.append(server)
if name is not None:
@@ -280,15 +287,15 @@
"compute/ext/disk_config/api/v1.1")
server.add_attr("OS-DCF:diskConfig", disk_config)
if meta is not None:
- metadata = Element("metadata")
+ metadata = xml_utils.Element("metadata")
server.append(metadata)
for k, v in meta:
- meta = Element("meta", key=k)
- meta.append(Text(v))
+ meta = xml_utils.Element("meta", key=k)
+ meta.append(xml_utils.Text(v))
metadata.append(meta)
resp, body = self.put('servers/%s' % str(server_id), str(doc))
- return resp, xml_to_json(etree.fromstring(body))
+ return resp, xml_utils.xml_to_json(etree.fromstring(body))
def create_server(self, name, image_ref, flavor_ref, **kwargs):
"""
@@ -312,11 +319,11 @@
max_count: Count of maximum number of instances to launch.
disk_config: Determines if user or admin controls disk configuration.
"""
- server = Element("server",
- xmlns=XMLNS_11,
- imageRef=image_ref,
- flavorRef=flavor_ref,
- name=name)
+ server = xml_utils.Element("server",
+ xmlns=xml_utils.XMLNS_11,
+ imageRef=image_ref,
+ flavorRef=flavor_ref,
+ name=name)
for attr in ["adminPass", "accessIPv4", "accessIPv6", "key_name",
"user_data", "availability_zone", "min_count",
@@ -330,37 +337,46 @@
server.add_attr('OS-DCF:diskConfig', kwargs['disk_config'])
if 'security_groups' in kwargs:
- secgroups = Element("security_groups")
+ secgroups = xml_utils.Element("security_groups")
server.append(secgroups)
for secgroup in kwargs['security_groups']:
- s = Element("security_group", name=secgroup['name'])
+ s = xml_utils.Element("security_group", name=secgroup['name'])
secgroups.append(s)
if 'networks' in kwargs:
- networks = Element("networks")
+ networks = xml_utils.Element("networks")
server.append(networks)
for network in kwargs['networks']:
- s = Element("network", uuid=network['uuid'],
- fixed_ip=network['fixed_ip'])
+ s = xml_utils.Element("network", uuid=network['uuid'],
+ fixed_ip=network['fixed_ip'])
networks.append(s)
if 'meta' in kwargs:
- metadata = Element("metadata")
+ metadata = xml_utils.Element("metadata")
server.append(metadata)
for k, v in kwargs['meta'].items():
- meta = Element("meta", key=k)
- meta.append(Text(v))
+ meta = xml_utils.Element("meta", key=k)
+ meta.append(xml_utils.Text(v))
metadata.append(meta)
if 'personality' in kwargs:
- personality = Element('personality')
+ personality = xml_utils.Element('personality')
server.append(personality)
for k in kwargs['personality']:
- temp = Element('file', path=k['path'])
- temp.append(Text(k['contents']))
+ temp = xml_utils.Element('file', path=k['path'])
+ temp.append(xml_utils.Text(k['contents']))
personality.append(temp)
- resp, body = self.post('servers', str(Document(server)))
+ if 'sched_hints' in kwargs:
+ sched_hints = kwargs.get('sched_hints')
+ hints = xml_utils.Element("os:scheduler_hints")
+ hints.add_attr('xmlns:os', xml_utils.XMLNS_11)
+ for attr in sched_hints:
+ p1 = xml_utils.Element(attr)
+ p1.append(sched_hints[attr])
+ hints.append(p1)
+ server.append(hints)
+ resp, body = self.post('servers', str(xml_utils.Document(server)))
server = self._parse_server(etree.fromstring(body))
return resp, server
@@ -418,11 +434,11 @@
def action(self, server_id, action_name, response_key, **kwargs):
if 'xmlns' not in kwargs:
- kwargs['xmlns'] = XMLNS_11
- doc = Document((Element(action_name, **kwargs)))
+ kwargs['xmlns'] = xml_utils.XMLNS_11
+ doc = xml_utils.Document((xml_utils.Element(action_name, **kwargs)))
resp, body = self.post("servers/%s/action" % server_id, str(doc))
if response_key is not None:
- body = xml_to_json(etree.fromstring(body))
+ body = xml_utils.xml_to_json(etree.fromstring(body))
return resp, body
def create_backup(self, server_id, backup_type, rotation, name):
@@ -438,7 +454,7 @@
def get_password(self, server_id):
resp, body = self.get("servers/%s/os-server-password" % str(server_id))
- body = xml_to_json(etree.fromstring(body))
+ body = xml_utils.xml_to_json(etree.fromstring(body))
return resp, body
def delete_password(self, server_id):
@@ -461,24 +477,23 @@
"compute/ext/disk_config/api/v1.1"
kwargs['xmlns:atom'] = "http://www.w3.org/2005/Atom"
if 'xmlns' not in kwargs:
- kwargs['xmlns'] = XMLNS_11
+ kwargs['xmlns'] = xml_utils.XMLNS_11
attrs = kwargs.copy()
if 'metadata' in attrs:
del attrs['metadata']
- rebuild = Element("rebuild",
- **attrs)
+ rebuild = xml_utils.Element("rebuild", **attrs)
if 'metadata' in kwargs:
- metadata = Element("metadata")
+ metadata = xml_utils.Element("metadata")
rebuild.append(metadata)
for k, v in kwargs['metadata'].items():
- meta = Element("meta", key=k)
- meta.append(Text(v))
+ meta = xml_utils.Element("meta", key=k)
+ meta.append(xml_utils.Text(v))
metadata.append(meta)
resp, body = self.post('servers/%s/action' % server_id,
- str(Document(rebuild)))
+ str(xml_utils.Document(rebuild)))
server = self._parse_server(etree.fromstring(body))
return resp, server
@@ -516,14 +531,14 @@
def live_migrate_server(self, server_id, dest_host, use_block_migration):
"""This should be called with administrator privileges ."""
- req_body = Element("os-migrateLive",
- xmlns=XMLNS_11,
- disk_over_commit=False,
- block_migration=use_block_migration,
- host=dest_host)
+ req_body = xml_utils.Element("os-migrateLive",
+ xmlns=xml_utils.XMLNS_11,
+ disk_over_commit=False,
+ block_migration=use_block_migration,
+ host=dest_host)
resp, body = self.post("servers/%s/action" % str(server_id),
- str(Document(req_body)))
+ str(xml_utils.Document(req_body)))
return resp, body
def list_server_metadata(self, server_id):
@@ -532,44 +547,44 @@
return resp, body
def set_server_metadata(self, server_id, meta, no_metadata_field=False):
- doc = Document()
+ doc = xml_utils.Document()
if not no_metadata_field:
- metadata = Element("metadata")
+ metadata = xml_utils.Element("metadata")
doc.append(metadata)
for k, v in meta.items():
- meta_element = Element("meta", key=k)
- meta_element.append(Text(v))
+ meta_element = xml_utils.Element("meta", key=k)
+ meta_element.append(xml_utils.Text(v))
metadata.append(meta_element)
resp, body = self.put('servers/%s/metadata' % str(server_id), str(doc))
- return resp, xml_to_json(etree.fromstring(body))
+ return resp, xml_utils.xml_to_json(etree.fromstring(body))
def update_server_metadata(self, server_id, meta):
- doc = Document()
- metadata = Element("metadata")
+ doc = xml_utils.Document()
+ metadata = xml_utils.Element("metadata")
doc.append(metadata)
for k, v in meta.items():
- meta_element = Element("meta", key=k)
- meta_element.append(Text(v))
+ meta_element = xml_utils.Element("meta", key=k)
+ meta_element.append(xml_utils.Text(v))
metadata.append(meta_element)
resp, body = self.post("/servers/%s/metadata" % str(server_id),
str(doc))
- body = xml_to_json(etree.fromstring(body))
+ body = xml_utils.xml_to_json(etree.fromstring(body))
return resp, body
def get_server_metadata_item(self, server_id, key):
resp, body = self.get("servers/%s/metadata/%s" % (str(server_id), key))
return resp, dict([(etree.fromstring(body).attrib['key'],
- xml_to_json(etree.fromstring(body)))])
+ xml_utils.xml_to_json(etree.fromstring(body)))])
def set_server_metadata_item(self, server_id, key, meta):
- doc = Document()
+ doc = xml_utils.Document()
for k, v in meta.items():
- meta_element = Element("meta", key=k)
- meta_element.append(Text(v))
+ meta_element = xml_utils.Element("meta", key=k)
+ meta_element.append(xml_utils.Text(v))
doc.append(meta_element)
resp, body = self.put('servers/%s/metadata/%s' % (str(server_id), key),
str(doc))
- return resp, xml_to_json(etree.fromstring(body))
+ return resp, xml_utils.xml_to_json(etree.fromstring(body))
def delete_server_metadata_item(self, server_id, key):
resp, body = self.delete("servers/%s/metadata/%s" %
@@ -598,10 +613,10 @@
return self.action(server_id, 'unrescue', None)
def attach_volume(self, server_id, volume_id, device='/dev/vdz'):
- post_body = Element("volumeAttachment", volumeId=volume_id,
- device=device)
+ post_body = xml_utils.Element("volumeAttachment", volumeId=volume_id,
+ device=device)
resp, body = self.post('servers/%s/os-volume_attachments' % server_id,
- str(Document(post_body)))
+ str(xml_utils.Document(post_body)))
return resp, body
def detach_volume(self, server_id, volume_id):
@@ -614,7 +629,7 @@
def get_server_diagnostics(self, server_id):
"""Get the usage data for a server."""
resp, body = self.get("servers/%s/diagnostics" % server_id)
- body = xml_to_json(etree.fromstring(body))
+ body = xml_utils.xml_to_json(etree.fromstring(body))
return resp, body
def list_instance_actions(self, server_id):
@@ -627,7 +642,7 @@
"""Returns the action details of the provided server."""
resp, body = self.get("servers/%s/os-instance-actions/%s" %
(server_id, request_id))
- body = xml_to_json(etree.fromstring(body))
+ body = xml_utils.xml_to_json(etree.fromstring(body))
return resp, body
def force_delete_server(self, server_id, **kwargs):
@@ -645,3 +660,8 @@
def inject_network_info(self, server_id, **kwargs):
"""Inject the Network Info into server"""
return self.action(server_id, 'injectNetworkInfo', None, **kwargs)
+
+ def get_vnc_console(self, server_id, console_type):
+ """Get URL of VNC console."""
+ return self.action(server_id, "os-getVNCConsole",
+ "console", type=console_type)
diff --git a/tempest/services/compute/xml/services_client.py b/tempest/services/compute/xml/services_client.py
index d7b8a60..e1e78d0 100644
--- a/tempest/services/compute/xml/services_client.py
+++ b/tempest/services/compute/xml/services_client.py
@@ -19,10 +19,8 @@
from lxml import etree
from tempest.common import rest_client
+from tempest.common import xml_utils
from tempest import config
-from tempest.services.compute.xml.common import Document
-from tempest.services.compute.xml.common import Element
-from tempest.services.compute.xml.common import xml_to_json
CONF = config.CONF
@@ -41,7 +39,7 @@
resp, body = self.get(url)
node = etree.fromstring(body)
- body = [xml_to_json(x) for x in node.getchildren()]
+ body = [xml_utils.xml_to_json(x) for x in node.getchildren()]
return resp, body
def enable_service(self, host_name, binary):
@@ -50,12 +48,13 @@
host_name: Name of host
binary: Service binary
"""
- post_body = Element("service")
+ post_body = xml_utils.Element("service")
post_body.add_attr('binary', binary)
post_body.add_attr('host', host_name)
- resp, body = self.put('os-services/enable', str(Document(post_body)))
- body = xml_to_json(etree.fromstring(body))
+ resp, body = self.put('os-services/enable', str(
+ xml_utils.Document(post_body)))
+ body = xml_utils.xml_to_json(etree.fromstring(body))
return resp, body
def disable_service(self, host_name, binary):
@@ -64,10 +63,11 @@
host_name: Name of host
binary: Service binary
"""
- post_body = Element("service")
+ post_body = xml_utils.Element("service")
post_body.add_attr('binary', binary)
post_body.add_attr('host', host_name)
- resp, body = self.put('os-services/disable', str(Document(post_body)))
- body = xml_to_json(etree.fromstring(body))
+ resp, body = self.put('os-services/disable', str(
+ xml_utils.Document(post_body)))
+ body = xml_utils.xml_to_json(etree.fromstring(body))
return resp, body
diff --git a/tempest/services/compute/xml/tenant_usages_client.py b/tempest/services/compute/xml/tenant_usages_client.py
index 79f0ac9..0b19f63 100644
--- a/tempest/services/compute/xml/tenant_usages_client.py
+++ b/tempest/services/compute/xml/tenant_usages_client.py
@@ -18,8 +18,8 @@
from lxml import etree
from tempest.common import rest_client
+from tempest.common import xml_utils
from tempest import config
-from tempest.services.compute.xml.common import xml_to_json
CONF = config.CONF
@@ -32,7 +32,7 @@
self.service = CONF.compute.catalog_type
def _parse_array(self, node):
- json = xml_to_json(node)
+ json = xml_utils.xml_to_json(node)
return json
def list_tenant_usages(self, params=None):
diff --git a/tempest/services/compute/xml/volumes_extensions_client.py b/tempest/services/compute/xml/volumes_extensions_client.py
index 570b715..e9c5035 100644
--- a/tempest/services/compute/xml/volumes_extensions_client.py
+++ b/tempest/services/compute/xml/volumes_extensions_client.py
@@ -19,13 +19,9 @@
from lxml import etree
from tempest.common import rest_client
+from tempest.common import xml_utils
from tempest import config
from tempest import exceptions
-from tempest.services.compute.xml.common import Document
-from tempest.services.compute.xml.common import Element
-from tempest.services.compute.xml.common import Text
-from tempest.services.compute.xml.common import xml_to_json
-from tempest.services.compute.xml.common import XMLNS_11
CONF = config.CONF
@@ -51,7 +47,7 @@
vol['metadata'] = dict((meta.get('key'),
meta.text) for meta in list(child))
else:
- vol[tag] = xml_to_json(child)
+ vol[tag] = xml_utils.xml_to_json(child)
return vol
def list_volumes(self, params=None):
@@ -96,23 +92,23 @@
:param display_name: Optional Volume Name.
:param metadata: An optional dictionary of values for metadata.
"""
- volume = Element("volume",
- xmlns=XMLNS_11,
- size=size)
+ volume = xml_utils.Element("volume",
+ xmlns=xml_utils.XMLNS_11,
+ size=size)
if display_name:
volume.add_attr('display_name', display_name)
if metadata:
- _metadata = Element('metadata')
+ _metadata = xml_utils.Element('metadata')
volume.append(_metadata)
for key, value in metadata.items():
- meta = Element('meta')
+ meta = xml_utils.Element('meta')
meta.add_attr('key', key)
- meta.append(Text(value))
+ meta.append(xml_utils.Text(value))
_metadata.append(meta)
- resp, body = self.post('os-volumes', str(Document(volume)))
- body = xml_to_json(etree.fromstring(body))
+ resp, body = self.post('os-volumes', str(xml_utils.Document(volume)))
+ body = xml_utils.xml_to_json(etree.fromstring(body))
return resp, body
def delete_volume(self, volume_id):
diff --git a/tempest/services/data_processing/v1_1/client.py b/tempest/services/data_processing/v1_1/client.py
index e96b44b..194e300 100644
--- a/tempest/services/data_processing/v1_1/client.py
+++ b/tempest/services/data_processing/v1_1/client.py
@@ -32,7 +32,6 @@
It returns pair: resp and parsed resource(s) body.
"""
-
resp, body = req_fun(uri, headers={
'Content-Type': 'application/json'
}, *args, **kwargs)
@@ -48,7 +47,7 @@
def get_node_group_template(self, tmpl_id):
"""Returns the details of a single node group template."""
- uri = "node-group-templates/%s" % tmpl_id
+ uri = 'node-group-templates/%s' % tmpl_id
return self._request_and_parse(self.get, uri, 'node_group_template')
def create_node_group_template(self, name, plugin_name, hadoop_version,
@@ -59,7 +58,7 @@
It supports passing additional params using kwargs and returns created
object.
"""
- uri = "node-group-templates"
+ uri = 'node-group-templates'
body = kwargs.copy()
body.update({
'name': name,
@@ -75,7 +74,7 @@
def delete_node_group_template(self, tmpl_id):
"""Deletes the specified node group template by id."""
- uri = "node-group-templates/%s" % tmpl_id
+ uri = 'node-group-templates/%s' % tmpl_id
return self.delete(uri)
def list_plugins(self):
@@ -87,7 +86,79 @@
def get_plugin(self, plugin_name, plugin_version=None):
"""Returns the details of a single plugin."""
- uri = "plugins/%s" % plugin_name
+ uri = 'plugins/%s' % plugin_name
if plugin_version:
uri += '/%s' % plugin_version
return self._request_and_parse(self.get, uri, 'plugin')
+
+ def list_cluster_templates(self):
+ """List all cluster templates for a user."""
+
+ uri = 'cluster-templates'
+ return self._request_and_parse(self.get, uri, 'cluster_templates')
+
+ def get_cluster_template(self, tmpl_id):
+ """Returns the details of a single cluster template."""
+
+ uri = 'cluster-templates/%s' % tmpl_id
+ return self._request_and_parse(self.get, uri, 'cluster_template')
+
+ def create_cluster_template(self, name, plugin_name, hadoop_version,
+ node_groups, cluster_configs=None,
+ **kwargs):
+ """Creates cluster template with specified params.
+
+ It supports passing additional params using kwargs and returns created
+ object.
+ """
+ uri = 'cluster-templates'
+ body = kwargs.copy()
+ body.update({
+ 'name': name,
+ 'plugin_name': plugin_name,
+ 'hadoop_version': hadoop_version,
+ 'node_groups': node_groups,
+ 'cluster_configs': cluster_configs or dict(),
+ })
+ return self._request_and_parse(self.post, uri, 'cluster_template',
+ body=json.dumps(body))
+
+ def delete_cluster_template(self, tmpl_id):
+ """Deletes the specified cluster template by id."""
+
+ uri = 'cluster-templates/%s' % tmpl_id
+ return self.delete(uri)
+
+ def list_data_sources(self):
+ """List all data sources for a user."""
+
+ uri = 'data-sources'
+ return self._request_and_parse(self.get, uri, 'data_sources')
+
+ def get_data_source(self, source_id):
+ """Returns the details of a single data source."""
+
+ uri = 'data-sources/%s' % source_id
+ return self._request_and_parse(self.get, uri, 'data_source')
+
+ def create_data_source(self, name, data_source_type, url, **kwargs):
+ """Creates data source with specified params.
+
+ It supports passing additional params using kwargs and returns created
+ object.
+ """
+ uri = 'data-sources'
+ body = kwargs.copy()
+ body.update({
+ 'name': name,
+ 'type': data_source_type,
+ 'url': url
+ })
+ return self._request_and_parse(self.post, uri, 'data_source',
+ body=json.dumps(body))
+
+ def delete_data_source(self, source_id):
+ """Deletes the specified data source by id."""
+
+ uri = 'data-sources/%s' % source_id
+ return self.delete(uri)
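For orientation, a minimal usage sketch of the new cluster-template and data-source helpers; the client handle, ids, plugin name, version and node-group payload are illustrative placeholders, not values taken from this change:

    # Hypothetical Sahara-style values; both create calls return the parsed
    # resource, so cleanup can be done by id afterwards.
    resp, template = client.create_cluster_template(
        name='worker-template',
        plugin_name='vanilla',
        hadoop_version='1.2.1',
        node_groups=[{'name': 'worker',
                      'node_group_template_id': ngt_id,
                      'count': 3}])
    resp, source = client.create_data_source(
        name='input', data_source_type='swift',
        url='swift://container/input-data')
    client.delete_cluster_template(template['id'])
    client.delete_data_source(source['id'])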
diff --git a/tempest/services/database/json/flavors_client.py b/tempest/services/database/json/flavors_client.py
index 1a8a4c1..2ec0405 100644
--- a/tempest/services/database/json/flavors_client.py
+++ b/tempest/services/database/json/flavors_client.py
@@ -13,9 +13,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+import urllib
+
from tempest.common import rest_client
from tempest import config
-import urllib
CONF = config.CONF
diff --git a/tempest/services/database/json/versions_client.py b/tempest/services/database/json/versions_client.py
new file mode 100644
index 0000000..0269c43
--- /dev/null
+++ b/tempest/services/database/json/versions_client.py
@@ -0,0 +1,38 @@
+# Copyright 2014 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import urllib
+
+from tempest.common import rest_client
+from tempest import config
+
+CONF = config.CONF
+
+
+class DatabaseVersionsClientJSON(rest_client.RestClient):
+
+ def __init__(self, auth_provider):
+ super(DatabaseVersionsClientJSON, self).__init__(auth_provider)
+ self.skip_path()
+ self.service = CONF.database.catalog_type
+
+ def list_db_versions(self, params=None):
+ """List all versions."""
+ url = ''
+ if params:
+ url += '?%s' % urllib.urlencode(params)
+
+ resp, body = self.get(url)
+ return resp, self._parse_resp(body)
diff --git a/tempest/services/identity/json/identity_client.py b/tempest/services/identity/json/identity_client.py
index 9a31540..55239f7 100644
--- a/tempest/services/identity/json/identity_client.py
+++ b/tempest/services/identity/json/identity_client.py
@@ -49,6 +49,12 @@
resp, body = self.post('OS-KSADM/roles', post_body)
return resp, self._parse_resp(body)
+ def get_role(self, role_id):
+ """Get a role by its id."""
+ resp, body = self.get('OS-KSADM/roles/%s' % role_id)
+ body = json.loads(body)
+ return resp, body['role']
+
def create_tenant(self, name, **kwargs):
"""
Create a tenant
@@ -134,9 +140,10 @@
post_body = {
'name': name,
'password': password,
- 'tenantId': tenant_id,
'email': email
}
+ if tenant_id is not None:
+ post_body['tenantId'] = tenant_id
if kwargs.get('enabled') is not None:
post_body['enabled'] = kwargs.get('enabled')
post_body = json.dumps({'user': post_body})
@@ -212,7 +219,7 @@
def list_services(self):
"""List Service - Returns Services."""
- resp, body = self.get('/OS-KSADM/services/')
+ resp, body = self.get('/OS-KSADM/services')
return resp, self._parse_resp(body)
def delete_service(self, service_id):
@@ -233,32 +240,58 @@
self.auth_url = auth_url
- def auth(self, user, password, tenant):
+ def auth(self, user, password, tenant=None):
creds = {
'auth': {
'passwordCredentials': {
'username': user,
'password': password,
},
- 'tenantName': tenant,
}
}
+
+ if tenant:
+ creds['auth']['tenantName'] = tenant
+
body = json.dumps(creds)
resp, body = self.post(self.auth_url, body=body)
return resp, body['access']
- def request(self, method, url, headers=None, body=None):
+ def auth_token(self, token_id, tenant=None):
+ creds = {
+ 'auth': {
+ 'token': {
+ 'id': token_id,
+ },
+ }
+ }
+
+ if tenant:
+ creds['auth']['tenantName'] = tenant
+
+ body = json.dumps(creds)
+ resp, body = self.post(self.auth_url, body=body)
+
+ return resp, body['access']
+
+ def request(self, method, url, extra_headers=False, headers=None,
+ body=None):
"""A simple HTTP request interface."""
if headers is None:
# Always accept 'json', for TokenClientXML too.
# Because XML response is not easily
# converted to the corresponding JSON one
headers = self.get_headers(accept_type="json")
- self._log_request(method, url, headers, body)
+ elif extra_headers:
+ try:
+ headers.update(self.get_headers(accept_type="json"))
+ except (ValueError, TypeError):
+ headers = self.get_headers(accept_type="json")
+
resp, resp_body = self.http_obj.request(url, method,
headers=headers, body=body)
- self._log_response(resp, resp_body)
+ self._log_request(method, url, resp)
if resp.status in [401, 403]:
resp_body = json.loads(resp_body)
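A hedged sketch of how the new auth_token() pairs with auth() to re-scope a v2 token; the credentials, and the assumption that TokenClientJSON is constructed with the v2 auth URL, are illustrative:

    token_client = TokenClientJSON(CONF.identity.uri)  # assumed v2 auth URL
    _, access = token_client.auth('demo', 'secret')               # unscoped
    token_id = access['token']['id']
    _, access = token_client.auth_token(token_id, tenant='demo')  # re-scoped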
diff --git a/tempest/services/identity/v3/json/endpoints_client.py b/tempest/services/identity/v3/json/endpoints_client.py
index c3c1e15..f7a894b 100644
--- a/tempest/services/identity/v3/json/endpoints_client.py
+++ b/tempest/services/identity/v3/json/endpoints_client.py
@@ -36,9 +36,17 @@
return resp, body['endpoints']
def create_endpoint(self, service_id, interface, url, **kwargs):
- """Create endpoint."""
+ """Create endpoint.
+
+        Normally this function only allows a boolean value for 'enabled'.
+        Use `force_enabled` to send a non-boolean value.
+
+ """
region = kwargs.get('region', None)
- enabled = kwargs.get('enabled', None)
+ if 'force_enabled' in kwargs:
+ enabled = kwargs.get('force_enabled', None)
+ else:
+ enabled = kwargs.get('enabled', None)
post_body = {
'service_id': service_id,
'interface': interface,
@@ -52,8 +60,13 @@
return resp, body['endpoint']
def update_endpoint(self, endpoint_id, service_id=None, interface=None,
- url=None, region=None, enabled=None):
- """Updates an endpoint with given parameters."""
+ url=None, region=None, enabled=None, **kwargs):
+ """Updates an endpoint with given parameters.
+
+        Normally this function only allows a boolean value for 'enabled'.
+        Use `force_enabled` to send a non-boolean value.
+
+ """
post_body = {}
if service_id is not None:
post_body['service_id'] = service_id
@@ -63,7 +76,9 @@
post_body['url'] = url
if region is not None:
post_body['region'] = region
- if enabled is not None:
+ if 'force_enabled' in kwargs:
+ post_body['enabled'] = kwargs['force_enabled']
+ elif enabled is not None:
post_body['enabled'] = enabled
post_body = json.dumps({'endpoint': post_body})
resp, body = self.patch('endpoints/%s' % endpoint_id, post_body)
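For example, a negative test could use the new escape hatch roughly as follows; the ids and URL are placeholders, and the API is expected to reject the non-boolean value:

    client.create_endpoint(service_id, 'public',
                           'http://localhost:8776/v2',
                           force_enabled='not-a-boolean')
    client.update_endpoint(endpoint_id, force_enabled=3)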
diff --git a/tempest/services/identity/v3/json/identity_client.py b/tempest/services/identity/v3/json/identity_client.py
index 65f3355..6829333 100644
--- a/tempest/services/identity/v3/json/identity_client.py
+++ b/tempest/services/identity/v3/json/identity_client.py
@@ -163,6 +163,12 @@
body = json.loads(body)
return resp, body['role']
+ def list_roles(self):
+ """Get the list of Roles."""
+ resp, body = self.get("roles")
+ body = json.loads(body)
+ return resp, body['roles']
+
def update_role(self, name, role_id):
"""Create a Role."""
post_body = {
@@ -297,6 +303,12 @@
body = json.loads(body)
return resp, body['users']
+ def list_user_groups(self, user_id):
+ """Lists groups which a user belongs to."""
+ resp, body = self.get('users/%s/groups' % user_id)
+ body = json.loads(body)
+ return resp, body['groups']
+
def delete_group_user(self, group_id, user_id):
"""Delete user in group."""
resp, body = self.delete('groups/%s/users/%s' % (group_id, user_id))
@@ -453,16 +465,20 @@
self.auth_url = auth_url
- def auth(self, user, password, tenant=None, user_type='id', domain=None):
+ def auth(self, user=None, password=None, tenant=None, user_type='id',
+ domain=None, token=None):
"""
:param user: user id or name, as specified in user_type
:param domain: the user and tenant domain
+ :param token: a token to re-scope.
Accepts different combinations of credentials. Restrictions:
- tenant and domain are only name (no id)
- user domain and tenant domain are assumed identical
- domain scope is not supported here
         Sample valid combinations:
+ - token
+ - token, tenant, domain
- user_id, password
- username, password, domain
- username, password, tenant, domain
@@ -471,23 +487,32 @@
creds = {
'auth': {
'identity': {
- 'methods': ['password'],
- 'password': {
- 'user': {
- 'password': password,
- }
- }
+ 'methods': [],
}
}
}
- if user_type == 'id':
- creds['auth']['identity']['password']['user']['id'] = user
- else:
- creds['auth']['identity']['password']['user']['name'] = user
- if domain is not None:
- _domain = dict(name=domain)
- creds['auth']['identity']['password']['user']['domain'] = _domain
+ id_obj = creds['auth']['identity']
+ if token:
+ id_obj['methods'].append('token')
+ id_obj['token'] = {
+ 'id': token
+ }
+ if user and password:
+ id_obj['methods'].append('password')
+ id_obj['password'] = {
+ 'user': {
+ 'password': password,
+ }
+ }
+ if user_type == 'id':
+ id_obj['password']['user']['id'] = user
+ else:
+ id_obj['password']['user']['name'] = user
+ if domain is not None:
+ _domain = dict(name=domain)
+ id_obj['password']['user']['domain'] = _domain
if tenant is not None:
+ _domain = dict(name=domain)
project = dict(name=tenant, domain=_domain)
scope = dict(project=project)
creds['auth']['scope'] = scope
@@ -496,17 +521,23 @@
resp, body = self.post(self.auth_url, body=body)
return resp, body
- def request(self, method, url, headers=None, body=None):
+ def request(self, method, url, extra_headers=False, headers=None,
+ body=None):
"""A simple HTTP request interface."""
if headers is None:
# Always accept 'json', for xml token client too.
# Because XML response is not easily
# converted to the corresponding JSON one
headers = self.get_headers(accept_type="json")
- self._log_request(method, url, headers, body)
+ elif extra_headers:
+ try:
+ headers.update(self.get_headers(accept_type="json"))
+ except (ValueError, TypeError):
+ headers = self.get_headers(accept_type="json")
+
resp, resp_body = self.http_obj.request(url, method,
headers=headers, body=body)
- self._log_response(resp, resp_body)
+ self._log_request(method, url, resp)
if resp.status in [401, 403]:
resp_body = json.loads(resp_body)
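For reference, the request body assembled by the reworked auth() when only a token plus a project scope are supplied looks roughly like the literal below; the token id, project and domain names are placeholders:

    {'auth': {'identity': {'methods': ['token'],
                           'token': {'id': '<token-id>'}},
              'scope': {'project': {'name': 'demo',
                                    'domain': {'name': 'Default'}}}}}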
diff --git a/tempest/services/identity/v3/xml/credentials_client.py b/tempest/services/identity/v3/xml/credentials_client.py
index 70f85a1..3c44188 100644
--- a/tempest/services/identity/v3/xml/credentials_client.py
+++ b/tempest/services/identity/v3/xml/credentials_client.py
@@ -18,8 +18,8 @@
from lxml import etree
from tempest.common import rest_client
+from tempest.common import xml_utils as common
from tempest import config
-from tempest.services.compute.xml import common
CONF = config.CONF
diff --git a/tempest/services/identity/v3/xml/endpoints_client.py b/tempest/services/identity/v3/xml/endpoints_client.py
index cc9aa65..6490e34 100644
--- a/tempest/services/identity/v3/xml/endpoints_client.py
+++ b/tempest/services/identity/v3/xml/endpoints_client.py
@@ -17,8 +17,8 @@
from tempest.common import http
from tempest.common import rest_client
+from tempest.common import xml_utils as common
from tempest import config
-from tempest.services.compute.xml import common
CONF = config.CONF
@@ -46,12 +46,19 @@
json = common.xml_to_json(body)
return json
- def request(self, method, url, headers=None, body=None, wait=None):
+ def request(self, method, url, extra_headers=False, headers=None,
+ body=None, wait=None):
"""Overriding the existing HTTP request in super class RestClient."""
+ if extra_headers:
+ try:
+ headers.update(self.get_headers())
+ except (ValueError, TypeError):
+ headers = self.get_headers()
dscv = CONF.identity.disable_ssl_certificate_validation
self.http_obj = http.ClosingHttp(
disable_ssl_certificate_validation=dscv)
return super(EndPointClientXML, self).request(method, url,
+ extra_headers,
headers=headers,
body=body)
@@ -62,11 +69,19 @@
return resp, body
def create_endpoint(self, service_id, interface, url, **kwargs):
- """Create endpoint."""
+ """Create endpoint.
+
+        Normally this function only allows a boolean value for 'enabled'.
+        Use `force_enabled` to send a non-boolean value.
+
+ """
region = kwargs.get('region', None)
- enabled = kwargs.get('enabled', None)
- if enabled is not None:
- enabled = str(enabled).lower()
+ if 'force_enabled' in kwargs:
+ enabled = kwargs['force_enabled']
+ else:
+ enabled = kwargs.get('enabled', None)
+ if enabled is not None:
+ enabled = str(enabled).lower()
create_endpoint = common.Element("endpoint",
xmlns=XMLNS,
service_id=service_id,
@@ -79,8 +94,13 @@
return resp, body
def update_endpoint(self, endpoint_id, service_id=None, interface=None,
- url=None, region=None, enabled=None):
- """Updates an endpoint with given parameters."""
+ url=None, region=None, enabled=None, **kwargs):
+ """Updates an endpoint with given parameters.
+
+        Normally this function only allows a boolean value for 'enabled'.
+        Use `force_enabled` to send a non-boolean value.
+
+ """
doc = common.Document()
endpoint = common.Element("endpoint")
doc.append(endpoint)
@@ -93,8 +113,12 @@
endpoint.add_attr("url", url)
if region:
endpoint.add_attr("region", region)
- if enabled is not None:
+
+ if 'force_enabled' in kwargs:
+ endpoint.add_attr("enabled", kwargs['force_enabled'])
+ elif enabled is not None:
endpoint.add_attr("enabled", str(enabled).lower())
+
resp, body = self.patch('endpoints/%s' % str(endpoint_id), str(doc))
body = self._parse_body(etree.fromstring(body))
return resp, body
diff --git a/tempest/services/identity/v3/xml/identity_client.py b/tempest/services/identity/v3/xml/identity_client.py
index 6ff6d56..35295d7 100644
--- a/tempest/services/identity/v3/xml/identity_client.py
+++ b/tempest/services/identity/v3/xml/identity_client.py
@@ -18,9 +18,9 @@
from lxml import etree
from tempest.common import rest_client
+from tempest.common import xml_utils as common
from tempest import config
from tempest import exceptions
-from tempest.services.compute.xml import common
CONF = config.CONF
@@ -52,6 +52,14 @@
array.append(common.xml_to_json(child))
return array
+ def _parse_groups(self, node):
+ array = []
+ for child in node.getchildren():
+ tag_list = child.tag.split('}', 1)
+ if tag_list[1] == "group":
+ array.append(common.xml_to_json(child))
+ return array
+
def _parse_group_users(self, node):
array = []
for child in node.getchildren():
@@ -209,6 +217,12 @@
body = self._parse_body(etree.fromstring(body))
return resp, body
+ def list_roles(self):
+ """Get the list of Roles."""
+ resp, body = self.get("roles")
+ body = self._parse_roles(etree.fromstring(body))
+ return resp, body
+
def update_role(self, name, role_id):
"""Updates a Role."""
post_body = common.Element("role",
@@ -342,6 +356,12 @@
body = self._parse_group_users(etree.fromstring(body))
return resp, body
+ def list_user_groups(self, user_id):
+ """Lists the groups which a user belongs to."""
+ resp, body = self.get('users/%s/groups' % user_id)
+ body = self._parse_groups(etree.fromstring(body))
+ return resp, body
+
def delete_group_user(self, group_id, user_id):
"""Delete user in group."""
resp, body = self.delete('groups/%s/users/%s' % (group_id, user_id))
@@ -439,43 +459,61 @@
self.auth_url = auth_url
- def auth(self, user, password, tenant=None, user_type='id', domain=None):
+ def auth(self, user=None, password=None, tenant=None, user_type='id',
+ domain=None, token=None):
"""
:param user: user id or name, as specified in user_type
+ :param domain: the user and tenant domain
+ :param token: a token to re-scope.
Accepts different combinations of credentials. Restrictions:
- tenant and domain are only name (no id)
- user domain and tenant domain are assumed identical
+ - domain scope is not supported here
         Sample valid combinations:
+ - token
+ - token, tenant, domain
- user_id, password
- username, password, domain
- username, password, tenant, domain
Validation is left to the server side.
"""
- if user_type == 'id':
- _user = common.Element('user', id=user, password=password)
- else:
- _user = common.Element('user', name=user, password=password)
- if domain is not None:
- _domain = common.Element('domain', name=domain)
- _user.append(_domain)
- password = common.Element('password')
- password.append(_user)
-
- method = common.Element('method')
- method.append(common.Text('password'))
methods = common.Element('methods')
- methods.append(method)
identity = common.Element('identity')
+
+ if token:
+ method = common.Element('method')
+ method.append(common.Text('token'))
+ methods.append(method)
+
+ token = common.Element('token', id=token)
+ identity.append(token)
+
+ if user and password:
+ if user_type == 'id':
+ _user = common.Element('user', id=user, password=password)
+ else:
+ _user = common.Element('user', name=user, password=password)
+ if domain is not None:
+ _domain = common.Element('domain', name=domain)
+ _user.append(_domain)
+
+ password = common.Element('password')
+ password.append(_user)
+ method = common.Element('method')
+ method.append(common.Text('password'))
+ methods.append(method)
+ identity.append(password)
+
identity.append(methods)
- identity.append(password)
auth = common.Element('auth')
auth.append(identity)
if tenant is not None:
project = common.Element('project', name=tenant)
+ _domain = common.Element('domain', name=domain)
project.append(_domain)
scope = common.Element('scope')
scope.append(project)
@@ -484,17 +522,22 @@
resp, body = self.post(self.auth_url, body=str(common.Document(auth)))
return resp, body
- def request(self, method, url, headers=None, body=None):
+ def request(self, method, url, extra_headers=False, headers=None,
+ body=None):
"""A simple HTTP request interface."""
if headers is None:
# Always accept 'json', for xml token client too.
# Because XML response is not easily
# converted to the corresponding JSON one
headers = self.get_headers(accept_type="json")
- self._log_request(method, url, headers, body)
+ elif extra_headers:
+ try:
+ headers.update(self.get_headers(accept_type="json"))
+ except (ValueError, TypeError):
+ headers = self.get_headers(accept_type="json")
resp, resp_body = self.http_obj.request(url, method,
headers=headers, body=body)
- self._log_response(resp, resp_body)
+ self._log_request(method, url, resp)
if resp.status in [401, 403]:
resp_body = json.loads(resp_body)
diff --git a/tempest/services/identity/v3/xml/policy_client.py b/tempest/services/identity/v3/xml/policy_client.py
index bf4cce7..73d831b 100644
--- a/tempest/services/identity/v3/xml/policy_client.py
+++ b/tempest/services/identity/v3/xml/policy_client.py
@@ -17,8 +17,8 @@
from tempest.common import http
from tempest.common import rest_client
+from tempest.common import xml_utils as common
from tempest import config
-from tempest.services.compute.xml import common
CONF = config.CONF
@@ -46,12 +46,19 @@
json = common.xml_to_json(body)
return json
- def request(self, method, url, headers=None, body=None, wait=None):
+ def request(self, method, url, extra_headers=False, headers=None,
+ body=None, wait=None):
"""Overriding the existing HTTP request in super class RestClient."""
+ if extra_headers:
+ try:
+ headers.update(self.get_headers())
+ except (ValueError, TypeError):
+ headers = self.get_headers()
dscv = CONF.identity.disable_ssl_certificate_validation
self.http_obj = http.ClosingHttp(
disable_ssl_certificate_validation=dscv)
return super(PolicyClientXML, self).request(method, url,
+ extra_headers,
headers=headers,
body=body)
diff --git a/tempest/services/identity/v3/xml/service_client.py b/tempest/services/identity/v3/xml/service_client.py
index 966d7f7..37ed892 100644
--- a/tempest/services/identity/v3/xml/service_client.py
+++ b/tempest/services/identity/v3/xml/service_client.py
@@ -16,8 +16,8 @@
from lxml import etree
from tempest.common import rest_client
+from tempest.common import xml_utils as common
from tempest import config
-from tempest.services.compute.xml import common
CONF = config.CONF
diff --git a/tempest/services/identity/xml/identity_client.py b/tempest/services/identity/xml/identity_client.py
index 50403fb..c48bc90 100644
--- a/tempest/services/identity/xml/identity_client.py
+++ b/tempest/services/identity/xml/identity_client.py
@@ -12,8 +12,8 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+from tempest.common import xml_utils as xml
from tempest import config
-from tempest.services.compute.xml import common as xml
from tempest.services.identity.json import identity_client
CONF = config.CONF
@@ -31,6 +31,11 @@
str(xml.Document(create_role)))
return resp, self._parse_resp(body)
+ def get_role(self, role_id):
+ """Get a role by its id."""
+ resp, body = self.get('OS-KSADM/roles/%s' % role_id)
+ return resp, self._parse_resp(body)
+
def create_tenant(self, name, **kwargs):
"""
Create a tenant
@@ -75,8 +80,9 @@
xmlns=XMLNS,
name=name,
password=password,
- tenantId=tenant_id,
email=email)
+ if tenant_id:
+ create_user.add_attr('tenantId', tenant_id)
if 'enabled' in kwargs:
create_user.add_attr('enabled', str(kwargs['enabled']).lower())
@@ -116,11 +122,24 @@
class TokenClientXML(identity_client.TokenClientJSON):
TYPE = "xml"
- def auth(self, user, password, tenant):
- passwordCreds = xml.Element("passwordCredentials",
+ def auth(self, user, password, tenant=None):
+ passwordCreds = xml.Element('passwordCredentials',
username=user,
password=password)
- auth = xml.Element("auth", tenantName=tenant)
+ auth_kwargs = {}
+ if tenant:
+ auth_kwargs['tenantName'] = tenant
+ auth = xml.Element('auth', **auth_kwargs)
auth.append(passwordCreds)
resp, body = self.post(self.auth_url, body=str(xml.Document(auth)))
return resp, body['access']
+
+ def auth_token(self, token_id, tenant=None):
+ tokenCreds = xml.Element('token', id=token_id)
+ auth_kwargs = {}
+ if tenant:
+ auth_kwargs['tenantName'] = tenant
+ auth = xml.Element('auth', **auth_kwargs)
+ auth.append(tokenCreds)
+ resp, body = self.post(self.auth_url, body=str(xml.Document(auth)))
+ return resp, body['access']
diff --git a/tempest/services/network/json/network_client.py b/tempest/services/network/json/network_client.py
index a804e8e..8e53b8d 100644
--- a/tempest/services/network/json/network_client.py
+++ b/tempest/services/network/json/network_client.py
@@ -144,25 +144,6 @@
body = json.loads(body)
return resp, body
- def create_floating_ip(self, ext_network_id, **kwargs):
- post_body = {
- 'floatingip': kwargs}
- post_body['floatingip']['floating_network_id'] = ext_network_id
- body = json.dumps(post_body)
- uri = '%s/floatingips' % (self.uri_prefix)
- resp, body = self.post(uri, body=body)
- body = json.loads(body)
- return resp, body
-
- def update_floating_ip(self, floating_ip_id, **kwargs):
- post_body = {
- 'floatingip': kwargs}
- body = json.dumps(post_body)
- uri = '%s/floatingips/%s' % (self.uri_prefix, floating_ip_id)
- resp, body = self.put(uri, body)
- body = json.loads(body)
- return resp, body
-
def associate_health_monitor_with_pool(self, health_monitor_id,
pool_id):
post_body = {
@@ -184,33 +165,6 @@
resp, body = self.delete(uri)
return resp, body
- def create_vpnservice(self, subnet_id, router_id, **kwargs):
- post_body = {
- "vpnservice": {
- "subnet_id": subnet_id,
- "router_id": router_id
- }
- }
- for key, val in kwargs.items():
- post_body['vpnservice'][key] = val
- body = json.dumps(post_body)
- uri = '%s/vpn/vpnservices' % (self.uri_prefix)
- resp, body = self.post(uri, body)
- body = json.loads(body)
- return resp, body
-
- def update_vpnservice(self, uuid, description):
- put_body = {
- "vpnservice": {
- "description": description
- }
- }
- body = json.dumps(put_body)
- uri = '%s/vpn/vpnservices/%s' % (self.uri_prefix, uuid)
- resp, body = self.put(uri, body)
- body = json.loads(body)
- return resp, body
-
def list_router_interfaces(self, uuid):
uri = '%s/ports?device_id=%s' % (self.uri_prefix, uuid)
resp, body = self.get(uri)
@@ -300,14 +254,6 @@
body = json.loads(body)
return resp, body
- def update_ikepolicy(self, uuid, **kwargs):
- put_body = {'ikepolicy': kwargs}
- body = json.dumps(put_body)
- uri = '%s/vpn/ikepolicies/%s' % (self.uri_prefix, uuid)
- resp, body = self.put(uri, body)
- body = json.loads(body)
- return resp, body
-
def update_extra_routes(self, router_id, nexthop, destination):
uri = '%s/routers/%s' % (self.uri_prefix, router_id)
put_body = {
@@ -339,3 +285,11 @@
resp, body = self.get(uri)
body = json.loads(body)
return resp, body
+
+ def add_dhcp_agent_to_network(self, agent_id, network_id):
+ post_body = {'network_id': network_id}
+ body = json.dumps(post_body)
+ uri = '%s/agents/%s/dhcp-networks' % (self.uri_prefix, agent_id)
+ resp, body = self.post(uri, body)
+ body = json.loads(body)
+ return resp, body
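A brief usage note for the new scheduling helper; the agent and network ids are placeholders:

    # Issues POST {"network_id": network_id} to
    # <uri_prefix>/agents/<agent_id>/dhcp-networks, mirroring the code above.
    resp, body = client.add_dhcp_agent_to_network(agent_id, network_id)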
diff --git a/tempest/services/network/network_client_base.py b/tempest/services/network/network_client_base.py
index 41a7aa4..2a797b2 100644
--- a/tempest/services/network/network_client_base.py
+++ b/tempest/services/network/network_client_base.py
@@ -31,12 +31,15 @@
'vpnservices': 'vpn',
'ikepolicies': 'vpn',
'metering_labels': 'metering',
- 'metering_label_rules': 'metering'
+ 'metering_label_rules': 'metering',
+ 'firewall_rules': 'fw',
+ 'firewall_policies': 'fw',
+ 'firewalls': 'fw'
}
# The following list represents resource names that do not require
# changing underscore to a hyphen
-hyphen_exceptions = ["health_monitors"]
+hyphen_exceptions = ["health_monitors", "firewall_rules", "firewall_policies"]
# map from resource name to a plural name
# needed only for those which can't be constructed as name + 's'
@@ -44,8 +47,8 @@
'security_groups': 'security_groups',
'security_group_rules': 'security_group_rules',
'ikepolicy': 'ikepolicies',
- 'floating_ip': 'floatingips',
- 'quotas': 'quotas'
+ 'quotas': 'quotas',
+ 'firewall_policy': 'firewall_policies'
}
@@ -101,7 +104,7 @@
def _list(**filters):
uri = self.get_uri(plural_name)
if filters:
- uri += '?' + urllib.urlencode(filters)
+ uri += '?' + urllib.urlencode(filters, doseq=1)
resp, body = self.get(uri)
result = {plural_name: self.deserialize_list(body)}
return resp, result
@@ -117,14 +120,14 @@
return _delete
def _shower(self, resource_name):
- def _show(resource_id, field_list=[]):
- # field_list is a sequence of two-element tuples, with the
- # first element being 'fields'. An example:
- # [('fields', 'id'), ('fields', 'name')]
+ def _show(resource_id, **fields):
+ # fields is a dict which key is 'fields' and value is a
+ # list of field's name. An example:
+ # {'fields': ['id', 'name']}
plural = self.pluralize(resource_name)
uri = '%s/%s' % (self.get_uri(plural), resource_id)
- if field_list:
- uri += '?' + urllib.urlencode(field_list)
+ if fields:
+ uri += '?' + urllib.urlencode(fields, doseq=1)
resp, body = self.get(uri)
body = self.deserialize_single(body)
return resp, body
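The switch to urlencode(..., doseq=1) is what lets a list value expand into repeated query parameters instead of a stringified Python list; a quick Python 2 illustration, with results shown as comments:

    import urllib
    urllib.urlencode({'fields': ['id', 'name']}, doseq=1)
    # -> 'fields=id&fields=name'
    urllib.urlencode({'fields': ['id', 'name']})
    # -> 'fields=%5B%27id%27%2C+%27name%27%5D'  (the list's repr, quoted)

so a caller can now write something like client.show_network(net_id, fields=['id', 'name']), assuming the dynamically generated show_* helpers.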
diff --git a/tempest/services/network/xml/network_client.py b/tempest/services/network/xml/network_client.py
index 2a5083c..a9d4880 100644
--- a/tempest/services/network/xml/network_client.py
+++ b/tempest/services/network/xml/network_client.py
@@ -14,7 +14,7 @@
import xml.etree.ElementTree as ET
from tempest.common import rest_client
-from tempest.services.compute.xml import common
+from tempest.common import xml_utils as common
from tempest.services.network import network_client_base as client_base
@@ -24,7 +24,7 @@
# list of plurals used for xml serialization
PLURALS = ['dns_nameservers', 'host_routes', 'allocation_pools',
'fixed_ips', 'extensions', 'extra_dhcp_opts', 'pools',
- 'health_monitors', 'vips']
+ 'health_monitors', 'vips', 'members']
def get_rest_client(self, auth_provider):
rc = rest_client.RestClient(auth_provider)
@@ -166,33 +166,6 @@
body = _root_tag_fetcher_and_xml_to_json_parse(body)
return resp, body
- def create_floating_ip(self, ext_network_id, **kwargs):
- uri = '%s/floatingips' % (self.uri_prefix)
- floatingip = common.Element('floatingip')
- floatingip.append(common.Element("floating_network_id",
- ext_network_id))
- for element, content in kwargs.iteritems():
- floatingip.append(common.Element(element, content))
- resp, body = self.post(uri, str(common.Document(floatingip)))
- body = _root_tag_fetcher_and_xml_to_json_parse(body)
- return resp, body
-
- def update_floating_ip(self, floating_ip_id, **kwargs):
- uri = '%s/floatingips/%s' % (self.uri_prefix, floating_ip_id)
- floatingip = common.Element('floatingip')
- floatingip.add_attr('xmlns:xsi',
- 'http://www.w3.org/2001/XMLSchema-instance')
- for element, content in kwargs.iteritems():
- if content is None:
- xml_elem = common.Element(element)
- xml_elem.add_attr("xsi:nil", "true")
- floatingip.append(xml_elem)
- else:
- floatingip.append(common.Element(element, content))
- resp, body = self.put(uri, str(common.Document(floatingip)))
- body = _root_tag_fetcher_and_xml_to_json_parse(body)
- return resp, body
-
def list_router_interfaces(self, uuid):
uri = '%s/ports?device_id=%s' % (self.uri_prefix, uuid)
resp, body = self.get(uri)
@@ -277,6 +250,13 @@
body = _root_tag_fetcher_and_xml_to_json_parse(body)
return resp, body
+ def add_dhcp_agent_to_network(self, agent_id, network_id):
+ uri = '%s/agents/%s/dhcp-networks' % (self.uri_prefix, agent_id)
+ network = common.Element("network_id", network_id)
+ resp, body = self.post(uri, str(common.Document(network)))
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
def _root_tag_fetcher_and_xml_to_json_parse(xml_returned_body):
body = ET.fromstring(xml_returned_body)
diff --git a/tempest/services/object_storage/account_client.py b/tempest/services/object_storage/account_client.py
index 7c3fa85..a0506f2 100644
--- a/tempest/services/object_storage/account_client.py
+++ b/tempest/services/object_storage/account_client.py
@@ -162,23 +162,28 @@
self.service = CONF.object_storage.catalog_type
self.format = 'json'
- def request(self, method, url, headers=None, body=None):
+ def request(self, method, url, extra_headers=False, headers=None,
+ body=None):
"""A simple HTTP request interface."""
self.http_obj = http.ClosingHttp()
if headers is None:
headers = {}
+ elif extra_headers:
+ try:
+ headers.update(self.get_headers())
+ except (ValueError, TypeError):
+ headers = {}
# Authorize the request
req_url, req_headers, req_body = self.auth_provider.auth_request(
method=method, url=url, headers=headers, body=body,
filters=self.filters
)
- self._log_request(method, req_url, headers, body)
# use original body
resp, resp_body = self.http_obj.request(req_url, method,
headers=req_headers,
body=req_body)
- self._log_response(resp, resp_body)
+ self._log_request(method, req_url, resp)
if resp.status == 401 or resp.status == 403:
raise exceptions.Unauthorized()
diff --git a/tempest/services/object_storage/object_client.py b/tempest/services/object_storage/object_client.py
index 77d29a5..f3f4eb6 100644
--- a/tempest/services/object_storage/object_client.py
+++ b/tempest/services/object_storage/object_client.py
@@ -29,12 +29,16 @@
self.service = CONF.object_storage.catalog_type
- def create_object(self, container, object_name, data, params=None):
+ def create_object(self, container, object_name, data,
+ params=None, metadata=None):
"""Create storage object."""
headers = self.get_headers()
if not data:
headers['content-length'] = '0'
+ if metadata:
+ for key in metadata:
+ headers[str(key)] = metadata[key]
url = "%s/%s" % (str(container), str(object_name))
if params:
url += '?%s' % urllib.urlencode(params)
@@ -73,11 +77,16 @@
resp, body = self.head(url)
return resp, body
- def get_object(self, container, object_name):
+ def get_object(self, container, object_name, metadata=None):
"""Retrieve object's data."""
+ headers = {}
+ if metadata:
+ for key in metadata:
+ headers[str(key)] = metadata[key]
+
url = "{0}/{1}".format(container, object_name)
- resp, body = self.get(url)
+ resp, body = self.get(url, headers=headers)
return resp, body
def copy_object_in_same_container(self, container, src_object_name,
@@ -146,13 +155,19 @@
self.service = CONF.object_storage.catalog_type
self.format = 'json'
- def request(self, method, url, headers=None, body=None):
+ def request(self, method, url, extra_headers=False, headers=None,
+ body=None):
"""A simple HTTP request interface."""
dscv = CONF.identity.disable_ssl_certificate_validation
self.http_obj = http.ClosingHttp(
disable_ssl_certificate_validation=dscv)
if headers is None:
headers = {}
+ elif extra_headers:
+ try:
+ headers.update(self.get_headers())
+ except (ValueError, TypeError):
+ headers = {}
# Authorize the request
req_url, req_headers, req_body = self.auth_provider.auth_request(
@@ -160,11 +175,10 @@
filters=self.filters
)
# Use original method
- self._log_request(method, req_url, headers, body)
resp, resp_body = self.http_obj.request(req_url, method,
headers=req_headers,
body=req_body)
- self._log_response(resp, resp_body)
+ self._log_request(method, req_url, resp)
if resp.status == 401 or resp.status == 403:
raise exceptions.Unauthorized()
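The new metadata arguments on create_object()/get_object() above simply fold extra headers into the request; a hedged example, with a Swift-style header name chosen for illustration:

    resp, _ = client.create_object('cont', 'obj', 'some data',
                                   metadata={'X-Object-Meta-Color': 'blue'})
    # get_object() forwards its metadata dict as request headers in the same
    # way, which is handy for conditional or range GETs.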
diff --git a/tempest/services/orchestration/json/orchestration_client.py b/tempest/services/orchestration/json/orchestration_client.py
index 113003c..2311bdd 100644
--- a/tempest/services/orchestration/json/orchestration_client.py
+++ b/tempest/services/orchestration/json/orchestration_client.py
@@ -154,7 +154,8 @@
if resource_status == status:
return
if fail_regexp.search(resource_status):
- raise exceptions.StackBuildErrorException(
+ raise exceptions.StackResourceBuildErrorException(
+ resource_name=resource_name,
stack_identifier=stack_identifier,
resource_status=resource_status,
resource_status_reason=body['resource_status_reason'])
diff --git a/tempest/services/telemetry/telemetry_client_base.py b/tempest/services/telemetry/telemetry_client_base.py
index 610f07b..a073f54 100644
--- a/tempest/services/telemetry/telemetry_client_base.py
+++ b/tempest/services/telemetry/telemetry_client_base.py
@@ -73,7 +73,10 @@
return resp, body
def put(self, uri, body):
- return self.rest_client.put(uri, body)
+ body = self.serialize(body)
+ resp, body = self.rest_client.put(uri, body)
+ body = self.deserialize(body)
+ return resp, body
def get(self, uri):
resp, body = self.rest_client.get(uri)
@@ -133,3 +136,15 @@
def create_alarm(self, **kwargs):
uri = "%s/alarms" % self.uri_prefix
return self.post(uri, kwargs)
+
+ def update_alarm(self, alarm_id, **kwargs):
+ uri = "%s/alarms/%s" % (self.uri_prefix, alarm_id)
+ return self.put(uri, kwargs)
+
+ def alarm_get_state(self, alarm_id):
+ uri = "%s/alarms/%s/state" % (self.uri_prefix, alarm_id)
+ return self.get(uri)
+
+ def alarm_set_state(self, alarm_id, state):
+ uri = "%s/alarms/%s/state" % (self.uri_prefix, alarm_id)
+ return self.put(uri, state)
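A hedged sketch of how the new alarm helpers might be driven from a test; the alarm id and the field/state values are placeholders:

    resp, body = client.update_alarm(alarm_id, name='updated-alarm')
    resp, state = client.alarm_get_state(alarm_id)
    resp, state = client.alarm_set_state(alarm_id, 'alarm')
    # put() now serializes the body before sending, so the bare state
    # string is submitted in the client's own format.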
diff --git a/tempest/services/telemetry/xml/telemetry_client.py b/tempest/services/telemetry/xml/telemetry_client.py
index 673f98e..3bee8bf 100644
--- a/tempest/services/telemetry/xml/telemetry_client.py
+++ b/tempest/services/telemetry/xml/telemetry_client.py
@@ -16,7 +16,7 @@
from lxml import etree
from tempest.common import rest_client
-from tempest.services.compute.xml import common
+from tempest.common import xml_utils as common
import tempest.services.telemetry.telemetry_client_base as client
diff --git a/tempest/services/volume/json/admin/volume_services_client.py b/tempest/services/volume/json/admin/volume_services_client.py
new file mode 100644
index 0000000..d43c04a
--- /dev/null
+++ b/tempest/services/volume/json/admin/volume_services_client.py
@@ -0,0 +1,38 @@
+# Copyright 2014 NEC Corporation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import urllib
+
+from tempest.common import rest_client
+from tempest import config
+
+CONF = config.CONF
+
+
+class VolumesServicesClientJSON(rest_client.RestClient):
+
+ def __init__(self, auth_provider):
+ super(VolumesServicesClientJSON, self).__init__(auth_provider)
+ self.service = CONF.volume.catalog_type
+
+ def list_services(self, params=None):
+ url = 'os-services'
+ if params:
+ url += '?%s' % urllib.urlencode(params)
+
+ resp, body = self.get(url)
+ body = json.loads(body)
+ return resp, body['services']
diff --git a/tempest/services/volume/json/admin/volume_types_client.py b/tempest/services/volume/json/admin/volume_types_client.py
index 5554362..c9c0582 100644
--- a/tempest/services/volume/json/admin/volume_types_client.py
+++ b/tempest/services/volume/json/admin/volume_types_client.py
@@ -122,3 +122,31 @@
resp, body = self.put(url, put_body)
body = json.loads(body)
return resp, body
+
+ def get_encryption_type(self, vol_type_id):
+ """
+ Get the volume encryption type for the specified volume type.
+ vol_type_id: Id of volume_type.
+ """
+ url = "/types/%s/encryption" % str(vol_type_id)
+ resp, body = self.get(url)
+ body = json.loads(body)
+ return resp, body
+
+ def create_encryption_type(self, vol_type_id, **kwargs):
+ """
+ Create a new encryption type for the specified volume type.
+
+ vol_type_id: Id of volume_type.
+ provider: Class providing encryption support.
+ cipher: Encryption algorithm/mode to use.
+ key_size: Size of the encryption key, in bits.
+ control_location: Notional service where encryption is performed.
+ """
+ url = "/types/%s/encryption" % str(vol_type_id)
+ post_body = {}
+ post_body.update(kwargs)
+ post_body = json.dumps({'encryption': post_body})
+ resp, body = self.post(url, post_body)
+ body = json.loads(body)
+ return resp, body['encryption']
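A minimal, hedged usage sketch of the new encryption-type calls; the provider class and cipher parameters are illustrative values, not mandated by this client:

    resp, enc = client.create_encryption_type(
        vol_type_id,
        provider='nova.volume.encryptors.luks.LuksEncryptor',
        cipher='aes-xts-plain64',
        key_size=256,
        control_location='front-end')
    resp, enc = client.get_encryption_type(vol_type_id)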
diff --git a/tempest/services/volume/json/volumes_client.py b/tempest/services/volume/json/volumes_client.py
index e4d2e8d..b55a037 100644
--- a/tempest/services/volume/json/volumes_client.py
+++ b/tempest/services/volume/json/volumes_client.py
@@ -67,10 +67,10 @@
body = json.loads(body)
return resp, body['volume']
- def create_volume(self, size, **kwargs):
+ def create_volume(self, size=None, **kwargs):
"""
Creates a new Volume.
- size(Required): Size of volume in GB.
+ size: Size of volume in GB.
Following optional keyword arguments are accepted:
display_name: Optional Volume Name.
metadata: A dictionary of values to be used as metadata.
@@ -78,6 +78,10 @@
snapshot_id: When specified the volume is created from this snapshot
imageRef: When specified the volume is created from this image
"""
+ # for bug #1293885:
+ # If no size specified, read volume size from CONF
+ if size is None:
+ size = CONF.volume.volume_size
post_body = {'size': size}
post_body.update(kwargs)
post_body = json.dumps({'volume': post_body})
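With this change the two calls below become equivalent whenever CONF.volume.volume_size is 1 (an illustrative setting):

    resp, volume = volumes_client.create_volume(display_name='vol-a')
    resp, volume = volumes_client.create_volume(size=1, display_name='vol-a')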
diff --git a/tempest/services/volume/v2/json/volumes_client.py b/tempest/services/volume/v2/json/volumes_client.py
index 5bfa75f..df20a2a 100644
--- a/tempest/services/volume/v2/json/volumes_client.py
+++ b/tempest/services/volume/v2/json/volumes_client.py
@@ -68,10 +68,10 @@
body = json.loads(body)
return resp, body['volume']
- def create_volume(self, size, **kwargs):
+ def create_volume(self, size=None, **kwargs):
"""
Creates a new Volume.
- size(Required): Size of volume in GB.
+ size: Size of volume in GB.
Following optional keyword arguments are accepted:
name: Optional Volume Name.
metadata: A dictionary of values to be used as metadata.
@@ -79,6 +79,10 @@
snapshot_id: When specified the volume is created from this snapshot
imageRef: When specified the volume is created from this image
"""
+ # for bug #1293885:
+ # If no size specified, read volume size from CONF
+ if size is None:
+ size = CONF.volume.volume_size
post_body = {'size': size}
post_body.update(kwargs)
post_body = json.dumps({'volume': post_body})
diff --git a/tempest/services/volume/v2/xml/volumes_client.py b/tempest/services/volume/v2/xml/volumes_client.py
index 0b8f47c..1fdaf19 100644
--- a/tempest/services/volume/v2/xml/volumes_client.py
+++ b/tempest/services/volume/v2/xml/volumes_client.py
@@ -19,9 +19,9 @@
from lxml import etree
from tempest.common import rest_client
+from tempest.common import xml_utils as common
from tempest import config
from tempest import exceptions
-from tempest.services.compute.xml import common
CONF = config.CONF
@@ -117,10 +117,10 @@
body = self._check_if_bootable(body)
return resp, body
- def create_volume(self, size, **kwargs):
+ def create_volume(self, size=None, **kwargs):
"""Creates a new Volume.
- :param size: Size of volume in GB. (Required)
+ :param size: Size of volume in GB.
:param name: Optional Volume Name.
:param metadata: An optional dictionary of values for metadata.
:param volume_type: Optional Name of volume_type for the volume
@@ -129,6 +129,10 @@
:param imageRef: When specified the volume is created from this
image
"""
+ # for bug #1293885:
+ # If no size specified, read volume size from CONF
+ if size is None:
+ size = CONF.volume.volume_size
# NOTE(afazekas): it should use a volume namespace
volume = common.Element("volume", xmlns=common.XMLNS_11, size=size)
diff --git a/tempest/services/volume/xml/admin/volume_hosts_client.py b/tempest/services/volume/xml/admin/volume_hosts_client.py
index e34b9f0..967c7c2 100644
--- a/tempest/services/volume/xml/admin/volume_hosts_client.py
+++ b/tempest/services/volume/xml/admin/volume_hosts_client.py
@@ -18,8 +18,8 @@
from lxml import etree
from tempest.common import rest_client
+from tempest.common import xml_utils as common
from tempest import config
-from tempest.services.compute.xml import common
CONF = config.CONF
diff --git a/tempest/services/volume/xml/admin/volume_quotas_client.py b/tempest/services/volume/xml/admin/volume_quotas_client.py
index d2eac34..710fb3a 100644
--- a/tempest/services/volume/xml/admin/volume_quotas_client.py
+++ b/tempest/services/volume/xml/admin/volume_quotas_client.py
@@ -17,8 +17,8 @@
from ast import literal_eval
from lxml import etree
+from tempest.common import xml_utils as xml
from tempest import config
-from tempest.services.compute.xml import common as xml
from tempest.services.volume.json.admin import volume_quotas_client
CONF = config.CONF
diff --git a/tempest/services/volume/xml/admin/volume_services_client.py b/tempest/services/volume/xml/admin/volume_services_client.py
new file mode 100644
index 0000000..7bad16d
--- /dev/null
+++ b/tempest/services/volume/xml/admin/volume_services_client.py
@@ -0,0 +1,42 @@
+# Copyright 2014 NEC Corporation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import urllib
+
+from lxml import etree
+
+from tempest.common import rest_client
+from tempest.common import xml_utils
+from tempest import config
+
+CONF = config.CONF
+
+
+class VolumesServicesClientXML(rest_client.RestClient):
+ TYPE = "xml"
+
+ def __init__(self, auth_provider):
+ super(VolumesServicesClientXML, self).__init__(auth_provider)
+ self.service = CONF.volume.catalog_type
+
+ def list_services(self, params=None):
+ url = 'os-services'
+ if params:
+ url += '?%s' % urllib.urlencode(params)
+
+ resp, body = self.get(url)
+ node = etree.fromstring(body)
+ body = [xml_utils.xml_to_json(x) for x in node.getchildren()]
+ return resp, body
diff --git a/tempest/services/volume/xml/admin/volume_types_client.py b/tempest/services/volume/xml/admin/volume_types_client.py
index 1fa3e73..90897ee 100644
--- a/tempest/services/volume/xml/admin/volume_types_client.py
+++ b/tempest/services/volume/xml/admin/volume_types_client.py
@@ -18,9 +18,9 @@
from lxml import etree
from tempest.common import rest_client
+from tempest.common import xml_utils as common
from tempest import config
from tempest import exceptions
-from tempest.services.compute.xml import common
CONF = config.CONF
diff --git a/tempest/services/volume/xml/extensions_client.py b/tempest/services/volume/xml/extensions_client.py
index 4861733..2986fcd 100644
--- a/tempest/services/volume/xml/extensions_client.py
+++ b/tempest/services/volume/xml/extensions_client.py
@@ -16,8 +16,8 @@
from lxml import etree
from tempest.common import rest_client
+from tempest.common import xml_utils as common
from tempest import config
-from tempest.services.compute.xml import common
CONF = config.CONF
diff --git a/tempest/services/volume/xml/snapshots_client.py b/tempest/services/volume/xml/snapshots_client.py
index 9ad86d2..4b1ba25 100644
--- a/tempest/services/volume/xml/snapshots_client.py
+++ b/tempest/services/volume/xml/snapshots_client.py
@@ -16,10 +16,10 @@
from lxml import etree
from tempest.common import rest_client
+from tempest.common import xml_utils as common
from tempest import config
from tempest import exceptions
from tempest.openstack.common import log as logging
-from tempest.services.compute.xml import common
CONF = config.CONF
diff --git a/tempest/services/volume/xml/volumes_client.py b/tempest/services/volume/xml/volumes_client.py
index 2078af1..9799e55 100644
--- a/tempest/services/volume/xml/volumes_client.py
+++ b/tempest/services/volume/xml/volumes_client.py
@@ -20,9 +20,9 @@
from xml.sax import saxutils
from tempest.common import rest_client
+from tempest.common import xml_utils as common
from tempest import config
from tempest import exceptions
-from tempest.services.compute.xml import common
CONF = config.CONF
@@ -137,10 +137,10 @@
body = self._parse_volume(etree.fromstring(body))
return resp, body
- def create_volume(self, size, **kwargs):
+ def create_volume(self, size=None, **kwargs):
"""Creates a new Volume.
- :param size: Size of volume in GB. (Required)
+ :param size: Size of volume in GB.
:param display_name: Optional Volume Name.
:param metadata: An optional dictionary of values for metadata.
:param volume_type: Optional Name of volume_type for the volume
@@ -149,6 +149,10 @@
:param imageRef: When specified the volume is created from this
image
"""
+ # for bug #1293885:
+ # If no size specified, read volume size from CONF
+ if size is None:
+ size = CONF.volume.volume_size
# NOTE(afazekas): it should use a volume namespace
volume = common.Element("volume", xmlns=common.XMLNS_11, size=size)
diff --git a/tempest/stress/actions/ssh_floating.py b/tempest/stress/actions/ssh_floating.py
index a34a20d..478cd07 100644
--- a/tempest/stress/actions/ssh_floating.py
+++ b/tempest/stress/actions/ssh_floating.py
@@ -32,8 +32,6 @@
stderr=subprocess.PIPE)
proc.wait()
success = proc.returncode == 0
- self.logger.info("%s(%s): %s", self.server_id, self.floating['ip'],
- "pong!" if success else "no pong :(")
return success
def tcp_connect_scan(self, addr, port):
@@ -58,18 +56,24 @@
raise RuntimeError("Cannot connect to the ssh port.")
def check_icmp_echo(self):
+ self.logger.info("%s(%s): Pinging..",
+ self.server_id, self.floating['ip'])
+
def func():
return self.ping_ip_address(self.floating['ip'])
if not tempest.test.call_until_true(func, self.check_timeout,
self.check_interval):
- raise RuntimeError("Cannot ping the machine.")
+            raise RuntimeError("%s(%s): Cannot ping the machine." %
+                               (self.server_id, self.floating['ip']))
+ self.logger.info("%s(%s): pong :)",
+ self.server_id, self.floating['ip'])
def _create_vm(self):
self.name = name = data_utils.rand_name("instance")
servers_client = self.manager.servers_client
self.logger.info("creating %s" % name)
vm_args = self.vm_extra_args.copy()
- vm_args['security_groups'] = [{'name': self.sec_grp}]
+ vm_args['security_groups'] = [self.sec_grp]
resp, server = servers_client.create_server(name, self.image,
self.flavor,
**vm_args)
@@ -90,16 +94,15 @@
sec_grp_cli = self.manager.security_groups_client
s_name = data_utils.rand_name('sec_grp-')
s_description = data_utils.rand_name('desc-')
- _, _sec_grp = sec_grp_cli.create_security_group(s_name,
- s_description)
- self.sec_grp = _sec_grp['id']
+ _, self.sec_grp = sec_grp_cli.create_security_group(s_name,
+ s_description)
create_rule = sec_grp_cli.create_security_group_rule
- create_rule(self.sec_grp, 'tcp', 22, 22)
- create_rule(self.sec_grp, 'icmp', -1, -1)
+ create_rule(self.sec_grp['id'], 'tcp', 22, 22)
+ create_rule(self.sec_grp['id'], 'icmp', -1, -1)
def _destroy_sec_grp(self):
sec_grp_cli = self.manager.security_groups_client
- sec_grp_cli.delete_security_group(self.sec_grp)
+ sec_grp_cli.delete_security_group(self.sec_grp['id'])
def _create_floating_ip(self):
floating_cli = self.manager.floating_ips_client
@@ -171,6 +174,8 @@
self._create_vm()
if self.reboot:
self.manager.servers_client.reboot(self.server_id, 'HARD')
+ self.manager.servers_client.wait_for_server_status(self.server_id,
+ 'ACTIVE')
self.run_core()
diff --git a/tempest/stress/actions/volume_attach_verify.py b/tempest/stress/actions/volume_attach_verify.py
new file mode 100644
index 0000000..1bc3b06
--- /dev/null
+++ b/tempest/stress/actions/volume_attach_verify.py
@@ -0,0 +1,232 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from tempest.common.utils import data_utils
+from tempest.common.utils.linux import remote_client
+from tempest import config
+import tempest.stress.stressaction as stressaction
+import tempest.test
+
+import re
+CONF = config.CONF
+
+
+class VolumeVerifyStress(stressaction.StressAction):
+
+ def _create_keypair(self):
+ keyname = data_utils.rand_name("key")
+ resp, self.key = self.manager.keypairs_client.create_keypair(keyname)
+ assert(resp.status == 200)
+
+ def _delete_keypair(self):
+ resp, _ = self.manager.keypairs_client.delete_keypair(self.key['name'])
+ assert(resp.status == 202)
+
+ def _create_vm(self):
+ self.name = name = data_utils.rand_name("instance")
+ servers_client = self.manager.servers_client
+ self.logger.info("creating %s" % name)
+ vm_args = self.vm_extra_args.copy()
+ vm_args['security_groups'] = [self.sec_grp]
+ vm_args['key_name'] = self.key['name']
+ resp, server = servers_client.create_server(name, self.image,
+ self.flavor,
+ **vm_args)
+ self.server_id = server['id']
+ assert(resp.status == 202)
+ self.manager.servers_client.wait_for_server_status(self.server_id,
+ 'ACTIVE')
+
+ def _destroy_vm(self):
+ self.logger.info("deleting server: %s" % self.server_id)
+ resp, _ = self.manager.servers_client.delete_server(self.server_id)
+ assert(resp.status == 204) # It cannot be 204 if I had to wait..
+ self.manager.servers_client.wait_for_server_termination(self.server_id)
+ self.logger.info("deleted server: %s" % self.server_id)
+
+ def _create_sec_group(self):
+ sec_grp_cli = self.manager.security_groups_client
+ s_name = data_utils.rand_name('sec_grp-')
+ s_description = data_utils.rand_name('desc-')
+ _, self.sec_grp = sec_grp_cli.create_security_group(s_name,
+ s_description)
+ create_rule = sec_grp_cli.create_security_group_rule
+ create_rule(self.sec_grp['id'], 'tcp', 22, 22)
+ create_rule(self.sec_grp['id'], 'icmp', -1, -1)
+
+ def _destroy_sec_grp(self):
+ sec_grp_cli = self.manager.security_groups_client
+ sec_grp_cli.delete_security_group(self.sec_grp['id'])
+
+ def _create_floating_ip(self):
+ floating_cli = self.manager.floating_ips_client
+ _, self.floating = floating_cli.create_floating_ip(self.floating_pool)
+
+ def _destroy_floating_ip(self):
+ cli = self.manager.floating_ips_client
+ cli.delete_floating_ip(self.floating['id'])
+ cli.wait_for_resource_deletion(self.floating['id'])
+ self.logger.info("Deleted Floating IP %s", str(self.floating['ip']))
+
+ def _create_volume(self):
+ name = data_utils.rand_name("volume")
+ self.logger.info("creating volume: %s" % name)
+ volumes_client = self.manager.volumes_client
+        resp, self.volume = volumes_client.create_volume(
+            size=1, display_name=name)
+ assert(resp.status == 200)
+ volumes_client.wait_for_volume_status(self.volume['id'],
+ 'available')
+ self.logger.info("created volume: %s" % self.volume['id'])
+
+ def _delete_volume(self):
+ self.logger.info("deleting volume: %s" % self.volume['id'])
+ volumes_client = self.manager.volumes_client
+ resp, _ = volumes_client.delete_volume(self.volume['id'])
+ assert(resp.status == 202)
+ volumes_client.wait_for_resource_deletion(self.volume['id'])
+ self.logger.info("deleted volume: %s" % self.volume['id'])
+
+ def _wait_disassociate(self):
+ cli = self.manager.floating_ips_client
+
+ def func():
+ _, floating = cli.get_floating_ip_details(self.floating['id'])
+ return floating['instance_id'] is None
+
+ if not tempest.test.call_until_true(func, CONF.compute.build_timeout,
+ CONF.compute.build_interval):
+ raise RuntimeError("IP disassociate timeout!")
+
+ def new_server_ops(self):
+ self._create_vm()
+ cli = self.manager.floating_ips_client
+ cli.associate_floating_ip_to_server(self.floating['ip'],
+ self.server_id)
+ if self.ssh_test_before_attach and self.enable_ssh_verify:
+ self.logger.info("Scanning for block devices via ssh on %s"
+ % self.server_id)
+ self.part_wait(self.detach_match_count)
+
+ def setUp(self, **kwargs):
+        """Notable configuration combinations:
+ Closest options to the test_stamp_pattern:
+ new_server = True
+ new_volume = True
+ enable_ssh_verify = True
+ ssh_test_before_attach = False
+ Just attaching:
+ new_server = False
+ new_volume = False
+ enable_ssh_verify = True
+ ssh_test_before_attach = True
+ Mostly API load by repeated attachment:
+ new_server = False
+ new_volume = False
+ enable_ssh_verify = False
+ ssh_test_before_attach = False
+ Minimal Nova load, but cinder load not decreased:
+ new_server = False
+ new_volume = True
+ enable_ssh_verify = True
+ ssh_test_before_attach = True
+ """
+ self.image = CONF.compute.image_ref
+ self.flavor = CONF.compute.flavor_ref
+ self.vm_extra_args = kwargs.get('vm_extra_args', {})
+ self.floating_pool = kwargs.get('floating_pool', None)
+ self.new_volume = kwargs.get('new_volume', True)
+ self.new_server = kwargs.get('new_server', False)
+ self.enable_ssh_verify = kwargs.get('enable_ssh_verify', True)
+ self.ssh_test_before_attach = kwargs.get('ssh_test_before_attach',
+ False)
+ self.part_line_re = re.compile(kwargs.get('part_line_re', '.*vd.*'))
+ self.detach_match_count = kwargs.get('detach_match_count', 1)
+ self.attach_match_count = kwargs.get('attach_match_count', 2)
+ self.part_name = kwargs.get('part_name', '/dev/vdc')
+
+ self._create_floating_ip()
+ self._create_sec_group()
+ self._create_keypair()
+ private_key = self.key['private_key']
+ username = CONF.compute.image_ssh_user
+ self.remote_client = remote_client.RemoteClient(self.floating['ip'],
+ username,
+ pkey=private_key)
+ if not self.new_volume:
+ self._create_volume()
+ if not self.new_server:
+ self.new_server_ops()
+
+ # Wait until the number of matching partitions equals the expected count (after attach or detach)
+ def part_wait(self, num_match):
+ def _part_state():
+ self.partitions = self.remote_client.get_partitions().split('\n')
+ matching = 0
+ for part_line in self.partitions[1:]:
+ if self.part_line_re.match(part_line):
+ matching += 1
+ return matching == num_match
+ if tempest.test.call_until_true(_part_state,
+ CONF.compute.build_timeout,
+ CONF.compute.build_interval):
+ return
+ else:
+ raise RuntimeError("Unexpected partitions: %s",
+ str(self.partitions))
+
+ def run(self):
+ if self.new_server:
+ self.new_server_ops()
+ if self.new_volume:
+ self._create_volume()
+ servers_client = self.manager.servers_client
+ self.logger.info("attach volume (%s) to vm %s" %
+ (self.volume['id'], self.server_id))
+ resp, body = servers_client.attach_volume(self.server_id,
+ self.volume['id'],
+ self.part_name)
+ assert(resp.status == 200)
+ self.manager.volumes_client.wait_for_volume_status(self.volume['id'],
+ 'in-use')
+ if self.enable_ssh_verify:
+ self.logger.info("Scanning for new block device on %s"
+ % self.server_id)
+ self.part_wait(self.attach_match_count)
+
+ resp, body = servers_client.detach_volume(self.server_id,
+ self.volume['id'])
+ assert(resp.status == 202)
+ self.manager.volumes_client.wait_for_volume_status(self.volume['id'],
+ 'available')
+ if self.enable_ssh_verify:
+ self.logger.info("Scanning for block device disapperance on %s"
+ % self.server_id)
+ self.part_wait(self.detach_match_count)
+ if self.new_volume:
+ self._delete_volume()
+ if self.new_server:
+ self._destroy_vm()
+
+ def tearDown(self):
+ cli = self.manager.floating_ips_client
+ cli.disassociate_floating_ip_from_server(self.floating['ip'],
+ self.server_id)
+ self._wait_disassociate()
+ if not self.new_server:
+ self._destroy_vm()
+ self._delete_keypair()
+ self._destroy_floating_ip()
+ self._destroy_sec_grp()
+ if not self.new_volume:
+ self._delete_volume()
diff --git a/tempest/stress/cleanup.py b/tempest/stress/cleanup.py
index b46de35..2587331 100644
--- a/tempest/stress/cleanup.py
+++ b/tempest/stress/cleanup.py
@@ -45,6 +45,16 @@
except Exception:
pass
+ secgrp_client = admin_manager.security_groups_client
+ _, secgrp = secgrp_client.list_security_groups({"all_tenants": True})
+ secgrp_del = [grp for grp in secgrp if grp['name'] != 'default']
+ LOG.info("Cleanup::remove %s Security Group" % len(secgrp_del))
+ for g in secgrp_del:
+ try:
+ secgrp_client.delete_security_group(g['id'])
+ except Exception:
+ pass
+
_, floating_ips = admin_manager.floating_ips_client.list_floating_ips()
LOG.info("Cleanup::remove %s floating ips" % len(floating_ips))
for f in floating_ips:
diff --git a/tempest/stress/driver.py b/tempest/stress/driver.py
index 9660081..642108a 100644
--- a/tempest/stress/driver.py
+++ b/tempest/stress/driver.py
@@ -19,6 +19,7 @@
from six import moves
+from tempest import auth
from tempest import clients
from tempest.common import ssh
from tempest.common.utils import data_utils
@@ -80,17 +81,23 @@
return ret
-def sigchld_handler(signal, frame):
+def sigchld_handler(signalnum, frame):
"""
Signal handler (only active if stop_on_error is True).
"""
- terminate_all_processes()
+ for process in processes:
+ if (not process['process'].is_alive() and
+ process['process'].exitcode != 0):
+ signal.signal(signalnum, signal.SIG_DFL)
+ terminate_all_processes()
+ break
def terminate_all_processes(check_interval=20):
"""
Goes through the process list and terminates all child processes.
"""
+ LOG.info("Stopping all processes.")
for process in processes:
if process['process'].is_alive():
try:
@@ -141,9 +148,10 @@
password,
tenant['id'],
"email")
- manager = clients.Manager(username=username,
- password="pass",
- tenant_name=tenant_name)
+ creds = auth.get_credentials(username=username,
+ password=password,
+ tenant_name=tenant_name)
+ manager = clients.Manager(credentials=creds)
test_obj = importutils.import_class(test['action'])
test_run = test_obj(manager, max_runs, stop_on_error)
@@ -174,34 +182,39 @@
signal.signal(signal.SIGCHLD, sigchld_handler)
end_time = time.time() + duration
had_errors = False
- while True:
- if max_runs is None:
- remaining = end_time - time.time()
- if remaining <= 0:
- break
- else:
- remaining = log_check_interval
- all_proc_term = True
- for process in processes:
- if process['process'].is_alive():
- all_proc_term = False
+ try:
+ while True:
+ if max_runs is None:
+ remaining = end_time - time.time()
+ if remaining <= 0:
break
- if all_proc_term:
- break
-
- time.sleep(min(remaining, log_check_interval))
- if stop_on_error:
- for process in processes:
- if process['statistic']['fails'] > 0:
+ else:
+ remaining = log_check_interval
+ all_proc_term = True
+ for process in processes:
+ if process['process'].is_alive():
+ all_proc_term = False
+ break
+ if all_proc_term:
break
- if not logfiles:
- continue
- if _has_error_in_logs(logfiles, computes, ssh_user, ssh_key,
- stop_on_error):
- had_errors = True
- break
+ time.sleep(min(remaining, log_check_interval))
+ if stop_on_error:
+ if any([True for proc in processes
+ if proc['statistic']['fails'] > 0]):
+ break
+ if not logfiles:
+ continue
+ if _has_error_in_logs(logfiles, computes, ssh_user, ssh_key,
+ stop_on_error):
+ had_errors = True
+ break
+ except KeyboardInterrupt:
+ LOG.warning("Interrupted, going to print statistics and exit ...")
+
+ if stop_on_error:
+ signal.signal(signal.SIGCHLD, signal.SIG_DFL)
terminate_all_processes()
sum_fails = 0
diff --git a/tempest/stress/etc/volume-attach-verify.json b/tempest/stress/etc/volume-attach-verify.json
new file mode 100644
index 0000000..731f5ed
--- /dev/null
+++ b/tempest/stress/etc/volume-attach-verify.json
@@ -0,0 +1,11 @@
+[{"action": "tempest.stress.actions.volume_attach_verify.VolumeVerifyStress",
+ "threads": 1,
+ "use_admin": false,
+ "use_isolated_tenants": false,
+ "kwargs": {"vm_extra_args": {},
+ "new_volume": true,
+ "new_server": false,
+ "ssh_test_before_attach": false,
+ "enable_ssh_verify": true}
+}
+]
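The kwargs in the sample configuration above map directly onto the options documented in VolumeVerifyStress.setUp. Purely as an illustrative sketch (not part of this patch), the docstring's "closest to test_stamp_pattern" combination could be written with the same entry format; the variable name below is invented for the example:

import json

# Hypothetical Python-dict form of a stress config entry, flipping
# new_server to True to match the "closest to test_stamp_pattern"
# combination from the VolumeVerifyStress.setUp docstring.
stamp_pattern_like_entry = [{
    "action": "tempest.stress.actions.volume_attach_verify.VolumeVerifyStress",
    "threads": 1,
    "use_admin": False,
    "use_isolated_tenants": False,
    "kwargs": {"vm_extra_args": {},
               "new_volume": True,
               "new_server": True,
               "ssh_test_before_attach": False,
               "enable_ssh_verify": True},
}]

# Dump it as JSON in the same shape as volume-attach-verify.json.
print(json.dumps(stamp_pattern_like_entry, indent=1))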
diff --git a/tempest/test.py b/tempest/test.py
index d358510..748a98c 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -17,6 +17,7 @@
import functools
import json
import os
+import re
import sys
import time
import urllib
@@ -24,6 +25,7 @@
import fixtures
import testresources
+import testscenarios
import testtools
from tempest import clients
@@ -64,6 +66,29 @@
return decorator
+def safe_setup(f):
+ """A decorator used to wrap the setUpClass for cleaning up resources
+ when setUpClass fails.
+ """
+
+ def decorator(cls):
+ try:
+ f(cls)
+ except Exception as se:
+ etype, value, trace = sys.exc_info()
+ LOG.exception("setUpClass failed: %s" % se)
+ try:
+ cls.tearDownClass()
+ except Exception as te:
+ LOG.exception("tearDownClass failed: %s" % te)
+ try:
+ raise etype(value), None, trace
+ finally:
+ del trace # for avoiding circular refs
+
+ return decorator
+
+
def services(*args, **kwargs):
"""A decorator used to set an attr for each service used in a test case
@@ -73,6 +98,7 @@
service_list = {
'compute': CONF.service_available.nova,
'image': CONF.service_available.glance,
+ 'baremetal': CONF.service_available.ironic,
'volume': CONF.service_available.cinder,
'orchestration': CONF.service_available.heat,
# NOTE(mtreinish) nova-network will provide networking functionality
@@ -199,13 +225,11 @@
def validate_tearDownClass():
if at_exit_set:
- raise RuntimeError("tearDownClass does not call the super's "
- "tearDownClass in these classes: "
- + str(at_exit_set) + "\n"
- "If you see the exception, with another "
- "exception please do not report this one! "
- "If you are changing tempest code, make sure you "
- "are calling the super class's tearDownClass!")
+ LOG.error(
+ "tearDownClass does not call the super's "
+ "tearDownClass in these classes: \n"
+ + str(at_exit_set))
+
atexit.register(validate_tearDownClass)
@@ -283,26 +307,18 @@
cls.__name__, network_resources=cls.network_resources)
force_tenant_isolation = getattr(cls, 'force_tenant_isolation', None)
- if (CONF.compute.allow_tenant_isolation or
- force_tenant_isolation):
+ if CONF.compute.allow_tenant_isolation or force_tenant_isolation:
creds = cls.isolated_creds.get_primary_creds()
- username, tenant_name, password = creds
if getattr(cls, '_interface', None):
- os = clients.Manager(username=username,
- password=password,
- tenant_name=tenant_name,
+ os = clients.Manager(credentials=creds,
interface=cls._interface,
service=cls._service)
elif interface:
- os = clients.Manager(username=username,
- password=password,
- tenant_name=tenant_name,
+ os = clients.Manager(credentials=creds,
interface=interface,
service=cls._service)
else:
- os = clients.Manager(username=username,
- password=password,
- tenant_name=tenant_name,
+ os = clients.Manager(credentials=creds,
service=cls._service)
else:
if getattr(cls, '_interface', None):
@@ -388,6 +404,24 @@
return json.load(open(fn))
@staticmethod
+ def load_tests(*args):
+ """
+ Wrapper for testscenarios that sets the mandatory scenarios variable
+ only when a real test loader is in place. It is called automatically
+ when the module-level variable "load_tests" is set.
+ """
+ if getattr(args[0], 'suiteClass', None) is not None:
+ loader, standard_tests, pattern = args
+ else:
+ standard_tests, module, loader = args
+ for test in testtools.iterate_tests(standard_tests):
+ schema_file = getattr(test, '_schema_file', None)
+ if schema_file is not None:
+ setattr(test, 'scenarios',
+ NegativeAutoTest.generate_scenario(schema_file))
+ return testscenarios.load_tests_apply_scenarios(*args)
+
+ @staticmethod
def generate_scenario(description_file):
"""
Generates the test scenario list for a given description.
@@ -410,7 +444,8 @@
"""
description = NegativeAutoTest.load_schema(description_file)
LOG.debug(description)
- generator = importutils.import_class(CONF.negative.test_generator)()
+ generator = importutils.import_class(
+ CONF.negative.test_generator)()
generator.validate_schema(description)
schema = description.get("json-schema", None)
resources = description.get("resources", [])
@@ -481,6 +516,10 @@
new_url, body = self._http_arguments(valid_schema, url, method)
elif hasattr(self, "schema"):
new_url, body = self._http_arguments(self.schema, url, method)
+ else:
+ raise Exception("testscenarios are not active. Please make sure "
+ "that your test runner supports the load_tests "
+ "mechanism")
if "admin_client" in description and description["admin_client"]:
client = self.admin_client
@@ -540,6 +579,24 @@
return None
+def SimpleNegativeAutoTest(klass):
+ """
+ This decorator registers a test function on the basis of the class name.
+ """
+ @attr(type=['negative', 'gate'])
+ def generic_test(self):
+ self.execute(self._schema_file)
+
+ cn = klass.__name__
+ cn = cn.replace('JSON', '')
+ cn = cn.replace('Test', '')
+ # NOTE(mkoderer): inserts '_' before inner uppercase chars, then lowercases the name
+ lower_cn = re.sub('(?<!^)(?=[A-Z])', '_', cn).lower()
+ func_name = 'test_%s' % lower_cn
+ setattr(klass, func_name, generic_test)
+ return klass
+
+
def call_until_true(func, duration, sleep_for):
"""
Call the given function until it returns True (and return True) or
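As a purely illustrative sketch of the SimpleNegativeAutoTest decorator added to tempest/test.py above: the generated test method name is derived from the class name by stripping 'JSON' and 'Test' and converting the remainder to snake_case. The class name below is hypothetical:

import re

# Hypothetical class name, used only to demonstrate the transformation
# performed inside SimpleNegativeAutoTest.
cn = 'DeleteImageNegativeTestJSON'
cn = cn.replace('JSON', '').replace('Test', '')  # 'DeleteImageNegative'
func_name = 'test_%s' % re.sub('(?<!^)(?=[A-Z])', '_', cn).lower()
print(func_name)  # test_delete_image_negative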
diff --git a/tempest/api/compute/api_schema/__init__.py b/tempest/tests/cli/__init__.py
similarity index 100%
copy from tempest/api/compute/api_schema/__init__.py
copy to tempest/tests/cli/__init__.py
diff --git a/tempest/tests/cli/test_output_parser.py b/tempest/tests/cli/test_output_parser.py
new file mode 100644
index 0000000..7ad270c
--- /dev/null
+++ b/tempest/tests/cli/test_output_parser.py
@@ -0,0 +1,177 @@
+# Copyright 2014 NEC Corporation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from tempest.cli import output_parser
+from tempest import exceptions
+from tempest.tests import base
+
+
+class TestOutputParser(base.TestCase):
+ OUTPUT_LINES = """
++----+------+---------+
+| ID | Name | Status |
++----+------+---------+
+| 11 | foo | BUILD |
+| 21 | bar | ERROR |
+| 31 | bee | None |
++----+------+---------+
+"""
+ OUTPUT_LINES2 = """
++----+-------+---------+
+| ID | Name2 | Status2 |
++----+-------+---------+
+| 41 | aaa | SSSSS |
+| 51 | bbb | TTTTT |
+| 61 | ccc | AAAAA |
++----+-------+---------+
+"""
+
+ EXPECTED_TABLE = {'headers': ['ID', 'Name', 'Status'],
+ 'values': [['11', 'foo', 'BUILD'],
+ ['21', 'bar', 'ERROR'],
+ ['31', 'bee', 'None']]}
+ EXPECTED_TABLE2 = {'headers': ['ID', 'Name2', 'Status2'],
+ 'values': [['41', 'aaa', 'SSSSS'],
+ ['51', 'bbb', 'TTTTT'],
+ ['61', 'ccc', 'AAAAA']]}
+
+ def test_table_with_normal_values(self):
+ actual = output_parser.table(self.OUTPUT_LINES)
+ self.assertIsInstance(actual, dict)
+ self.assertEqual(self.EXPECTED_TABLE, actual)
+
+ def test_table_with_list(self):
+ output_lines = self.OUTPUT_LINES.split('\n')
+ actual = output_parser.table(output_lines)
+ self.assertIsInstance(actual, dict)
+ self.assertEqual(self.EXPECTED_TABLE, actual)
+
+ def test_table_with_invalid_line(self):
+ output_lines = self.OUTPUT_LINES + "aaaa"
+ actual = output_parser.table(output_lines)
+ self.assertIsInstance(actual, dict)
+ self.assertEqual(self.EXPECTED_TABLE, actual)
+
+ def test_tables_with_normal_values(self):
+ output_lines = 'test' + self.OUTPUT_LINES +\
+ 'test2' + self.OUTPUT_LINES2
+ expected = [{'headers': self.EXPECTED_TABLE['headers'],
+ 'label': 'test',
+ 'values': self.EXPECTED_TABLE['values']},
+ {'headers': self.EXPECTED_TABLE2['headers'],
+ 'label': 'test2',
+ 'values': self.EXPECTED_TABLE2['values']}]
+ actual = output_parser.tables(output_lines)
+ self.assertIsInstance(actual, list)
+ self.assertEqual(expected, actual)
+
+ def test_tables_with_invalid_values(self):
+ output_lines = 'test' + self.OUTPUT_LINES +\
+ 'test2' + self.OUTPUT_LINES2 + '\n'
+ expected = [{'headers': self.EXPECTED_TABLE['headers'],
+ 'label': 'test',
+ 'values': self.EXPECTED_TABLE['values']},
+ {'headers': self.EXPECTED_TABLE2['headers'],
+ 'label': 'test2',
+ 'values': self.EXPECTED_TABLE2['values']}]
+ actual = output_parser.tables(output_lines)
+ self.assertIsInstance(actual, list)
+ self.assertEqual(expected, actual)
+
+ def test_tables_with_invalid_line(self):
+ output_lines = 'test' + self.OUTPUT_LINES +\
+ 'test2' + self.OUTPUT_LINES2 +\
+ '+----+-------+---------+'
+ expected = [{'headers': self.EXPECTED_TABLE['headers'],
+ 'label': 'test',
+ 'values': self.EXPECTED_TABLE['values']},
+ {'headers': self.EXPECTED_TABLE2['headers'],
+ 'label': 'test2',
+ 'values': self.EXPECTED_TABLE2['values']}]
+
+ actual = output_parser.tables(output_lines)
+ self.assertIsInstance(actual, list)
+ self.assertEqual(expected, actual)
+
+ LISTING_OUTPUT = """
++----+
+| ID |
++----+
+| 11 |
+| 21 |
+| 31 |
++----+
+"""
+
+ def test_listing(self):
+ expected = [{'ID': '11'}, {'ID': '21'}, {'ID': '31'}]
+ actual = output_parser.listing(self.LISTING_OUTPUT)
+ self.assertIsInstance(actual, list)
+ self.assertEqual(expected, actual)
+
+ def test_details_multiple_with_invalid_line(self):
+ self.assertRaises(exceptions.InvalidStructure,
+ output_parser.details_multiple,
+ self.OUTPUT_LINES)
+
+ DETAILS_LINES1 = """First Table
++----------+--------+
+| Property | Value |
++----------+--------+
+| foo | BUILD |
+| bar | ERROR |
+| bee | None |
++----------+--------+
+"""
+ DETAILS_LINES2 = """Second Table
++----------+--------+
+| Property | Value |
++----------+--------+
+| aaa | VVVVV |
+| bbb | WWWWW |
+| ccc | XXXXX |
++----------+--------+
+"""
+
+ def test_details_with_normal_line_label_false(self):
+ expected = {'foo': 'BUILD', 'bar': 'ERROR', 'bee': 'None'}
+ actual = output_parser.details(self.DETAILS_LINES1)
+ self.assertEqual(expected, actual)
+
+ def test_details_with_normal_line_label_true(self):
+ expected = {'__label': 'First Table',
+ 'foo': 'BUILD', 'bar': 'ERROR', 'bee': 'None'}
+ actual = output_parser.details(self.DETAILS_LINES1, with_label=True)
+ self.assertEqual(expected, actual)
+
+ def test_details_multiple_with_normal_line_label_false(self):
+ expected = [{'foo': 'BUILD', 'bar': 'ERROR', 'bee': 'None'},
+ {'aaa': 'VVVVV', 'bbb': 'WWWWW', 'ccc': 'XXXXX'}]
+ actual = output_parser.details_multiple(self.DETAILS_LINES1 +
+ self.DETAILS_LINES2)
+ self.assertIsInstance(actual, list)
+ self.assertEqual(expected, actual)
+
+ def test_details_multiple_with_normal_line_label_true(self):
+ expected = [{'__label': 'First Table',
+ 'foo': 'BUILD', 'bar': 'ERROR', 'bee': 'None'},
+ {'__label': 'Second Table',
+ 'aaa': 'VVVVV', 'bbb': 'WWWWW', 'ccc': 'XXXXX'}]
+ actual = output_parser.details_multiple(self.DETAILS_LINES1 +
+ self.DETAILS_LINES2,
+ with_label=True)
+ self.assertIsInstance(actual, list)
+ self.assertEqual(expected, actual)
diff --git a/tempest/api/compute/api_schema/__init__.py b/tempest/tests/cmd/__init__.py
similarity index 100%
copy from tempest/api/compute/api_schema/__init__.py
copy to tempest/tests/cmd/__init__.py
diff --git a/tempest/tests/cmd/test_verify_tempest_config.py b/tempest/tests/cmd/test_verify_tempest_config.py
new file mode 100644
index 0000000..40caf30
--- /dev/null
+++ b/tempest/tests/cmd/test_verify_tempest_config.py
@@ -0,0 +1,397 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+
+import mock
+
+from tempest.cmd import verify_tempest_config
+from tempest import config
+from tempest.openstack.common.fixture import mockpatch
+from tempest.tests import base
+from tempest.tests import fake_config
+
+
+class TestGetAPIVersions(base.TestCase):
+
+ def test_url_grab_versioned_nova_nossl(self):
+ base_url = 'http://127.0.0.1:8774/v2/'
+ endpoint = verify_tempest_config._get_unversioned_endpoint(base_url)
+ self.assertEqual('http://127.0.0.1:8774', endpoint)
+
+ def test_url_grab_versioned_nova_ssl(self):
+ base_url = 'https://127.0.0.1:8774/v3/'
+ endpoint = verify_tempest_config._get_unversioned_endpoint(base_url)
+ self.assertEqual('https://127.0.0.1:8774', endpoint)
+
+
+class TestDiscovery(base.TestCase):
+
+ def setUp(self):
+ super(TestDiscovery, self).setUp()
+ self.useFixture(fake_config.ConfigFixture())
+ self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
+
+ def test_get_keystone_api_versions(self):
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, '_get_unversioned_endpoint',
+ return_value='http://fake_endpoint:5000'))
+ fake_resp = {'versions': {'values': [{'id': 'v2.0'}, {'id': 'v3.0'}]}}
+ fake_resp = json.dumps(fake_resp)
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config.RAW_HTTP, 'request',
+ return_value=(None, fake_resp)))
+ fake_os = mock.MagicMock()
+ versions = verify_tempest_config._get_api_versions(fake_os, 'keystone')
+ self.assertIn('v2.0', versions)
+ self.assertIn('v3.0', versions)
+
+ def test_get_cinder_api_versions(self):
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, '_get_unversioned_endpoint',
+ return_value='http://fake_endpoint:5000'))
+ fake_resp = {'versions': [{'id': 'v1.0'}, {'id': 'v2.0'}]}
+ fake_resp = json.dumps(fake_resp)
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config.RAW_HTTP, 'request',
+ return_value=(None, fake_resp)))
+ fake_os = mock.MagicMock()
+ versions = verify_tempest_config._get_api_versions(fake_os, 'cinder')
+ self.assertIn('v1.0', versions)
+ self.assertIn('v2.0', versions)
+
+ def test_get_nova_versions(self):
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, '_get_unversioned_endpoint',
+ return_value='http://fake_endpoint:5000'))
+ fake_resp = {'versions': [{'id': 'v2.0'}, {'id': 'v3.0'}]}
+ fake_resp = json.dumps(fake_resp)
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config.RAW_HTTP, 'request',
+ return_value=(None, fake_resp)))
+ fake_os = mock.MagicMock()
+ versions = verify_tempest_config._get_api_versions(fake_os, 'nova')
+ self.assertIn('v2.0', versions)
+ self.assertIn('v3.0', versions)
+
+ def test_verify_keystone_api_versions_no_v3(self):
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, '_get_unversioned_endpoint',
+ return_value='http://fake_endpoint:5000'))
+ fake_resp = {'versions': {'values': [{'id': 'v2.0'}]}}
+ fake_resp = json.dumps(fake_resp)
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config.RAW_HTTP, 'request',
+ return_value=(None, fake_resp)))
+ fake_os = mock.MagicMock()
+ with mock.patch.object(verify_tempest_config,
+ 'print_and_or_update') as print_mock:
+ verify_tempest_config.verify_keystone_api_versions(fake_os, True)
+ print_mock.assert_called_once_with('api_v3',
+ 'identity_feature_enabled',
+ False, True)
+
+ def test_verify_keystone_api_versions_no_v2(self):
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, '_get_unversioned_endpoint',
+ return_value='http://fake_endpoint:5000'))
+ fake_resp = {'versions': {'values': [{'id': 'v3.0'}]}}
+ fake_resp = json.dumps(fake_resp)
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config.RAW_HTTP, 'request',
+ return_value=(None, fake_resp)))
+ fake_os = mock.MagicMock()
+ with mock.patch.object(verify_tempest_config,
+ 'print_and_or_update') as print_mock:
+ verify_tempest_config.verify_keystone_api_versions(fake_os, True)
+ print_mock.assert_called_once_with('api_v2',
+ 'identity_feature_enabled',
+ False, True)
+
+ def test_verify_cinder_api_versions_no_v2(self):
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, '_get_unversioned_endpoint',
+ return_value='http://fake_endpoint:5000'))
+ fake_resp = {'versions': [{'id': 'v1.0'}]}
+ fake_resp = json.dumps(fake_resp)
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config.RAW_HTTP, 'request',
+ return_value=(None, fake_resp)))
+ fake_os = mock.MagicMock()
+ with mock.patch.object(verify_tempest_config,
+ 'print_and_or_update') as print_mock:
+ verify_tempest_config.verify_cinder_api_versions(fake_os, True)
+ print_mock.assert_called_once_with('api_v2', 'volume_feature_enabled',
+ False, True)
+
+ def test_verify_cinder_api_versions_no_v1(self):
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, '_get_unversioned_endpoint',
+ return_value='http://fake_endpoint:5000'))
+ fake_resp = {'versions': [{'id': 'v2.0'}]}
+ fake_resp = json.dumps(fake_resp)
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config.RAW_HTTP, 'request',
+ return_value=(None, fake_resp)))
+ fake_os = mock.MagicMock()
+ with mock.patch.object(verify_tempest_config,
+ 'print_and_or_update') as print_mock:
+ verify_tempest_config.verify_cinder_api_versions(fake_os, True)
+ print_mock.assert_called_once_with('api_v1', 'volume_feature_enabled',
+ False, True)
+
+ def test_verify_nova_versions(self):
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, '_get_unversioned_endpoint',
+ return_value='http://fake_endpoint:5000'))
+ fake_resp = {'versions': [{'id': 'v2.0'}]}
+ fake_resp = json.dumps(fake_resp)
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config.RAW_HTTP, 'request',
+ return_value=(None, fake_resp)))
+ fake_os = mock.MagicMock()
+ with mock.patch.object(verify_tempest_config,
+ 'print_and_or_update') as print_mock:
+ verify_tempest_config.verify_nova_api_versions(fake_os, True)
+ print_mock.assert_called_once_with('api_v3', 'compute_feature_enabled',
+ False, True)
+
+ def test_verify_glance_version_no_v2_with_v1_1(self):
+ def fake_get_versions():
+ return (None, ['v1.1'])
+ fake_os = mock.MagicMock()
+ fake_os.image_client.get_versions = fake_get_versions
+ with mock.patch.object(verify_tempest_config,
+ 'print_and_or_update') as print_mock:
+ verify_tempest_config.verify_glance_api_versions(fake_os, True)
+ print_mock.assert_called_once_with('api_v2', 'image_feature_enabled',
+ False, True)
+
+ def test_verify_glance_version_no_v2_with_v1_0(self):
+ def fake_get_versions():
+ return (None, ['v1.0'])
+ fake_os = mock.MagicMock()
+ fake_os.image_client.get_versions = fake_get_versions
+ with mock.patch.object(verify_tempest_config,
+ 'print_and_or_update') as print_mock:
+ verify_tempest_config.verify_glance_api_versions(fake_os, True)
+ print_mock.assert_called_once_with('api_v2', 'image_feature_enabled',
+ False, True)
+
+ def test_verify_glance_version_no_v1(self):
+ def fake_get_versions():
+ return (None, ['v2.0'])
+ fake_os = mock.MagicMock()
+ fake_os.image_client.get_versions = fake_get_versions
+ with mock.patch.object(verify_tempest_config,
+ 'print_and_or_update') as print_mock:
+ verify_tempest_config.verify_glance_api_versions(fake_os, True)
+ print_mock.assert_called_once_with('api_v1', 'image_feature_enabled',
+ False, True)
+
+ def test_verify_extensions_neutron(self):
+ def fake_list_extensions():
+ return (None, {'extensions': [{'alias': 'fake1'},
+ {'alias': 'fake2'},
+ {'alias': 'not_fake'}]})
+ fake_os = mock.MagicMock()
+ fake_os.network_client.list_extensions = fake_list_extensions
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, 'get_enabled_extensions',
+ return_value=(['fake1', 'fake2', 'fake3'])))
+ results = verify_tempest_config.verify_extensions(fake_os,
+ 'neutron', {})
+ self.assertIn('neutron', results)
+ self.assertIn('fake1', results['neutron'])
+ self.assertTrue(results['neutron']['fake1'])
+ self.assertIn('fake2', results['neutron'])
+ self.assertTrue(results['neutron']['fake2'])
+ self.assertIn('fake3', results['neutron'])
+ self.assertFalse(results['neutron']['fake3'])
+ self.assertIn('not_fake', results['neutron'])
+ self.assertFalse(results['neutron']['not_fake'])
+
+ def test_verify_extensions_neutron_all(self):
+ def fake_list_extensions():
+ return (None, {'extensions': [{'alias': 'fake1'},
+ {'alias': 'fake2'},
+ {'alias': 'not_fake'}]})
+ fake_os = mock.MagicMock()
+ fake_os.network_client.list_extensions = fake_list_extensions
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, 'get_enabled_extensions',
+ return_value=(['all'])))
+ results = verify_tempest_config.verify_extensions(fake_os,
+ 'neutron', {})
+ self.assertIn('neutron', results)
+ self.assertIn('extensions', results['neutron'])
+ self.assertEqual(['fake1', 'fake2', 'not_fake'],
+ results['neutron']['extensions'])
+
+ def test_verify_extensions_cinder(self):
+ def fake_list_extensions():
+ return (None, {'extensions': [{'name': 'fake1'},
+ {'name': 'fake2'},
+ {'name': 'not_fake'}]})
+ fake_os = mock.MagicMock()
+ fake_os.volumes_extension_client.list_extensions = fake_list_extensions
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, 'get_enabled_extensions',
+ return_value=(['fake1', 'fake2', 'fake3'])))
+ results = verify_tempest_config.verify_extensions(fake_os,
+ 'cinder', {})
+ self.assertIn('cinder', results)
+ self.assertIn('fake1', results['cinder'])
+ self.assertTrue(results['cinder']['fake1'])
+ self.assertIn('fake2', results['cinder'])
+ self.assertTrue(results['cinder']['fake2'])
+ self.assertIn('fake3', results['cinder'])
+ self.assertFalse(results['cinder']['fake3'])
+ self.assertIn('not_fake', results['cinder'])
+ self.assertFalse(results['cinder']['not_fake'])
+
+ def test_verify_extensions_cinder_all(self):
+ def fake_list_extensions():
+ return (None, {'extensions': [{'name': 'fake1'},
+ {'name': 'fake2'},
+ {'name': 'not_fake'}]})
+ fake_os = mock.MagicMock()
+ fake_os.volumes_extension_client.list_extensions = fake_list_extensions
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, 'get_enabled_extensions',
+ return_value=(['all'])))
+ results = verify_tempest_config.verify_extensions(fake_os,
+ 'cinder', {})
+ self.assertIn('cinder', results)
+ self.assertIn('extensions', results['cinder'])
+ self.assertEqual(['fake1', 'fake2', 'not_fake'],
+ results['cinder']['extensions'])
+
+ def test_verify_extensions_nova(self):
+ def fake_list_extensions():
+ return (None, {'extensions': [{'name': 'fake1'},
+ {'name': 'fake2'},
+ {'name': 'not_fake'}]})
+ fake_os = mock.MagicMock()
+ fake_os.extensions_client.list_extensions = fake_list_extensions
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, 'get_enabled_extensions',
+ return_value=(['fake1', 'fake2', 'fake3'])))
+ results = verify_tempest_config.verify_extensions(fake_os,
+ 'nova', {})
+ self.assertIn('nova', results)
+ self.assertIn('fake1', results['nova'])
+ self.assertTrue(results['nova']['fake1'])
+ self.assertIn('fake2', results['nova'])
+ self.assertTrue(results['nova']['fake2'])
+ self.assertIn('fake3', results['nova'])
+ self.assertFalse(results['nova']['fake3'])
+ self.assertIn('not_fake', results['nova'])
+ self.assertFalse(results['nova']['not_fake'])
+
+ def test_verify_extensions_nova_all(self):
+ def fake_list_extensions():
+ return (None, {'extensions': [{'name': 'fake1'},
+ {'name': 'fake2'},
+ {'name': 'not_fake'}]})
+ fake_os = mock.MagicMock()
+ fake_os.extensions_client.list_extensions = fake_list_extensions
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, 'get_enabled_extensions',
+ return_value=(['all'])))
+ results = verify_tempest_config.verify_extensions(fake_os,
+ 'nova', {})
+ self.assertIn('nova', results)
+ self.assertIn('extensions', results['nova'])
+ self.assertEqual(['fake1', 'fake2', 'not_fake'],
+ results['nova']['extensions'])
+
+ def test_verify_extensions_nova_v3(self):
+ def fake_list_extensions():
+ return (None, {'extensions': [{'name': 'fake1'},
+ {'name': 'fake2'},
+ {'name': 'not_fake'}]})
+ fake_os = mock.MagicMock()
+ fake_os.extensions_v3_client.list_extensions = fake_list_extensions
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, 'get_enabled_extensions',
+ return_value=(['fake1', 'fake2', 'fake3'])))
+ results = verify_tempest_config.verify_extensions(fake_os,
+ 'nova_v3', {})
+ self.assertIn('nova_v3', results)
+ self.assertIn('fake1', results['nova_v3'])
+ self.assertTrue(results['nova_v3']['fake1'])
+ self.assertIn('fake2', results['nova_v3'])
+ self.assertTrue(results['nova_v3']['fake2'])
+ self.assertIn('fake3', results['nova_v3'])
+ self.assertFalse(results['nova_v3']['fake3'])
+ self.assertIn('not_fake', results['nova_v3'])
+ self.assertFalse(results['nova_v3']['not_fake'])
+
+ def test_verify_extensions_nova_v3_all(self):
+ def fake_list_extensions():
+ return (None, {'extensions': [{'name': 'fake1'},
+ {'name': 'fake2'},
+ {'name': 'not_fake'}]})
+ fake_os = mock.MagicMock()
+ fake_os.extensions_v3_client.list_extensions = fake_list_extensions
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, 'get_enabled_extensions',
+ return_value=(['all'])))
+ results = verify_tempest_config.verify_extensions(fake_os,
+ 'nova_v3', {})
+ self.assertIn('nova_v3', results)
+ self.assertIn('extensions', results['nova_v3'])
+ self.assertEqual(['fake1', 'fake2', 'not_fake'],
+ results['nova_v3']['extensions'])
+
+ def test_verify_extensions_swift(self):
+ def fake_list_extensions():
+ return (None, {'fake1': 'metadata',
+ 'fake2': 'metadata',
+ 'not_fake': 'metadata',
+ 'swift': 'metadata'})
+ fake_os = mock.MagicMock()
+ fake_os.account_client.list_extensions = fake_list_extensions
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, 'get_enabled_extensions',
+ return_value=(['fake1', 'fake2', 'fake3'])))
+ results = verify_tempest_config.verify_extensions(fake_os, 'swift', {})
+ self.assertIn('swift', results)
+ self.assertIn('fake1', results['swift'])
+ self.assertTrue(results['swift']['fake1'])
+ self.assertIn('fake2', results['swift'])
+ self.assertTrue(results['swift']['fake2'])
+ self.assertIn('fake3', results['swift'])
+ self.assertFalse(results['swift']['fake3'])
+ self.assertIn('not_fake', results['swift'])
+ self.assertFalse(results['swift']['not_fake'])
+
+ def test_verify_extensions_swift_all(self):
+ def fake_list_extensions():
+ return (None, {'fake1': 'metadata',
+ 'fake2': 'metadata',
+ 'not_fake': 'metadata',
+ 'swift': 'metadata'})
+ fake_os = mock.MagicMock()
+ fake_os.account_client.list_extensions = fake_list_extensions
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, 'get_enabled_extensions',
+ return_value=(['all'])))
+ results = verify_tempest_config.verify_extensions(fake_os,
+ 'swift', {})
+ self.assertIn('swift', results)
+ self.assertIn('extensions', results['swift'])
+ self.assertEqual(['not_fake', 'fake1', 'fake2'],
+ results['swift']['extensions'])
diff --git a/tempest/api/compute/api_schema/__init__.py b/tempest/tests/common/__init__.py
similarity index 100%
copy from tempest/api/compute/api_schema/__init__.py
copy to tempest/tests/common/__init__.py
diff --git a/tempest/tests/common/test_debug.py b/tempest/tests/common/test_debug.py
new file mode 100644
index 0000000..8a880f2
--- /dev/null
+++ b/tempest/tests/common/test_debug.py
@@ -0,0 +1,122 @@
+# Copyright 2014 NEC Corporation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from tempest.common import debug
+from tempest import config
+from tempest.openstack.common.fixture import mockpatch
+from tempest import test
+from tempest.tests import base
+from tempest.tests import fake_config
+
+
+class TestDebug(base.TestCase):
+
+ def setUp(self):
+ super(TestDebug, self).setUp()
+ self.useFixture(fake_config.ConfigFixture())
+ self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
+
+ common_pre = 'tempest.common.commands'
+ self.ip_addr_raw_mock = self.patch(common_pre + '.ip_addr_raw')
+ self.ip_route_raw_mock = self.patch(common_pre + '.ip_route_raw')
+ self.iptables_raw_mock = self.patch(common_pre + '.iptables_raw')
+ self.ip_ns_list_mock = self.patch(common_pre + '.ip_ns_list')
+ self.ip_ns_addr_mock = self.patch(common_pre + '.ip_ns_addr')
+ self.ip_ns_route_mock = self.patch(common_pre + '.ip_ns_route')
+ self.iptables_ns_mock = self.patch(common_pre + '.iptables_ns')
+ self.ovs_db_dump_mock = self.patch(common_pre + '.ovs_db_dump')
+
+ self.log_mock = self.patch('tempest.common.debug.LOG')
+
+ def test_log_ip_ns_debug_disabled(self):
+ self.useFixture(mockpatch.PatchObject(test.CONF.debug,
+ 'enable', False))
+ debug.log_ip_ns()
+ self.assertFalse(self.ip_addr_raw_mock.called)
+ self.assertFalse(self.log_mock.info.called)
+
+ def test_log_ip_ns_debug_enabled(self):
+ self.useFixture(mockpatch.PatchObject(test.CONF.debug,
+ 'enable', True))
+
+ self.ip_ns_list_mock.return_value = [1, 2]
+
+ debug.log_ip_ns()
+ self.ip_addr_raw_mock.assert_called_with()
+ self.assertTrue(self.log_mock.info.called)
+ self.ip_route_raw_mock.assert_called_with()
+ self.assertEqual(len(debug.TABLES), self.iptables_raw_mock.call_count)
+ for table in debug.TABLES:
+ self.assertIn(mock.call(table),
+ self.iptables_raw_mock.call_args_list)
+
+ self.ip_ns_list_mock.assert_called_with()
+ self.assertEqual(len(self.ip_ns_list_mock.return_value),
+ self.ip_ns_addr_mock.call_count)
+ self.assertEqual(len(self.ip_ns_list_mock.return_value),
+ self.ip_ns_route_mock.call_count)
+ for ns in self.ip_ns_list_mock.return_value:
+ self.assertIn(mock.call(ns),
+ self.ip_ns_addr_mock.call_args_list)
+ self.assertIn(mock.call(ns),
+ self.ip_ns_route_mock.call_args_list)
+
+ self.assertEqual(len(debug.TABLES) *
+ len(self.ip_ns_list_mock.return_value),
+ self.iptables_ns_mock.call_count)
+ for ns in self.ip_ns_list_mock.return_value:
+ for table in debug.TABLES:
+ self.assertIn(mock.call(ns, table),
+ self.iptables_ns_mock.call_args_list)
+
+ def test_log_ovs_db_debug_disabled(self):
+ self.useFixture(mockpatch.PatchObject(test.CONF.debug,
+ 'enable', False))
+ self.useFixture(mockpatch.PatchObject(test.CONF.service_available,
+ 'neutron', False))
+ debug.log_ovs_db()
+ self.assertFalse(self.ovs_db_dump_mock.called)
+
+ self.useFixture(mockpatch.PatchObject(test.CONF.debug,
+ 'enable', True))
+ self.useFixture(mockpatch.PatchObject(test.CONF.service_available,
+ 'neutron', False))
+ debug.log_ovs_db()
+ self.assertFalse(self.ovs_db_dump_mock.called)
+
+ self.useFixture(mockpatch.PatchObject(test.CONF.debug,
+ 'enable', False))
+ self.useFixture(mockpatch.PatchObject(test.CONF.service_available,
+ 'neutron', True))
+ debug.log_ovs_db()
+ self.assertFalse(self.ovs_db_dump_mock.called)
+
+ def test_log_ovs_db_debug_enabled(self):
+ self.useFixture(mockpatch.PatchObject(test.CONF.debug,
+ 'enable', True))
+ self.useFixture(mockpatch.PatchObject(test.CONF.service_available,
+ 'neutron', True))
+ debug.log_ovs_db()
+ self.ovs_db_dump_mock.assert_called_with()
+
+ def test_log_net_debug(self):
+ self.log_ip_ns_mock = self.patch('tempest.common.debug.log_ip_ns')
+ self.log_ovs_db_mock = self.patch('tempest.common.debug.log_ovs_db')
+
+ debug.log_net_debug()
+ self.log_ip_ns_mock.assert_called_with()
+ self.log_ovs_db_mock.assert_called_with()
diff --git a/tempest/api/compute/api_schema/__init__.py b/tempest/tests/common/utils/__init__.py
similarity index 100%
copy from tempest/api/compute/api_schema/__init__.py
copy to tempest/tests/common/utils/__init__.py
diff --git a/tempest/tests/common/utils/test_data_utils.py b/tempest/tests/common/utils/test_data_utils.py
new file mode 100644
index 0000000..7aafdb2
--- /dev/null
+++ b/tempest/tests/common/utils/test_data_utils.py
@@ -0,0 +1,77 @@
+# Copyright 2014 NEC Corporation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from tempest.common.utils import data_utils
+from tempest.tests import base
+
+
+class TestDataUtils(base.TestCase):
+
+ def test_rand_uuid(self):
+ actual = data_utils.rand_uuid()
+ self.assertIsInstance(actual, str)
+ self.assertRegexpMatches(actual, "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]"
+ "{4}-[0-9a-f]{4}-[0-9a-f]{12}$")
+ actual2 = data_utils.rand_uuid()
+ self.assertNotEqual(actual, actual2)
+
+ def test_rand_uuid_hex(self):
+ actual = data_utils.rand_uuid_hex()
+ self.assertIsInstance(actual, str)
+ self.assertRegexpMatches(actual, "^[0-9a-f]{32}$")
+
+ actual2 = data_utils.rand_uuid_hex()
+ self.assertNotEqual(actual, actual2)
+
+ def test_rand_name(self):
+ actual = data_utils.rand_name()
+ self.assertIsInstance(actual, str)
+ actual2 = data_utils.rand_name()
+ self.assertNotEqual(actual, actual2)
+
+ actual = data_utils.rand_name('foo')
+ self.assertTrue(actual.startswith('foo'))
+ actual2 = data_utils.rand_name('foo')
+ self.assertTrue(actual2.startswith('foo'))
+ self.assertNotEqual(actual, actual2)
+
+ def test_rand_int(self):
+ actual = data_utils.rand_int_id()
+ self.assertIsInstance(actual, int)
+
+ actual2 = data_utils.rand_int_id()
+ self.assertNotEqual(actual, actual2)
+
+ def test_rand_mac_address(self):
+ actual = data_utils.rand_mac_address()
+ self.assertIsInstance(actual, str)
+ self.assertRegexpMatches(actual, "^([0-9a-f][0-9a-f]:){5}"
+ "[0-9a-f][0-9a-f]$")
+
+ actual2 = data_utils.rand_mac_address()
+ self.assertNotEqual(actual, actual2)
+
+ def test_parse_image_id(self):
+ actual = data_utils.parse_image_id("/foo/bar/deadbeaf")
+ self.assertEqual("deadbeaf", actual)
+
+ def test_arbitrary_string(self):
+ actual = data_utils.arbitrary_string()
+ self.assertEqual(actual, "test")
+ actual = data_utils.arbitrary_string(size=30, base_text="abc")
+ self.assertEqual(actual, "abc" * (30 / len("abc")))
+ actual = data_utils.arbitrary_string(size=5, base_text="deadbeaf")
+ self.assertEqual(actual, "deadb")
diff --git a/tempest/tests/common/utils/test_file_utils.py b/tempest/tests/common/utils/test_file_utils.py
new file mode 100644
index 0000000..99ae033
--- /dev/null
+++ b/tempest/tests/common/utils/test_file_utils.py
@@ -0,0 +1,32 @@
+# Copyright 2014 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from mock import patch
+
+from tempest.common.utils import file_utils
+from tempest.tests import base
+
+
+class TestFileUtils(base.TestCase):
+
+ def test_have_effective_read_path(self):
+ with patch('__builtin__.open', mock.mock_open(), create=True):
+ result = file_utils.have_effective_read_access('fake_path')
+ self.assertTrue(result)
+
+ def test_not_effective_read_path(self):
+ result = file_utils.have_effective_read_access('fake_path')
+ self.assertFalse(result)
diff --git a/tempest/tests/common/utils/test_misc.py b/tempest/tests/common/utils/test_misc.py
new file mode 100644
index 0000000..aee9805
--- /dev/null
+++ b/tempest/tests/common/utils/test_misc.py
@@ -0,0 +1,88 @@
+# Copyright 2014 NEC Corporation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from tempest.common.utils import misc
+from tempest.tests import base
+
+
+@misc.singleton
+class TestFoo(object):
+
+ count = 0
+
+ def increment(self):
+ self.count += 1
+ return self.count
+
+
+@misc.singleton
+class TestBar(object):
+
+ count = 0
+
+ def increment(self):
+ self.count += 1
+ return self.count
+
+
+class TestMisc(base.TestCase):
+
+ def test_singleton(self):
+ test = TestFoo()
+ self.assertEqual(0, test.count)
+ self.assertEqual(1, test.increment())
+ test2 = TestFoo()
+ self.assertEqual(1, test.count)
+ self.assertEqual(1, test2.count)
+ self.assertEqual(test, test2)
+ test3 = TestBar()
+ self.assertNotEqual(test, test3)
+
+ def test_find_test_caller_test_case(self):
+ # Calling it from here should give us the method we're in.
+ self.assertEqual('TestMisc:test_find_test_caller_test_case',
+ misc.find_test_caller())
+
+ def test_find_test_caller_setup_self(self):
+ def setUp(self):
+ return misc.find_test_caller()
+ self.assertEqual('TestMisc:setUp', setUp(self))
+
+ def test_find_test_caller_setup_no_self(self):
+ def setUp():
+ return misc.find_test_caller()
+ self.assertEqual(':setUp', setUp())
+
+ def test_find_test_caller_setupclass_cls(self):
+ def setUpClass(cls): # noqa
+ return misc.find_test_caller()
+ self.assertEqual('TestMisc:setUpClass', setUpClass(self.__class__))
+
+ def test_find_test_caller_teardown_self(self):
+ def tearDown(self):
+ return misc.find_test_caller()
+ self.assertEqual('TestMisc:tearDown', tearDown(self))
+
+ def test_find_test_caller_teardown_no_self(self):
+ def tearDown():
+ return misc.find_test_caller()
+ self.assertEqual(':tearDown', tearDown())
+
+ def test_find_test_caller_teardown_class(self):
+ def tearDownClass(cls):
+ return misc.find_test_caller()
+ self.assertEqual('TestMisc:tearDownClass',
+ tearDownClass(self.__class__))
diff --git a/tempest/tests/fake_auth_provider.py b/tempest/tests/fake_auth_provider.py
index bc68d26..44c331e 100644
--- a/tempest/tests/fake_auth_provider.py
+++ b/tempest/tests/fake_auth_provider.py
@@ -13,6 +13,16 @@
# License for the specific language governing permissions and limitations
# under the License.
+from tempest.tests import fake_credentials
+
+
+def get_default_credentials(credential_type, fill_in=True):
+ return fake_credentials.FakeCredentials()
+
+
+def get_credentials(credential_type=None, fill_in=True, **kwargs):
+ return fake_credentials.FakeCredentials()
+
class FakeAuthProvider(object):
diff --git a/tempest/tests/fake_config.py b/tempest/tests/fake_config.py
index e941606..4bed0c2 100644
--- a/tempest/tests/fake_config.py
+++ b/tempest/tests/fake_config.py
@@ -12,49 +12,52 @@
# License for the specific language governing permissions and limitations
# under the License.
+import os
-class FakeConfig(object):
+from oslo.config import cfg
- class fake_compute(object):
- build_interval = 10
- build_timeout = 10
+from tempest import config
+from tempest.openstack.common.fixture import config as conf_fixture
+from tempest.openstack.common import importutils
- class fake_identity(object):
- disable_ssl_certificate_validation = True
- catalog_type = 'identity'
- uri = 'http://fake_uri.com/auth'
- uri_v3 = 'http://fake_uri_v3.com/auth'
- class fake_default_feature_enabled(object):
- api_extensions = ['all']
+class ConfigFixture(conf_fixture.Config):
- class fake_compute_feature_enabled(fake_default_feature_enabled):
- api_v3_extensions = ['all']
+ def __init__(self):
+ config.register_opts()
+ # Register locking options
+ importutils.import_module('tempest.openstack.common.lockutils')
+ super(ConfigFixture, self).__init__()
- class fake_object_storage_discoverable_apis(object):
- discoverable_apis = ['all']
+ def setUp(self):
+ super(ConfigFixture, self).setUp()
+ self.conf.set_default('build_interval', 10, group='compute')
+ self.conf.set_default('build_timeout', 10, group='compute')
+ self.conf.set_default('disable_ssl_certificate_validation', True,
+ group='identity')
+ self.conf.set_default('uri', 'http://fake_uri.com/auth',
+ group='identity')
+ self.conf.set_default('uri_v3', 'http://fake_uri_v3.com/auth',
+ group='identity')
+ self.conf.set_default('neutron', True, group='service_available')
+ self.conf.set_default('heat', True, group='service_available')
+ if not os.path.exists(str(os.environ.get('OS_TEST_LOCK_PATH'))):
+ os.mkdir(str(os.environ.get('OS_TEST_LOCK_PATH')))
+ self.conf.set_default('lock_path',
+ str(os.environ.get('OS_TEST_LOCK_PATH')))
+ self.conf.set_default('auth_version', 'v2', group='identity')
+ for config_option in ['username', 'password', 'tenant_name']:
+ # Identity group items
+ for prefix in ['', 'alt_', 'admin_']:
+ self.conf.set_default(prefix + config_option,
+ 'fake_' + config_option,
+ group='identity')
+ # Compute Admin group items
+ self.conf.set_default(config_option, 'fake_' + config_option,
+ group='compute-admin')
- class fake_service_available(object):
- nova = True
- glance = True
- cinder = True
- heat = True
- neutron = True
- swift = True
- horizon = True
- class fake_negative(object):
- test_generator = 'tempest.common.' \
- 'generator.negative_generator.NegativeTestGenerator'
-
- compute_feature_enabled = fake_compute_feature_enabled()
- volume_feature_enabled = fake_default_feature_enabled()
- network_feature_enabled = fake_default_feature_enabled()
- object_storage_feature_enabled = fake_object_storage_discoverable_apis()
-
- service_available = fake_service_available()
-
- compute = fake_compute()
- identity = fake_identity()
-
- negative = fake_negative()
+class FakePrivate(config.TempestConfigPrivate):
+ def __init__(self):
+ cfg.CONF([], default_config_files=[])
+ self._set_attrs()
diff --git a/tempest/tests/fake_credentials.py b/tempest/tests/fake_credentials.py
new file mode 100644
index 0000000..48f67d2
--- /dev/null
+++ b/tempest/tests/fake_credentials.py
@@ -0,0 +1,62 @@
+# Copyright 2014 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest import auth
+
+
+class FakeCredentials(auth.Credentials):
+
+ def is_valid(self):
+ return True
+
+
+class FakeKeystoneV2Credentials(auth.KeystoneV2Credentials):
+
+ def __init__(self):
+ creds = dict(
+ username='fake_username',
+ password='fake_password',
+ tenant_name='fake_tenant_name'
+ )
+ super(FakeKeystoneV2Credentials, self).__init__(**creds)
+
+
+class FakeKeystoneV3Credentials(auth.KeystoneV3Credentials):
+ """
+ Fake credentials suitable for the Keystone Identity V3 API
+ """
+
+ def __init__(self):
+ creds = dict(
+ username='fake_username',
+ password='fake_password',
+ user_domain_name='fake_domain_name',
+ project_name='fake_tenant_name'
+ )
+ super(FakeKeystoneV3Credentials, self).__init__(**creds)
+
+
+class FakeKeystoneV3DomainCredentials(auth.KeystoneV3Credentials):
+ """
+ Fake credentials suitable for the Keystone Identity V3 API, with no scope
+ """
+
+ def __init__(self):
+ creds = dict(
+ username='fake_username',
+ password='fake_password',
+ user_domain_name='fake_domain_name'
+ )
+ super(FakeKeystoneV3DomainCredentials, self).__init__(**creds)
diff --git a/tempest/tests/fake_http.py b/tempest/tests/fake_http.py
index a09d5ba..7b878af 100644
--- a/tempest/tests/fake_http.py
+++ b/tempest/tests/fake_http.py
@@ -12,6 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import copy
import httplib2
@@ -44,3 +45,29 @@
else:
msg = "unsupported return type %s" % self.return_type
raise TypeError(msg)
+
+
+class fake_httplib(object):
+ def __init__(self, headers, body=None,
+ version=1.0, status=200, reason="Ok"):
+ """
+ :param headers: dict representing HTTP response headers
+ :param body: file-like object
+ :param version: HTTP Version
+ :param status: Response status code
+ :param reason: Status code related message.
+ """
+ self.body = body
+ self.status = status
+ self.reason = reason
+ self.version = version
+ self.headers = headers
+
+ def getheaders(self):
+ return copy.deepcopy(self.headers).items()
+
+ def getheader(self, key, default):
+ return self.headers.get(key, default)
+
+ def read(self, amt):
+ return self.body.read(amt)
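A minimal usage sketch for the fake_httplib helper added above (the header values and body are invented for the example; this is not part of the patch):

import StringIO  # Python 2, matching the rest of the tree

from tempest.tests.fake_http import fake_httplib

# Build a canned response object and exercise its httplib-like interface.
resp = fake_httplib(headers={'content-type': 'application/json'},
                    body=StringIO.StringIO('{"ok": true}'))
assert resp.status == 200
print(resp.getheader('content-type', None))  # application/json
print(resp.read(4))                          # {"ok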
diff --git a/tempest/tests/fake_identity.py b/tempest/tests/fake_identity.py
index 058c9c2..1900fc9 100644
--- a/tempest/tests/fake_identity.py
+++ b/tempest/tests/fake_identity.py
@@ -113,7 +113,7 @@
"expires_at": "2020-01-01T00:00:10.000123Z",
"project": {
"domain": {
- "id": "fake_id",
+ "id": "fake_domain_id",
"name": "fake"
},
"id": "project_id",
@@ -121,7 +121,7 @@
},
"user": {
"domain": {
- "id": "domain_id",
+ "id": "fake_domain_id",
"name": "domain_name"
},
"id": "fake_user_id",
diff --git a/tempest/tests/negative/test_generate_json.py b/tempest/tests/negative/test_generate_json.py
deleted file mode 100644
index e09fcdf..0000000
--- a/tempest/tests/negative/test_generate_json.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright 2014 Deutsche Telekom AG
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.common.generator import negative_generator
-import tempest.test
-
-
-class TestNegativeGenerator(tempest.test.BaseTestCase):
-
- fake_input_str = {"type": "string",
- "minLength": 2,
- "maxLength": 8,
- 'results': {'gen_number': 404}}
-
- fake_input_int = {"type": "integer",
- "maximum": 255,
- "minimum": 1}
-
- fake_input_obj = {"type": "object",
- "properties": {"minRam": {"type": "integer"},
- "diskName": {"type": "string"},
- "maxRam": {"type": "integer", }
- }
- }
-
- def setUp(self):
- super(TestNegativeGenerator, self).setUp()
- self.negative = negative_generator.NegativeTestGenerator()
-
- def _validate_result(self, data):
- self.assertTrue(isinstance(data, list))
- for t in data:
- self.assertTrue(isinstance(t, tuple))
-
- def test_generate_invalid_string(self):
- result = self.negative.generate(self.fake_input_str)
- self._validate_result(result)
-
- def test_generate_invalid_integer(self):
- result = self.negative.generate(self.fake_input_int)
- self._validate_result(result)
-
- def test_generate_invalid_obj(self):
- result = self.negative.generate(self.fake_input_obj)
- self._validate_result(result)
diff --git a/tempest/tests/negative/test_negative_auto_test.py b/tempest/tests/negative/test_negative_auto_test.py
index 27ddc95..7a1909a 100644
--- a/tempest/tests/negative/test_negative_auto_test.py
+++ b/tempest/tests/negative/test_negative_auto_test.py
@@ -15,6 +15,7 @@
import mock
+from tempest import config
import tempest.test as test
from tempest.tests import base
from tempest.tests import fake_config
@@ -38,7 +39,8 @@
def setUp(self):
super(TestNegativeAutoTest, self).setUp()
- self.stubs.Set(test, 'CONF', fake_config.FakeConfig)
+ self.useFixture(fake_config.ConfigFixture())
+ self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
def _check_prop_entries(self, result, entry):
entries = [a for a in result if entry in a[0]]
diff --git a/tempest/tests/negative/test_negative_generators.py b/tempest/tests/negative/test_negative_generators.py
new file mode 100644
index 0000000..c77faca
--- /dev/null
+++ b/tempest/tests/negative/test_negative_generators.py
@@ -0,0 +1,153 @@
+# Copyright 2014 Deutsche Telekom AG
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import jsonschema
+import mock
+
+from tempest.common.generator import base_generator
+from tempest.common.generator import negative_generator
+from tempest.common.generator import valid_generator
+from tempest.tests import base
+
+
+class TestNegativeBasicGenerator(base.TestCase):
+ valid_desc = {
+ "name": "list-flavors-with-detail",
+ "http-method": "GET",
+ "url": "flavors/detail",
+ "json-schema": {
+ "type": "object",
+ "properties": {
+ "minRam": {"type": "integer"},
+ "minDisk": {"type": "integer"}
+ }
+ },
+ "resources": ["flavor", "volume", "image"]
+ }
+
+ minimal_desc = {
+ "name": "list-flavors-with-detail",
+ "http-method": "GET",
+ "url": "flavors/detail",
+ }
+
+ add_prop_desc = {
+ "name": "list-flavors-with-detail",
+ "http-method": "GET",
+ "url": "flavors/detail",
+ "unknown_field": [12]
+ }
+
+ invalid_json_schema_desc = {
+ "name": "list-flavors-with-detail",
+ "http-method": "GET",
+ "url": "flavors/detail",
+ "json-schema": {"type": "NotExistingType"}
+ }
+
+ def setUp(self):
+ super(TestNegativeBasicGenerator, self).setUp()
+ self.generator = base_generator.BasicGeneratorSet()
+
+ def _assert_valid_jsonschema_call(self, jsonschema_mock, desc):
+ self.assertEqual(jsonschema_mock.call_count, 1)
+ jsonschema_mock.assert_called_with(desc, self.generator.schema)
+
+ @mock.patch('jsonschema.validate', wraps=jsonschema.validate)
+ def test_validate_schema_with_valid_input(self, jsonschema_mock):
+ self.generator.validate_schema(self.valid_desc)
+ self._assert_valid_jsonschema_call(jsonschema_mock, self.valid_desc)
+
+ @mock.patch('jsonschema.validate', wraps=jsonschema.validate)
+ def test_validate_schema_with_minimal_input(self, jsonschema_mock):
+ self.generator.validate_schema(self.minimal_desc)
+ self._assert_valid_jsonschema_call(jsonschema_mock, self.minimal_desc)
+
+ def test_validate_schema_with_invalid_input(self):
+ self.assertRaises(jsonschema.ValidationError,
+ self.generator.validate_schema, self.add_prop_desc)
+ self.assertRaises(jsonschema.SchemaError,
+ self.generator.validate_schema,
+ self.invalid_json_schema_desc)
+
+
+class BaseNegativeGenerator(object):
+ types = ['string', 'integer', 'object']
+
+ fake_input_str = {"type": "string",
+ "minLength": 2,
+ "maxLength": 8,
+ 'results': {'gen_int': 404}}
+
+ fake_input_int = {"type": "integer",
+ "maximum": 255,
+ "minimum": 1}
+
+ fake_input_obj = {"type": "object",
+ "properties": {"minRam": {"type": "integer"},
+ "diskName": {"type": "string"},
+ "maxRam": {"type": "integer", }
+ }
+ }
+
+    unknown_type_schema = {
+        "type": "not_defined"
+    }
+
+ def _validate_result(self, data):
+        self.assertIsInstance(data, list)
+ for t in data:
+ self.assertIsInstance(t, tuple)
+ self.assertEqual(3, len(t))
+ self.assertIsInstance(t[0], str)
+
+ def test_generate_string(self):
+ result = self.generator.generate(self.fake_input_str)
+ self._validate_result(result)
+
+ def test_generate_integer(self):
+ result = self.generator.generate(self.fake_input_int)
+ self._validate_result(result)
+
+ def test_generate_obj(self):
+ result = self.generator.generate(self.fake_input_obj)
+ self._validate_result(result)
+
+ def test_generator_mandatory_functions(self):
+ for data_type in self.types:
+ self.assertIn(data_type, self.generator.types_dict)
+
+ def test_generate_with_unknown_type(self):
+ self.assertRaises(TypeError, self.generator.generate,
+                          self.unknown_type_schema)
+
+
+class TestNegativeValidGenerator(base.TestCase, BaseNegativeGenerator):
+ def setUp(self):
+ super(TestNegativeValidGenerator, self).setUp()
+ self.generator = valid_generator.ValidTestGenerator()
+
+ def test_generate_valid(self):
+ result = self.generator.generate_valid(self.fake_input_obj)
+ self.assertIn("minRam", result)
+ self.assertIsInstance(result["minRam"], int)
+ self.assertIn("diskName", result)
+ self.assertIsInstance(result["diskName"], str)
+
+
+class TestNegativeNegativeGenerator(base.TestCase, BaseNegativeGenerator):
+ def setUp(self):
+ super(TestNegativeNegativeGenerator, self).setUp()
+ self.generator = negative_generator.NegativeTestGenerator()
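For orientation: the contract BaseNegativeGenerator pins down is that generate() maps a JSON-schema fragment to a list of 3-tuples whose first element is a description string, and that an unknown "type" raises TypeError. A toy generator that satisfies those assertions, purely for illustration (it is not the real tempest.common.generator code):

def toy_negative_generate(schema):
    # Return a list of (description, schema_or_None, invalid_value) 3-tuples
    # for a JSON-schema fragment; only the shape matters for the tests above.
    invalid = []
    if schema['type'] == 'string':
        if 'maxLength' in schema:
            invalid.append(('str_too_long', None,
                            'x' * (schema['maxLength'] + 1)))
        if 'minLength' in schema:
            invalid.append(('str_too_short', None,
                            'x' * max(schema['minLength'] - 1, 0)))
    elif schema['type'] == 'integer':
        if 'maximum' in schema:
            invalid.append(('int_too_big', None, schema['maximum'] + 1))
        if 'minimum' in schema:
            invalid.append(('int_too_small', None, schema['minimum'] - 1))
    elif schema['type'] == 'object':
        for name, prop in schema.get('properties', {}).items():
            for desc, _, value in toy_negative_generate(prop):
                invalid.append(('%s_%s' % (name, desc), None, {name: value}))
    else:
        # Matches test_generate_with_unknown_type.
        raise TypeError('unknown schema type: %s' % schema['type'])
    return invalid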
diff --git a/tempest/tests/test_auth.py b/tempest/tests/test_auth.py
index df04d65..03333be 100644
--- a/tempest/tests/test_auth.py
+++ b/tempest/tests/test_auth.py
@@ -14,6 +14,7 @@
# under the License.
import copy
+import datetime
from tempest import auth
from tempest.common import http
@@ -21,18 +22,16 @@
from tempest import exceptions
from tempest.openstack.common.fixture import mockpatch
from tempest.tests import base
+from tempest.tests import fake_auth_provider
from tempest.tests import fake_config
+from tempest.tests import fake_credentials
from tempest.tests import fake_http
from tempest.tests import fake_identity
class BaseAuthTestsSetUp(base.TestCase):
_auth_provider_class = None
- credentials = {
- 'username': 'fake_user',
- 'password': 'fake_pwd',
- 'tenant_name': 'fake_tenant'
- }
+ credentials = fake_credentials.FakeCredentials()
def _auth(self, credentials, **params):
"""
@@ -42,9 +41,14 @@
def setUp(self):
super(BaseAuthTestsSetUp, self).setUp()
- self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakeConfig)
+ self.useFixture(fake_config.ConfigFixture())
+ self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
self.fake_http = fake_http.fake_httplib2(return_type=200)
self.stubs.Set(http.ClosingHttp, 'request', self.fake_http.request)
+ self.stubs.Set(auth, 'get_credentials',
+ fake_auth_provider.get_credentials)
+ self.stubs.Set(auth, 'get_default_credentials',
+ fake_auth_provider.get_default_credentials)
self.auth_provider = self._auth(self.credentials)
@@ -56,12 +60,19 @@
"""
_auth_provider_class = auth.AuthProvider
- def test_check_credentials_is_dict(self):
- self.assertTrue(self.auth_provider.check_credentials({}))
+ def test_check_credentials_class(self):
+ self.assertRaises(NotImplementedError,
+ self.auth_provider.check_credentials,
+ auth.Credentials())
def test_check_credentials_bad_type(self):
self.assertFalse(self.auth_provider.check_credentials([]))
+ def test_instantiate_with_dict(self):
+ # Dict credentials are only supported for backward compatibility
+ auth_provider = self._auth(credentials={})
+ self.assertIsInstance(auth_provider.credentials, auth.Credentials)
+
def test_instantiate_with_bad_credentials_type(self):
"""
Assure that credentials with bad type fail with TypeError
@@ -98,10 +109,15 @@
self.assertIsNone(self.auth_provider.alt_part)
self.assertIsNone(self.auth_provider.alt_auth_data)
+ def test_fill_credentials(self):
+ self.assertRaises(NotImplementedError,
+ self.auth_provider.fill_credentials)
+
class TestKeystoneV2AuthProvider(BaseAuthTestsSetUp):
_endpoints = fake_identity.IDENTITY_V2_RESPONSE['access']['serviceCatalog']
_auth_provider_class = auth.KeystoneV2AuthProvider
+ credentials = fake_credentials.FakeKeystoneV2Credentials()
def setUp(self):
super(TestKeystoneV2AuthProvider, self).setUp()
@@ -121,6 +137,13 @@
def _get_token_from_fake_identity(self):
return fake_identity.TOKEN
+ def _get_from_fake_identity(self, attr):
+ access = fake_identity.IDENTITY_V2_RESPONSE['access']
+ if attr == 'user_id':
+ return access['user']['id']
+ elif attr == 'tenant_id':
+ return access['token']['tenant']['id']
+
def _test_request_helper(self, filters, expected):
url, headers, body = self.auth_provider.auth_request('GET',
self.target_url,
@@ -130,6 +153,11 @@
self.assertEqual(expected['token'], headers['X-Auth-Token'])
self.assertEqual(expected['body'], body)
+ def _auth_data_with_expiry(self, date_as_string):
+ token, access = self.auth_provider.auth_data
+ access['token']['expires'] = date_as_string
+ return token, access
+
def test_request(self):
filters = {
'service': 'compute',
@@ -203,16 +231,12 @@
del cred[attr]
self.assertFalse(self.auth_provider.check_credentials(cred))
- def test_check_credentials_not_scoped_missing_tenant_name(self):
- cred = copy.copy(self.credentials)
- del cred['tenant_name']
- self.assertTrue(self.auth_provider.check_credentials(cred,
- scoped=False))
-
- def test_check_credentials_missing_tenant_name(self):
- cred = copy.copy(self.credentials)
- del cred['tenant_name']
- self.assertFalse(self.auth_provider.check_credentials(cred))
+ def test_fill_credentials(self):
+ self.auth_provider.fill_credentials()
+ creds = self.auth_provider.credentials
+ for attr in ['user_id', 'tenant_id']:
+ self.assertEqual(self._get_from_fake_identity(attr),
+ getattr(creds, attr))
def _test_base_url_helper(self, expected_url, filters,
auth_data=None):
@@ -291,16 +315,30 @@
expected = 'http://fake_url/'
self._test_base_url_helper(expected, self.filters)
+ def test_token_not_expired(self):
+ expiry_data = datetime.datetime.utcnow() + datetime.timedelta(days=1)
+ auth_data = self._auth_data_with_expiry(
+ expiry_data.strftime(self.auth_provider.EXPIRY_DATE_FORMAT))
+ self.assertFalse(self.auth_provider.is_expired(auth_data))
+
+ def test_token_expired(self):
+ expiry_data = datetime.datetime.utcnow() - datetime.timedelta(hours=1)
+ auth_data = self._auth_data_with_expiry(
+ expiry_data.strftime(self.auth_provider.EXPIRY_DATE_FORMAT))
+ self.assertTrue(self.auth_provider.is_expired(auth_data))
+
+ def test_token_not_expired_to_be_renewed(self):
+ expiry_data = datetime.datetime.utcnow() + \
+ self.auth_provider.token_expiry_threshold / 2
+ auth_data = self._auth_data_with_expiry(
+ expiry_data.strftime(self.auth_provider.EXPIRY_DATE_FORMAT))
+ self.assertTrue(self.auth_provider.is_expired(auth_data))
+
class TestKeystoneV3AuthProvider(TestKeystoneV2AuthProvider):
_endpoints = fake_identity.IDENTITY_V3_RESPONSE['token']['catalog']
_auth_provider_class = auth.KeystoneV3AuthProvider
- credentials = {
- 'username': 'fake_user',
- 'password': 'fake_pwd',
- 'tenant_name': 'fake_tenant',
- 'domain_name': 'fake_domain_name',
- }
+ credentials = fake_credentials.FakeKeystoneV3Credentials()
def setUp(self):
super(TestKeystoneV3AuthProvider, self).setUp()
@@ -315,10 +353,49 @@
return ep['url'].replace('v3', replacement)
return ep['url']
- def test_check_credentials_missing_tenant_name(self):
- cred = copy.copy(self.credentials)
- del cred['domain_name']
- self.assertFalse(self.auth_provider.check_credentials(cred))
+ def _auth_data_with_expiry(self, date_as_string):
+ token, access = self.auth_provider.auth_data
+ access['expires_at'] = date_as_string
+ return token, access
+
+ def _get_from_fake_identity(self, attr):
+ token = fake_identity.IDENTITY_V3_RESPONSE['token']
+ if attr == 'user_id':
+ return token['user']['id']
+ elif attr == 'project_id':
+ return token['project']['id']
+ elif attr == 'user_domain_id':
+ return token['user']['domain']['id']
+ elif attr == 'project_domain_id':
+ return token['project']['domain']['id']
+
+ def test_check_credentials_missing_attribute(self):
+ # reset credentials to fresh ones
+ self.credentials.reset()
+ for attr in ['username', 'password', 'user_domain_name',
+ 'project_domain_name']:
+ cred = copy.copy(self.credentials)
+ del cred[attr]
+ self.assertFalse(self.auth_provider.check_credentials(cred),
+ "Credentials should be invalid without %s" % attr)
+
+ def test_check_domain_credentials_missing_attribute(self):
+ # reset credentials to fresh ones
+ self.credentials.reset()
+ domain_creds = fake_credentials.FakeKeystoneV3DomainCredentials()
+ for attr in ['username', 'password', 'user_domain_name']:
+ cred = copy.copy(domain_creds)
+ del cred[attr]
+ self.assertFalse(self.auth_provider.check_credentials(cred),
+ "Credentials should be invalid without %s" % attr)
+
+ def test_fill_credentials(self):
+ self.auth_provider.fill_credentials()
+ creds = self.auth_provider.credentials
+ for attr in ['user_id', 'project_id', 'user_domain_id',
+ 'project_domain_id']:
+ self.assertEqual(self._get_from_fake_identity(attr),
+ getattr(creds, attr))
# Overwrites v2 test
def test_base_url_to_get_admin_endpoint(self):
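The new expiry tests only constrain is_expired: a token whose expiry lies further in the future than the renewal threshold is not expired, while anything within the threshold or already in the past is. A minimal sketch of that logic (the real implementation is in tempest/auth.py; the date format and threshold value here are assumptions for illustration):

import datetime

EXPIRY_DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
TOKEN_EXPIRY_THRESHOLD = datetime.timedelta(seconds=60)


def is_expired(expires_str):
    expiry = datetime.datetime.strptime(expires_str, EXPIRY_DATE_FORMAT)
    # A token inside the renewal threshold counts as expired, which is why
    # test_token_not_expired_to_be_renewed expects True.
    return expiry - TOKEN_EXPIRY_THRESHOLD <= datetime.datetime.utcnow()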
diff --git a/tempest/tests/test_commands.py b/tempest/tests/test_commands.py
new file mode 100644
index 0000000..bdb9269
--- /dev/null
+++ b/tempest/tests/test_commands.py
@@ -0,0 +1,87 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+import subprocess
+
+from tempest.common import commands
+from tempest.tests import base
+
+
+class TestCommands(base.TestCase):
+
+ def setUp(self):
+ super(TestCommands, self).setUp()
+ self.subprocess_args = {'stdout': subprocess.PIPE,
+ 'stderr': subprocess.STDOUT}
+
+ @mock.patch('subprocess.Popen')
+ def test_ip_addr_raw(self, mock):
+ expected = ['/usr/bin/sudo', '-n', 'ip', 'a']
+ commands.ip_addr_raw()
+ mock.assert_called_once_with(expected, **self.subprocess_args)
+
+ @mock.patch('subprocess.Popen')
+ def test_ip_route_raw(self, mock):
+ expected = ['/usr/bin/sudo', '-n', 'ip', 'r']
+ commands.ip_route_raw()
+ mock.assert_called_once_with(expected, **self.subprocess_args)
+
+ @mock.patch('subprocess.Popen')
+ def test_ip_ns_raw(self, mock):
+ expected = ['/usr/bin/sudo', '-n', 'ip', 'netns', 'list']
+ commands.ip_ns_raw()
+ mock.assert_called_once_with(expected, **self.subprocess_args)
+
+ @mock.patch('subprocess.Popen')
+ def test_iptables_raw(self, mock):
+ table = 'filter'
+ expected = ['/usr/bin/sudo', '-n', 'iptables', '-v', '-S', '-t',
+ '%s' % table]
+ commands.iptables_raw(table)
+ mock.assert_called_once_with(expected, **self.subprocess_args)
+
+ @mock.patch('subprocess.Popen')
+ def test_ip_ns_list(self, mock):
+ expected = ['/usr/bin/sudo', '-n', 'ip', 'netns', 'list']
+ commands.ip_ns_list()
+ mock.assert_called_once_with(expected, **self.subprocess_args)
+
+ @mock.patch('subprocess.Popen')
+ def test_ip_ns_addr(self, mock):
+ ns_list = commands.ip_ns_list()
+ for ns in ns_list:
+ expected = ['/usr/bin/sudo', '-n', 'ip', 'netns', 'exec', ns,
+ 'ip', 'a']
+ commands.ip_ns_addr(ns)
+ mock.assert_called_once_with(expected, **self.subprocess_args)
+
+ @mock.patch('subprocess.Popen')
+ def test_ip_ns_route(self, mock):
+ ns_list = commands.ip_ns_list()
+ for ns in ns_list:
+ expected = ['/usr/bin/sudo', '-n', 'ip', 'netns', 'exec', ns,
+ 'ip', 'r']
+ commands.ip_ns_route(ns)
+ mock.assert_called_once_with(expected, **self.subprocess_args)
+
+ @mock.patch('subprocess.Popen')
+ def test_iptables_ns(self, mock):
+ table = 'filter'
+ ns_list = commands.ip_ns_list()
+ for ns in ns_list:
+ expected = ['/usr/bin/sudo', '-n', 'ip', 'netns', 'exec', ns,
+ 'iptables', '-v', '-S', '-t', table]
+ commands.iptables_ns(ns, table)
+ mock.assert_called_once_with(expected, **self.subprocess_args)
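What these assertions pin down, sketched as a standalone helper (the real helpers live in tempest/common/commands.py; this is only an illustration of the pattern): build a sudo command list and hand it to subprocess.Popen with stdout and stderr captured.

import subprocess


def sudo_cmd_call(cmd):
    # Prefix the command with non-interactive sudo, exactly the argument
    # list the mocked Popen is expected to receive.
    args = ['/usr/bin/sudo', '-n'] + cmd.split()
    proc = subprocess.Popen(args, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    return proc.communicate()[0]


def ip_addr_raw():
    return sudo_cmd_call('ip a')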
diff --git a/tempest/tests/test_compute_xml_common.py b/tempest/tests/test_compute_xml_common.py
index bfa6a10..1561931 100644
--- a/tempest/tests/test_compute_xml_common.py
+++ b/tempest/tests/test_compute_xml_common.py
@@ -13,7 +13,7 @@
# under the License.
from lxml import etree
-from tempest.services.compute.xml import common
+from tempest.common import xml_utils as common
from tempest.tests import base
diff --git a/tempest/tests/test_credentials.py b/tempest/tests/test_credentials.py
new file mode 100644
index 0000000..9da5f92
--- /dev/null
+++ b/tempest/tests/test_credentials.py
@@ -0,0 +1,229 @@
+# Copyright 2014 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from oslo.config import cfg
+
+from tempest import auth
+from tempest.common import http
+from tempest.common import tempest_fixtures as fixtures
+from tempest import config
+from tempest import exceptions
+from tempest.tests import base
+from tempest.tests import fake_config
+from tempest.tests import fake_http
+from tempest.tests import fake_identity
+
+
+class CredentialsTests(base.TestCase):
+ attributes = {}
+ credentials_class = auth.Credentials
+
+ def _get_credentials(self, attributes=None):
+ if attributes is None:
+ attributes = self.attributes
+ return self.credentials_class(**attributes)
+
+ def setUp(self):
+ super(CredentialsTests, self).setUp()
+ self.fake_http = fake_http.fake_httplib2(return_type=200)
+ self.stubs.Set(http.ClosingHttp, 'request', self.fake_http.request)
+ self.useFixture(fake_config.ConfigFixture())
+ self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
+
+ def test_create(self):
+ creds = self._get_credentials()
+ self.assertEqual(self.attributes, creds._initial)
+
+ def test_create_invalid_attr(self):
+ self.assertRaises(exceptions.InvalidCredentials,
+ self._get_credentials,
+ attributes=dict(invalid='fake'))
+
+ def test_default(self):
+ self.useFixture(fixtures.LockFixture('auth_version'))
+ for ctype in self.credentials_class.TYPES:
+ self.assertRaises(NotImplementedError,
+ self.credentials_class.get_default,
+ credentials_type=ctype)
+
+ def test_invalid_default(self):
+ self.assertRaises(exceptions.InvalidCredentials,
+ auth.Credentials.get_default,
+ credentials_type='invalid_type')
+
+ def test_is_valid(self):
+ creds = self._get_credentials()
+ self.assertRaises(NotImplementedError, creds.is_valid)
+
+
+class KeystoneV2CredentialsTests(CredentialsTests):
+ attributes = {
+ 'username': 'fake_username',
+ 'password': 'fake_password',
+ 'tenant_name': 'fake_tenant_name'
+ }
+
+ identity_response = fake_identity._fake_v2_response
+ credentials_class = auth.KeystoneV2Credentials
+
+ def setUp(self):
+ super(KeystoneV2CredentialsTests, self).setUp()
+ self.stubs.Set(http.ClosingHttp, 'request', self.identity_response)
+ self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
+
+ def _verify_credentials(self, credentials_class, filled=True,
+ creds_dict=None):
+
+ def _check(credentials):
+ # Check the right version of credentials has been returned
+ self.assertIsInstance(credentials, credentials_class)
+ # Check the id attributes are filled in
+ attributes = [x for x in credentials.ATTRIBUTES if (
+ '_id' in x and x != 'domain_id')]
+ for attr in attributes:
+ if filled:
+ self.assertIsNotNone(getattr(credentials, attr))
+ else:
+ self.assertIsNone(getattr(credentials, attr))
+
+ if creds_dict is None:
+ for ctype in auth.Credentials.TYPES:
+ creds = auth.get_default_credentials(credential_type=ctype,
+ fill_in=filled)
+ _check(creds)
+ else:
+ creds = auth.get_credentials(fill_in=filled, **creds_dict)
+ _check(creds)
+
+ def test_get_default_credentials(self):
+ self.useFixture(fixtures.LockFixture('auth_version'))
+ self._verify_credentials(credentials_class=self.credentials_class)
+
+ def test_get_credentials(self):
+ self.useFixture(fixtures.LockFixture('auth_version'))
+ self._verify_credentials(credentials_class=self.credentials_class,
+ creds_dict=self.attributes)
+
+ def test_get_credentials_not_filled(self):
+ self.useFixture(fixtures.LockFixture('auth_version'))
+ self._verify_credentials(credentials_class=self.credentials_class,
+ filled=False,
+ creds_dict=self.attributes)
+
+ def test_is_valid(self):
+ creds = self._get_credentials()
+ self.assertTrue(creds.is_valid())
+
+ def test_is_not_valid(self):
+ creds = self._get_credentials()
+ for attr in self.attributes.keys():
+ delattr(creds, attr)
+ self.assertFalse(creds.is_valid(),
+ "Credentials should be invalid without %s" % attr)
+
+ def test_default(self):
+ self.useFixture(fixtures.LockFixture('auth_version'))
+ for ctype in self.credentials_class.TYPES:
+ creds = self.credentials_class.get_default(credentials_type=ctype)
+ for attr in self.attributes.keys():
+ # Default configuration values related to credentials
+ # are defined as fake_* in fake_config.py
+ self.assertEqual(getattr(creds, attr), 'fake_' + attr)
+
+ def test_reset_all_attributes(self):
+ creds = self._get_credentials()
+ initial_creds = copy.deepcopy(creds)
+ set_attr = creds.__dict__.keys()
+ missing_attr = set(creds.ATTRIBUTES).difference(set_attr)
+ # Set all unset attributes, then reset
+ for attr in missing_attr:
+ setattr(creds, attr, 'fake' + attr)
+ creds.reset()
+ # Check reset credentials are same as initial ones
+ self.assertEqual(creds, initial_creds)
+
+ def test_reset_single_attribute(self):
+ creds = self._get_credentials()
+ initial_creds = copy.deepcopy(creds)
+ set_attr = creds.__dict__.keys()
+ missing_attr = set(creds.ATTRIBUTES).difference(set_attr)
+        # Set one unset attribute, then reset
+ for attr in missing_attr:
+ setattr(creds, attr, 'fake' + attr)
+ creds.reset()
+ # Check reset credentials are same as initial ones
+ self.assertEqual(creds, initial_creds)
+
+
+class KeystoneV3CredentialsTests(KeystoneV2CredentialsTests):
+ attributes = {
+ 'username': 'fake_username',
+ 'password': 'fake_password',
+ 'project_name': 'fake_project_name',
+ 'user_domain_name': 'fake_domain_name'
+ }
+
+ credentials_class = auth.KeystoneV3Credentials
+ identity_response = fake_identity._fake_v3_response
+
+ def setUp(self):
+ super(KeystoneV3CredentialsTests, self).setUp()
+ # Additional config items reset by cfg fixture after each test
+ cfg.CONF.set_default('auth_version', 'v3', group='identity')
+ # Identity group items
+ for prefix in ['', 'alt_', 'admin_']:
+ cfg.CONF.set_default(prefix + 'domain_name', 'fake_domain_name',
+ group='identity')
+ # Compute Admin group items
+ cfg.CONF.set_default('domain_name', 'fake_domain_name',
+ group='compute-admin')
+
+ def test_default(self):
+ self.useFixture(fixtures.LockFixture('auth_version'))
+ for ctype in self.credentials_class.TYPES:
+ creds = self.credentials_class.get_default(credentials_type=ctype)
+ for attr in self.attributes.keys():
+ if attr == 'project_name':
+ config_value = 'fake_tenant_name'
+ elif attr == 'user_domain_name':
+ config_value = 'fake_domain_name'
+ else:
+ config_value = 'fake_' + attr
+ self.assertEqual(getattr(creds, attr), config_value)
+
+ def test_synced_attributes(self):
+ attributes = self.attributes
+        # Create V3 credentials with project and user_domain, then check that
+        # the tenant_* aliases stay in sync with the project_* values
+ for attr in ['project_id', 'user_domain_id']:
+ attributes[attr] = 'fake_' + attr
+ creds = self._get_credentials(attributes)
+ self.assertEqual(creds.project_name, creds.tenant_name)
+ self.assertEqual(creds.project_id, creds.tenant_id)
+ self.assertEqual(creds.user_domain_name, creds.project_domain_name)
+ self.assertEqual(creds.user_domain_id, creds.project_domain_id)
+        # Replace user_domain with project_domain and project with tenant,
+        # then recreate the credentials and check the aliases the other way
+        del attributes['user_domain_name']
+        del attributes['user_domain_id']
+        del attributes['project_name']
+        del attributes['project_id']
+        for attr in ['project_domain_name', 'project_domain_id',
+                     'tenant_name', 'tenant_id']:
+            attributes[attr] = 'fake_' + attr
+        creds = self._get_credentials(attributes)
+ self.assertEqual(creds.tenant_name, creds.project_name)
+ self.assertEqual(creds.tenant_id, creds.project_id)
+ self.assertEqual(creds.project_domain_name, creds.user_domain_name)
+ self.assertEqual(creds.project_domain_id, creds.user_domain_id)
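One illustrative way (not necessarily how auth.KeystoneV3Credentials is implemented) to get the tenant_*/project_* aliasing that test_synced_attributes checks is to make the tenant_* names thin properties over the project_* fields:

class SyncedCreds(object):
    # Hypothetical sketch: tenant_name/tenant_id are aliases of
    # project_name/project_id, so reads and writes stay in sync.
    def __init__(self, **kwargs):
        self.project_name = kwargs.get('project_name')
        self.project_id = kwargs.get('project_id')

    @property
    def tenant_name(self):
        return self.project_name

    @tenant_name.setter
    def tenant_name(self, value):
        self.project_name = value

    @property
    def tenant_id(self):
        return self.project_id

    @tenant_id.setter
    def tenant_id(self, value):
        self.project_id = value


creds = SyncedCreds(project_name='fake_project_name', project_id='fake_id')
assert creds.tenant_name == creds.project_name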
diff --git a/tempest/tests/test_decorators.py b/tempest/tests/test_decorators.py
index aa3c8fc..804204a 100644
--- a/tempest/tests/test_decorators.py
+++ b/tempest/tests/test_decorators.py
@@ -13,8 +13,12 @@
# under the License.
+import mock
import testtools
+from oslo.config import cfg
+
+from tempest import config
from tempest import exceptions
from tempest.openstack.common.fixture import mockpatch
from tempest import test
@@ -25,7 +29,8 @@
class BaseDecoratorsTest(base.TestCase):
def setUp(self):
super(BaseDecoratorsTest, self).setUp()
- self.stubs.Set(test, 'CONF', fake_config.FakeConfig)
+ self.config_fixture = self.useFixture(fake_config.ConfigFixture())
+ self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
class TestAttrDecorator(BaseDecoratorsTest):
@@ -191,10 +196,8 @@
class TestRequiresExtDecorator(BaseDecoratorsTest):
def setUp(self):
super(TestRequiresExtDecorator, self).setUp()
- self.fixture = self.useFixture(mockpatch.PatchObject(
- test.CONF.compute_feature_enabled,
- 'api_extensions',
- new=['enabled_ext', 'another_ext']))
+ cfg.CONF.set_default('api_extensions', ['enabled_ext', 'another_ext'],
+ 'compute-feature-enabled')
def _test_requires_ext_helper(self, expected_to_skip=True,
**decorator_args):
@@ -220,7 +223,7 @@
def test_requires_ext_decorator_with_all_ext_enabled(self):
# disable fixture so the default (all) is used.
- self.fixture.cleanUp()
+ self.config_fixture.cleanUp()
self._test_requires_ext_helper(expected_to_skip=False,
extension='random_ext',
service='compute')
@@ -230,3 +233,19 @@
self._test_requires_ext_helper,
extension='enabled_ext',
service='bad_service')
+
+
+class TestSimpleNegativeDecorator(BaseDecoratorsTest):
+ @test.SimpleNegativeAutoTest
+ class FakeNegativeJSONTest(test.NegativeAutoTest):
+ _schema_file = 'fake/schemas/file.json'
+
+ def test_testfunc_exist(self):
+ self.assertIn("test_fake_negative", dir(self.FakeNegativeJSONTest))
+
+ @mock.patch('tempest.test.NegativeAutoTest.execute')
+ def test_testfunc_calls_execute(self, mock):
+ obj = self.FakeNegativeJSONTest("test_fake_negative")
+ self.assertIn("test_fake_negative", dir(obj))
+ obj.test_fake_negative()
+ mock.assert_called_once_with(self.FakeNegativeJSONTest._schema_file)
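TestSimpleNegativeDecorator only requires that the class decorator attach a test_<name>_negative method which delegates to execute(_schema_file). A hypothetical sketch of such a decorator (the name derivation and decorator shape here are guesses, not the real tempest.test.SimpleNegativeAutoTest):

import re


def simple_negative_auto_test(cls):
    # Derive 'fake' from 'FakeNegativeJSONTest'; purely illustrative.
    resource = re.sub('Negative.*Test.*$', '', cls.__name__).lower()

    def test_method(self):
        # Delegate to the class' execute() with its schema file, which is
        # what test_testfunc_calls_execute asserts.
        self.execute(cls._schema_file)

    setattr(cls, 'test_%s_negative' % resource, test_method)
    return cls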
diff --git a/tempest/tests/test_glance_http.py b/tempest/tests/test_glance_http.py
new file mode 100644
index 0000000..bb2df43
--- /dev/null
+++ b/tempest/tests/test_glance_http.py
@@ -0,0 +1,200 @@
+# Copyright 2014 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import httplib
+import json
+import mock
+import six
+import socket
+
+from tempest.common import glance_http
+from tempest import exceptions
+from tempest.openstack.common.fixture import mockpatch
+from tempest.tests import base
+from tempest.tests import fake_auth_provider
+from tempest.tests import fake_http
+
+
+class TestGlanceHTTPClient(base.TestCase):
+
+ def setUp(self):
+ super(TestGlanceHTTPClient, self).setUp()
+ self.fake_http = fake_http.fake_httplib2(return_type=200)
+        # NOTE(maurosr): using http here implies that we will be using httplib
+        # directly. With https, glance_client would use an https version, but
+        # the real backend would still be httplib anyway, and since we mock it
+        # there is no reason to care.
+ self.endpoint = 'http://fake_url.com'
+ self.fake_auth = fake_auth_provider.FakeAuthProvider()
+
+ self.fake_auth.base_url = mock.MagicMock(return_value=self.endpoint)
+
+ self.useFixture(mockpatch.PatchObject(httplib.HTTPConnection,
+ 'request',
+ side_effect=self.fake_http.request(self.endpoint)[1]))
+ self.client = glance_http.HTTPClient(self.fake_auth, {})
+
+ def _set_response_fixture(self, header, status, resp_body):
+ resp = fake_http.fake_httplib(header, status=status,
+ body=six.StringIO(resp_body))
+ self.useFixture(mockpatch.PatchObject(httplib.HTTPConnection,
+ 'getresponse',
+ return_value=resp))
+ return resp
+
+ def test_json_request_without_content_type_header(self):
+ self._set_response_fixture({}, 200, 'fake_response_body')
+ resp, body = self.client.json_request('GET', '/images')
+ self.assertEqual(200, resp.status)
+ self.assertIsNone(body)
+
+ def test_json_request_with_xml_content_type_header(self):
+ self._set_response_fixture({'content-type': 'application/xml'},
+ 200, 'fake_response_body')
+ resp, body = self.client.json_request('GET', '/images')
+ self.assertEqual(200, resp.status)
+ self.assertIsNone(body)
+
+ def test_json_request_with_content_type_header(self):
+ self._set_response_fixture({'content-type': 'application/json'},
+ 200, 'fake_response_body')
+ resp, body = self.client.json_request('GET', '/images')
+ self.assertEqual(200, resp.status)
+ self.assertEqual('fake_response_body', body)
+
+ def test_json_request_fails_to_json_loads(self):
+ self._set_response_fixture({'content-type': 'application/json'},
+ 200, 'fake_response_body')
+ self.useFixture(mockpatch.PatchObject(json, 'loads',
+ side_effect=ValueError()))
+ resp, body = self.client.json_request('GET', '/images')
+ self.assertEqual(200, resp.status)
+ self.assertEqual(body, 'fake_response_body')
+
+ def test_json_request_socket_timeout(self):
+ self.useFixture(mockpatch.PatchObject(httplib.HTTPConnection,
+ 'request',
+ side_effect=socket.timeout()))
+ self.assertRaises(exceptions.TimeoutException,
+ self.client.json_request, 'GET', '/images')
+
+ def test_json_request_endpoint_not_found(self):
+ self.useFixture(mockpatch.PatchObject(httplib.HTTPConnection,
+ 'request',
+ side_effect=socket.gaierror()))
+ self.assertRaises(exceptions.EndpointNotFound,
+ self.client.json_request, 'GET', '/images')
+
+ def test_raw_request(self):
+ self._set_response_fixture({}, 200, 'fake_response_body')
+ resp, body = self.client.raw_request('GET', '/images')
+ self.assertEqual(200, resp.status)
+ self.assertEqual('fake_response_body', body.read())
+
+ def test_raw_request_with_response_chunked(self):
+ self._set_response_fixture({}, 200, 'fake_response_body')
+ self.useFixture(mockpatch.PatchObject(glance_http,
+ 'CHUNKSIZE', 1))
+ resp, body = self.client.raw_request('GET', '/images')
+ self.assertEqual(200, resp.status)
+ self.assertEqual('fake_response_body', body.read())
+
+ def test_raw_request_chunked(self):
+ self.useFixture(mockpatch.PatchObject(glance_http,
+ 'CHUNKSIZE', 1))
+ self.useFixture(mockpatch.PatchObject(httplib.HTTPConnection,
+ 'endheaders'))
+ self.useFixture(mockpatch.PatchObject(httplib.HTTPConnection,
+ 'send'))
+
+ self._set_response_fixture({}, 200, 'fake_response_body')
+ req_body = six.StringIO('fake_request_body')
+ resp, body = self.client.raw_request('PUT', '/images', body=req_body)
+ self.assertEqual(200, resp.status)
+ self.assertEqual('fake_response_body', body.read())
+ httplib.HTTPConnection.send.assert_call_count(req_body.len)
+
+ def test_get_connection_class_for_https(self):
+ conn_class = self.client.get_connection_class('https')
+ self.assertEqual(glance_http.VerifiedHTTPSConnection, conn_class)
+
+ def test_get_connection_class_for_http(self):
+ conn_class = (self.client.get_connection_class('http'))
+ self.assertEqual(httplib.HTTPConnection, conn_class)
+
+ def test_get_connection_http(self):
+ self.assertTrue(isinstance(self.client.get_connection(),
+ httplib.HTTPConnection))
+
+ def test_get_connection_https(self):
+ endpoint = 'https://fake_url.com'
+ self.fake_auth.base_url = mock.MagicMock(return_value=endpoint)
+ self.client = glance_http.HTTPClient(self.fake_auth, {})
+ self.assertTrue(isinstance(self.client.get_connection(),
+ glance_http.VerifiedHTTPSConnection))
+
+    def test_get_connection_url_not_found(self):
+ self.useFixture(mockpatch.PatchObject(self.client, 'connection_class',
+ side_effect=httplib.InvalidURL()
+ ))
+ self.assertRaises(exceptions.EndpointNotFound,
+ self.client.get_connection)
+
+ def test_get_connection_kwargs_default_for_http(self):
+ kwargs = self.client.get_connection_kwargs('http')
+ self.assertEqual(600, kwargs['timeout'])
+ self.assertEqual(1, len(kwargs.keys()))
+
+ def test_get_connection_kwargs_set_timeout_for_http(self):
+ kwargs = self.client.get_connection_kwargs('http', timeout=10,
+ cacert='foo')
+ self.assertEqual(10, kwargs['timeout'])
+ # nothing more than timeout is evaluated for http connections
+ self.assertEqual(1, len(kwargs.keys()))
+
+ def test_get_connection_kwargs_default_for_https(self):
+ kwargs = self.client.get_connection_kwargs('https')
+ self.assertEqual(600, kwargs['timeout'])
+ self.assertEqual(None, kwargs['cacert'])
+ self.assertEqual(None, kwargs['cert_file'])
+ self.assertEqual(None, kwargs['key_file'])
+ self.assertEqual(False, kwargs['insecure'])
+ self.assertEqual(True, kwargs['ssl_compression'])
+ self.assertEqual(6, len(kwargs.keys()))
+
+ def test_get_connection_kwargs_set_params_for_https(self):
+ kwargs = self.client.get_connection_kwargs('https', timeout=10,
+ cacert='foo',
+ cert_file='/foo/bar.cert',
+ key_file='/foo/key.pem',
+ insecure=True,
+ ssl_compression=False)
+ self.assertEqual(10, kwargs['timeout'])
+ self.assertEqual('foo', kwargs['cacert'])
+ self.assertEqual('/foo/bar.cert', kwargs['cert_file'])
+ self.assertEqual('/foo/key.pem', kwargs['key_file'])
+ self.assertEqual(True, kwargs['insecure'])
+ self.assertEqual(False, kwargs['ssl_compression'])
+ self.assertEqual(6, len(kwargs.keys()))
+
+
+class TestResponseBodyIterator(base.TestCase):
+
+ def test_iter_default_chunk_size_64k(self):
+ resp = fake_http.fake_httplib({}, six.StringIO(
+ 'X' * (glance_http.CHUNKSIZE + 1)))
+ iterator = glance_http.ResponseBodyIterator(resp)
+ chunks = list(iterator)
+ self.assertEqual(chunks, ['X' * glance_http.CHUNKSIZE, 'X'])
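The chunking behaviour that test_iter_default_chunk_size_64k relies on, sketched against any file-like response object (the real class is glance_http.ResponseBodyIterator; the CHUNKSIZE value below is illustrative):

CHUNKSIZE = 65536


class ExampleBodyIterator(object):
    def __init__(self, resp):
        self.resp = resp

    def __iter__(self):
        # Yield fixed-size chunks until the response body is exhausted, so a
        # body of CHUNKSIZE + 1 bytes comes out as two chunks.
        while True:
            chunk = self.resp.read(CHUNKSIZE)
            if not chunk:
                break
            yield chunk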
diff --git a/tempest/tests/test_hacking.py b/tempest/tests/test_hacking.py
new file mode 100644
index 0000000..ab81836
--- /dev/null
+++ b/tempest/tests/test_hacking.py
@@ -0,0 +1,95 @@
+# Copyright 2014 Matthew Treinish
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.hacking import checks
+from tempest.tests import base
+
+
+class HackingTestCase(base.TestCase):
+ """
+ This class tests the hacking checks in tempest.hacking.checks by passing
+ strings to the check methods like the pep8/flake8 parser would. The parser
+ loops over each line in the file and then passes the parameters to the
+ check method. The parameter names in the check method dictate what type of
+ object is passed to the check method. The parameter types are::
+
+ logical_line: A processed line with the following modifications:
+ - Multi-line statements converted to a single line.
+ - Stripped left and right.
+ - Contents of strings replaced with "xxx" of same length.
+ - Comments removed.
+ physical_line: Raw line of text from the input file.
+ lines: a list of the raw lines from the input file
+ tokens: the tokens that contribute to this logical line
+ line_number: line number in the input file
+ total_lines: number of lines in the input file
+ blank_lines: blank lines before this one
+ indent_char: indentation character in this file (" " or "\t")
+ indent_level: indentation (with tabs expanded to multiples of 8)
+ previous_indent_level: indentation on previous line
+ previous_logical: previous logical line
+ filename: Path of the file being run through pep8
+
+    When running a test on a check method, the return value will be False/None
+    if there is no violation in the sample input. If there is a violation, a
+    tuple is returned containing the position in the line and a message. So to
+    check the result, use assertTrue when the check is expected to flag the
+    line and assertFalse when it should pass.
+ """
+ def test_no_setupclass_for_unit_tests(self):
+ self.assertTrue(checks.no_setupclass_for_unit_tests(
+ " def setUpClass(cls):", './tempest/tests/fake_test.py'))
+ self.assertIsNone(checks.no_setupclass_for_unit_tests(
+ " def setUpClass(cls): # noqa", './tempest/tests/fake_test.py'))
+ self.assertFalse(checks.no_setupclass_for_unit_tests(
+ " def setUpClass(cls):", './tempest/api/fake_test.py'))
+
+ def test_import_no_clients_in_api(self):
+ for client in checks.PYTHON_CLIENTS:
+ string = "import " + client + "client"
+ self.assertTrue(checks.import_no_clients_in_api(
+ string, './tempest/api/fake_test.py'))
+ self.assertFalse(checks.import_no_clients_in_api(
+ string, './tempest/scenario/fake_test.py'))
+
+ def test_scenario_tests_need_service_tags(self):
+ self.assertFalse(checks.scenario_tests_need_service_tags(
+ 'def test_fake:', './tempest/scenario/test_fake.py',
+ "@test.services('compute')"))
+ self.assertFalse(checks.scenario_tests_need_service_tags(
+ 'def test_fake_test:', './tempest/api/compute/test_fake.py',
+ "@test.services('image')"))
+ self.assertTrue(checks.scenario_tests_need_service_tags(
+ 'def test_fake_test:', './tempest/scenario/test_fake.py',
+ '\n'))
+
+ def test_no_vi_headers(self):
+ # NOTE(mtreinish) The lines parameter is used only for finding the
+ # line location in the file. So these tests just pass a list of an
+ # arbitrary length to use for verifying the check function.
+ self.assertTrue(checks.no_vi_headers(
+ '# vim: tabstop=4 shiftwidth=4 softtabstop=4', 1, range(250)))
+ self.assertTrue(checks.no_vi_headers(
+ '# vim: tabstop=4 shiftwidth=4 softtabstop=4', 249, range(250)))
+ self.assertFalse(checks.no_vi_headers(
+ '# vim: tabstop=4 shiftwidth=4 softtabstop=4', 149, range(250)))
+
+ def test_service_tags_not_in_module_path(self):
+ self.assertTrue(checks.service_tags_not_in_module_path(
+ "@test.services('compute')", './tempest/api/compute/fake_test.py'))
+ self.assertFalse(checks.service_tags_not_in_module_path(
+ "@test.services('compute')",
+ './tempest/scenario/compute/fake_test.py'))
+ self.assertFalse(checks.service_tags_not_in_module_path(
+ "@test.services('compute')", './tempest/api/image/fake_test.py'))
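The checks driven here all have the flake8/hacking shape described in the class docstring: take some of the pep8 parameters, return a (position, message) tuple on a violation and None otherwise. A standalone example of that shape (not one of tempest's real checks in tempest/hacking/checks.py):

import re

SETUPCLASS_RE = re.compile(r'\bdef setUpClass\(')


def example_no_setupclass_for_unit_tests(logical_line, filename):
    # Only unit tests under tempest/tests are affected, and '# noqa' opts a
    # line out, mirroring the cases exercised above.
    if 'tempest/tests' not in filename:
        return None
    if logical_line.strip().endswith('# noqa'):
        return None
    if SETUPCLASS_RE.search(logical_line):
        return 0, 'example check: setUpClass is not allowed in unit tests'
    return None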
diff --git a/tempest/tests/test_rest_client.py b/tempest/tests/test_rest_client.py
index 827b5c9..64ad3bc 100644
--- a/tempest/tests/test_rest_client.py
+++ b/tempest/tests/test_rest_client.py
@@ -16,10 +16,10 @@
import json
from tempest.common import rest_client
+from tempest.common import xml_utils as xml
from tempest import config
from tempest import exceptions
from tempest.openstack.common.fixture import mockpatch
-from tempest.services.compute.xml import common as xml
from tempest.tests import base
from tempest.tests import fake_auth_provider
from tempest.tests import fake_config
@@ -35,14 +35,15 @@
def setUp(self):
super(BaseRestClientTestClass, self).setUp()
- self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakeConfig)
+ self.useFixture(fake_config.ConfigFixture())
+ self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
self.rest_client = rest_client.RestClient(
fake_auth_provider.FakeAuthProvider())
self.stubs.Set(httplib2.Http, 'request', self.fake_http.request)
self.useFixture(mockpatch.PatchObject(self.rest_client, '_get_region',
side_effect=self._get_region()))
self.useFixture(mockpatch.PatchObject(self.rest_client,
- '_log_response'))
+ '_log_request'))
class TestRestClientHTTPMethods(BaseRestClientTestClass):
@@ -138,6 +139,102 @@
self._verify_headers(resp)
+class TestRestClientUpdateHeaders(BaseRestClientTestClass):
+ def setUp(self):
+ self.fake_http = fake_http.fake_httplib2()
+ super(TestRestClientUpdateHeaders, self).setUp()
+ self.useFixture(mockpatch.PatchObject(self.rest_client,
+ '_error_checker'))
+ self.headers = {'X-Configuration-Session': 'session_id'}
+
+ def test_post_update_headers(self):
+ __, return_dict = self.rest_client.post(self.url, {},
+ extra_headers=True,
+ headers=self.headers)
+
+ self.assertDictContainsSubset(
+ {'X-Configuration-Session': 'session_id',
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/json'},
+ return_dict['headers']
+ )
+
+ def test_get_update_headers(self):
+ __, return_dict = self.rest_client.get(self.url,
+ extra_headers=True,
+ headers=self.headers)
+
+ self.assertDictContainsSubset(
+ {'X-Configuration-Session': 'session_id',
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/json'},
+ return_dict['headers']
+ )
+
+ def test_delete_update_headers(self):
+ __, return_dict = self.rest_client.delete(self.url,
+ extra_headers=True,
+ headers=self.headers)
+
+ self.assertDictContainsSubset(
+ {'X-Configuration-Session': 'session_id',
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/json'},
+ return_dict['headers']
+ )
+
+ def test_patch_update_headers(self):
+ __, return_dict = self.rest_client.patch(self.url, {},
+ extra_headers=True,
+ headers=self.headers)
+
+ self.assertDictContainsSubset(
+ {'X-Configuration-Session': 'session_id',
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/json'},
+ return_dict['headers']
+ )
+
+ def test_put_update_headers(self):
+ __, return_dict = self.rest_client.put(self.url, {},
+ extra_headers=True,
+ headers=self.headers)
+
+ self.assertDictContainsSubset(
+ {'X-Configuration-Session': 'session_id',
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/json'},
+ return_dict['headers']
+ )
+
+ def test_head_update_headers(self):
+ self.useFixture(mockpatch.PatchObject(self.rest_client,
+ 'response_checker'))
+
+ __, return_dict = self.rest_client.head(self.url,
+ extra_headers=True,
+ headers=self.headers)
+
+ self.assertDictContainsSubset(
+ {'X-Configuration-Session': 'session_id',
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/json'},
+ return_dict['headers']
+ )
+
+ def test_copy_update_headers(self):
+ __, return_dict = self.rest_client.copy(self.url,
+ extra_headers=True,
+ headers=self.headers)
+
+ self.assertDictContainsSubset(
+ {'X-Configuration-Session': 'session_id',
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/json'},
+ return_dict['headers']
+ )
+
+
class TestRestClientHeadersXML(TestRestClientHeadersJSON):
TYPE = "xml"
@@ -254,7 +351,8 @@
def setUp(self):
super(TestRestClientErrorCheckerJSON, self).setUp()
- self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakeConfig)
+ self.useFixture(fake_config.ConfigFixture())
+ self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
self.rest_client = rest_client.RestClient(
fake_auth_provider.FakeAuthProvider())
@@ -341,3 +439,104 @@
self.assertRaises(exceptions.InvalidContentType,
self.rest_client._error_checker,
**self.set_data("405", enc="fake_enc"))
+
+
+class TestRestClientUtils(BaseRestClientTestClass):
+
+ def _is_resource_deleted(self, resource_id):
+ if not isinstance(self.retry_pass, int):
+ return False
+ if self.retry_count >= self.retry_pass:
+ return True
+ self.retry_count = self.retry_count + 1
+ return False
+
+ def setUp(self):
+ self.fake_http = fake_http.fake_httplib2()
+ super(TestRestClientUtils, self).setUp()
+ self.retry_count = 0
+ self.retry_pass = None
+ self.original_deleted_method = self.rest_client.is_resource_deleted
+ self.rest_client.is_resource_deleted = self._is_resource_deleted
+
+ def test_wait_for_resource_deletion(self):
+ self.retry_pass = 2
+ # Ensure timeout long enough for loop execution to hit retry count
+ self.rest_client.build_timeout = 500
+ sleep_mock = self.patch('time.sleep')
+ self.rest_client.wait_for_resource_deletion('1234')
+ self.assertEqual(len(sleep_mock.mock_calls), 2)
+
+ def test_wait_for_resource_deletion_not_deleted(self):
+ self.patch('time.sleep')
+ # Set timeout to be very quick to force exception faster
+ self.rest_client.build_timeout = 1
+ self.assertRaises(exceptions.TimeoutException,
+ self.rest_client.wait_for_resource_deletion,
+ '1234')
+
+ def test_wait_for_deletion_with_unimplemented_deleted_method(self):
+ self.rest_client.is_resource_deleted = self.original_deleted_method
+ self.assertRaises(NotImplementedError,
+ self.rest_client.wait_for_resource_deletion,
+ '1234')
+
+
+class TestNegativeRestClient(BaseRestClientTestClass):
+
+ def setUp(self):
+ self.fake_http = fake_http.fake_httplib2()
+ super(TestNegativeRestClient, self).setUp()
+ self.negative_rest_client = rest_client.NegativeRestClient(
+ fake_auth_provider.FakeAuthProvider())
+ self.useFixture(mockpatch.PatchObject(self.negative_rest_client,
+ '_log_request'))
+
+ def test_post(self):
+ __, return_dict = self.negative_rest_client.send_request('POST',
+ self.url,
+ [], {})
+ self.assertEqual('POST', return_dict['method'])
+
+ def test_get(self):
+ __, return_dict = self.negative_rest_client.send_request('GET',
+ self.url,
+ [])
+ self.assertEqual('GET', return_dict['method'])
+
+ def test_delete(self):
+ __, return_dict = self.negative_rest_client.send_request('DELETE',
+ self.url,
+ [])
+ self.assertEqual('DELETE', return_dict['method'])
+
+ def test_patch(self):
+ __, return_dict = self.negative_rest_client.send_request('PATCH',
+ self.url,
+ [], {})
+ self.assertEqual('PATCH', return_dict['method'])
+
+ def test_put(self):
+ __, return_dict = self.negative_rest_client.send_request('PUT',
+ self.url,
+ [], {})
+ self.assertEqual('PUT', return_dict['method'])
+
+ def test_head(self):
+ self.useFixture(mockpatch.PatchObject(self.negative_rest_client,
+ 'response_checker'))
+ __, return_dict = self.negative_rest_client.send_request('HEAD',
+ self.url,
+ [])
+ self.assertEqual('HEAD', return_dict['method'])
+
+ def test_copy(self):
+ __, return_dict = self.negative_rest_client.send_request('COPY',
+ self.url,
+ [])
+ self.assertEqual('COPY', return_dict['method'])
+
+ def test_other(self):
+ self.assertRaises(AssertionError,
+ self.negative_rest_client.send_request,
+ 'OTHER', self.url, [])
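TestRestClientUtils constrains wait_for_resource_deletion to poll is_resource_deleted, sleep between attempts, and raise a timeout error once build_timeout is exceeded. A minimal sketch of such a loop (the real method is on rest_client.RestClient; the build_interval attribute and the local exception class are assumptions for illustration):

import time


class TimeoutException(Exception):
    pass


def wait_for_resource_deletion(client, resource_id):
    start = time.time()
    while not client.is_resource_deleted(resource_id):
        # Give up once the configured build timeout has elapsed.
        if time.time() - start >= client.build_timeout:
            raise TimeoutException('%s failed to delete in time' % resource_id)
        time.sleep(client.build_interval)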
diff --git a/tempest/tests/test_tenant_isolation.py b/tempest/tests/test_tenant_isolation.py
new file mode 100644
index 0000000..9292dcb
--- /dev/null
+++ b/tempest/tests/test_tenant_isolation.py
@@ -0,0 +1,480 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import keystoneclient.v2_0.client as keystoneclient
+from mock import patch
+import neutronclient.v2_0.client as neutronclient
+from oslo.config import cfg
+
+from tempest import clients
+from tempest.common import http
+from tempest.common import isolated_creds
+from tempest import config
+from tempest import exceptions
+from tempest.openstack.common.fixture import mockpatch
+from tempest.services.identity.json import identity_client as json_iden_client
+from tempest.services.identity.xml import identity_client as xml_iden_client
+from tempest.services.network.json import network_client as json_network_client
+from tempest.services.network.xml import network_client as xml_network_client
+from tempest.tests import base
+from tempest.tests import fake_config
+from tempest.tests import fake_http
+from tempest.tests import fake_identity
+
+
+class TestTenantIsolation(base.TestCase):
+
+ def setUp(self):
+ super(TestTenantIsolation, self).setUp()
+ self.useFixture(fake_config.ConfigFixture())
+ self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
+ self.fake_http = fake_http.fake_httplib2(return_type=200)
+ self.stubs.Set(http.ClosingHttp, 'request',
+ fake_identity._fake_v2_response)
+
+ def test_tempest_client(self):
+ iso_creds = isolated_creds.IsolatedCreds('test class')
+ self.assertTrue(isinstance(iso_creds.identity_admin_client,
+ json_iden_client.IdentityClientJSON))
+ self.assertTrue(isinstance(iso_creds.network_admin_client,
+ json_network_client.NetworkClientJSON))
+
+ def test_official_client(self):
+ self.useFixture(mockpatch.PatchObject(keystoneclient.Client,
+ 'authenticate'))
+ self.useFixture(mockpatch.PatchObject(clients.OfficialClientManager,
+ '_get_image_client'))
+ self.useFixture(mockpatch.PatchObject(clients.OfficialClientManager,
+ '_get_object_storage_client'))
+ self.useFixture(mockpatch.PatchObject(clients.OfficialClientManager,
+ '_get_orchestration_client'))
+ iso_creds = isolated_creds.IsolatedCreds('test class',
+ tempest_client=False)
+ self.assertTrue(isinstance(iso_creds.identity_admin_client,
+ keystoneclient.Client))
+ self.assertTrue(isinstance(iso_creds.network_admin_client,
+ neutronclient.Client))
+
+ def test_tempest_client_xml(self):
+ iso_creds = isolated_creds.IsolatedCreds('test class', interface='xml')
+ self.assertEqual(iso_creds.interface, 'xml')
+ self.assertTrue(isinstance(iso_creds.identity_admin_client,
+ xml_iden_client.IdentityClientXML))
+ self.assertTrue(isinstance(iso_creds.network_admin_client,
+ xml_network_client.NetworkClientXML))
+
+ def _mock_user_create(self, id, name):
+ user_fix = self.useFixture(mockpatch.PatchObject(
+ json_iden_client.IdentityClientJSON,
+ 'create_user',
+ return_value=({'status': 200},
+ {'id': id, 'name': name})))
+ return user_fix
+
+ def _mock_tenant_create(self, id, name):
+ tenant_fix = self.useFixture(mockpatch.PatchObject(
+ json_iden_client.IdentityClientJSON,
+ 'create_tenant',
+ return_value=({'status': 200},
+ {'id': id, 'name': name})))
+ return tenant_fix
+
+ def _mock_network_create(self, iso_creds, id, name):
+ net_fix = self.useFixture(mockpatch.PatchObject(
+ iso_creds.network_admin_client,
+ 'create_network',
+ return_value=({'status': 200},
+ {'network': {'id': id, 'name': name}})))
+ return net_fix
+
+ def _mock_subnet_create(self, iso_creds, id, name):
+ subnet_fix = self.useFixture(mockpatch.PatchObject(
+ iso_creds.network_admin_client,
+ 'create_subnet',
+ return_value=({'status': 200},
+ {'subnet': {'id': id, 'name': name}})))
+ return subnet_fix
+
+ def _mock_router_create(self, id, name):
+ router_fix = self.useFixture(mockpatch.PatchObject(
+ json_network_client.NetworkClientJSON,
+ 'create_router',
+ return_value=({'status': 200},
+ {'router': {'id': id, 'name': name}})))
+ return router_fix
+
+ @patch('tempest.common.rest_client.RestClient')
+ def test_primary_creds(self, MockRestClient):
+ cfg.CONF.set_default('neutron', False, 'service_available')
+ iso_creds = isolated_creds.IsolatedCreds('test class',
+ password='fake_password')
+ self._mock_tenant_create('1234', 'fake_prim_tenant')
+ self._mock_user_create('1234', 'fake_prim_user')
+ primary_creds = iso_creds.get_primary_creds()
+ self.assertEqual(primary_creds.username, 'fake_prim_user')
+ self.assertEqual(primary_creds.tenant_name, 'fake_prim_tenant')
+ # Verify IDs
+ self.assertEqual(primary_creds.tenant_id, '1234')
+ self.assertEqual(primary_creds.user_id, '1234')
+
+ @patch('tempest.common.rest_client.RestClient')
+ def test_admin_creds(self, MockRestClient):
+ cfg.CONF.set_default('neutron', False, 'service_available')
+ iso_creds = isolated_creds.IsolatedCreds('test class',
+ password='fake_password')
+ self._mock_user_create('1234', 'fake_admin_user')
+ self._mock_tenant_create('1234', 'fake_admin_tenant')
+ self.useFixture(mockpatch.PatchObject(
+ json_iden_client.IdentityClientJSON,
+ 'list_roles',
+ return_value=({'status': 200},
+ [{'id': '1234', 'name': 'admin'}])))
+
+ user_mock = patch.object(json_iden_client.IdentityClientJSON,
+ 'assign_user_role')
+ user_mock.start()
+ self.addCleanup(user_mock.stop)
+ with patch.object(json_iden_client.IdentityClientJSON,
+ 'assign_user_role') as user_mock:
+ admin_creds = iso_creds.get_admin_creds()
+ user_mock.assert_called_once_with('1234', '1234', '1234')
+ self.assertEqual(admin_creds.username, 'fake_admin_user')
+ self.assertEqual(admin_creds.tenant_name, 'fake_admin_tenant')
+ # Verify IDs
+ self.assertEqual(admin_creds.tenant_id, '1234')
+ self.assertEqual(admin_creds.user_id, '1234')
+
+ @patch('tempest.common.rest_client.RestClient')
+ def test_all_cred_cleanup(self, MockRestClient):
+ cfg.CONF.set_default('neutron', False, 'service_available')
+ iso_creds = isolated_creds.IsolatedCreds('test class',
+ password='fake_password')
+ tenant_fix = self._mock_tenant_create('1234', 'fake_prim_tenant')
+ user_fix = self._mock_user_create('1234', 'fake_prim_user')
+ iso_creds.get_primary_creds()
+ tenant_fix.cleanUp()
+ user_fix.cleanUp()
+ tenant_fix = self._mock_tenant_create('12345', 'fake_alt_tenant')
+ user_fix = self._mock_user_create('12345', 'fake_alt_user')
+ iso_creds.get_alt_creds()
+ tenant_fix.cleanUp()
+ user_fix.cleanUp()
+ tenant_fix = self._mock_tenant_create('123456', 'fake_admin_tenant')
+ user_fix = self._mock_user_create('123456', 'fake_admin_user')
+ self.useFixture(mockpatch.PatchObject(
+ json_iden_client.IdentityClientJSON,
+ 'list_roles',
+ return_value=({'status': 200},
+ [{'id': '123456', 'name': 'admin'}])))
+ with patch.object(json_iden_client.IdentityClientJSON,
+ 'assign_user_role'):
+ iso_creds.get_admin_creds()
+ user_mock = self.patch(
+ 'tempest.services.identity.json.identity_client.'
+ 'IdentityClientJSON.delete_user')
+ tenant_mock = self.patch(
+ 'tempest.services.identity.json.identity_client.'
+ 'IdentityClientJSON.delete_tenant')
+ iso_creds.clear_isolated_creds()
+ # Verify user delete calls
+ calls = user_mock.mock_calls
+ self.assertEqual(len(calls), 3)
+ args = map(lambda x: x[1][0], calls)
+ self.assertIn('1234', args)
+ self.assertIn('12345', args)
+ self.assertIn('123456', args)
+ # Verify tenant delete calls
+ calls = tenant_mock.mock_calls
+ self.assertEqual(len(calls), 3)
+ args = map(lambda x: x[1][0], calls)
+ self.assertIn('1234', args)
+ self.assertIn('12345', args)
+ self.assertIn('123456', args)
+
+ @patch('tempest.common.rest_client.RestClient')
+ def test_alt_creds(self, MockRestClient):
+ cfg.CONF.set_default('neutron', False, 'service_available')
+ iso_creds = isolated_creds.IsolatedCreds('test class',
+ password='fake_password')
+ self._mock_user_create('1234', 'fake_alt_user')
+ self._mock_tenant_create('1234', 'fake_alt_tenant')
+ alt_creds = iso_creds.get_alt_creds()
+ self.assertEqual(alt_creds.username, 'fake_alt_user')
+ self.assertEqual(alt_creds.tenant_name, 'fake_alt_tenant')
+ # Verify IDs
+ self.assertEqual(alt_creds.tenant_id, '1234')
+ self.assertEqual(alt_creds.user_id, '1234')
+
+ @patch('tempest.common.rest_client.RestClient')
+ def test_network_creation(self, MockRestClient):
+ iso_creds = isolated_creds.IsolatedCreds('test class',
+ password='fake_password')
+ self._mock_user_create('1234', 'fake_prim_user')
+ self._mock_tenant_create('1234', 'fake_prim_tenant')
+ self._mock_network_create(iso_creds, '1234', 'fake_net')
+ self._mock_subnet_create(iso_creds, '1234', 'fake_subnet')
+ self._mock_router_create('1234', 'fake_router')
+ router_interface_mock = self.patch(
+ 'tempest.services.network.json.network_client.NetworkClientJSON.'
+ 'add_router_interface_with_subnet_id')
+ iso_creds.get_primary_creds()
+        router_interface_mock.assert_called_once_with('1234', '1234')
+ network = iso_creds.get_primary_network()
+ subnet = iso_creds.get_primary_subnet()
+ router = iso_creds.get_primary_router()
+ self.assertEqual(network['id'], '1234')
+ self.assertEqual(network['name'], 'fake_net')
+ self.assertEqual(subnet['id'], '1234')
+ self.assertEqual(subnet['name'], 'fake_subnet')
+ self.assertEqual(router['id'], '1234')
+ self.assertEqual(router['name'], 'fake_router')
+
+ @patch('tempest.common.rest_client.RestClient')
+ def test_network_cleanup(self, MockRestClient):
+ iso_creds = isolated_creds.IsolatedCreds('test class',
+ password='fake_password')
+ # Create primary tenant and network
+ user_fix = self._mock_user_create('1234', 'fake_prim_user')
+ tenant_fix = self._mock_tenant_create('1234', 'fake_prim_tenant')
+ net_fix = self._mock_network_create(iso_creds, '1234', 'fake_net')
+ subnet_fix = self._mock_subnet_create(iso_creds, '1234', 'fake_subnet')
+ router_fix = self._mock_router_create('1234', 'fake_router')
+ router_interface_mock = self.patch(
+ 'tempest.services.network.json.network_client.NetworkClientJSON.'
+ 'add_router_interface_with_subnet_id')
+ iso_creds.get_primary_creds()
+        router_interface_mock.assert_called_once_with('1234', '1234')
+ router_interface_mock.reset_mock()
+ tenant_fix.cleanUp()
+ user_fix.cleanUp()
+ net_fix.cleanUp()
+ subnet_fix.cleanUp()
+ router_fix.cleanUp()
+ # Create alternate tenant and network
+ user_fix = self._mock_user_create('12345', 'fake_alt_user')
+ tenant_fix = self._mock_tenant_create('12345', 'fake_alt_tenant')
+ net_fix = self._mock_network_create(iso_creds, '12345', 'fake_alt_net')
+ subnet_fix = self._mock_subnet_create(iso_creds, '12345',
+ 'fake_alt_subnet')
+ router_fix = self._mock_router_create('12345', 'fake_alt_router')
+ iso_creds.get_alt_creds()
+        router_interface_mock.assert_called_once_with('12345', '12345')
+ router_interface_mock.reset_mock()
+ tenant_fix.cleanUp()
+ user_fix.cleanUp()
+ net_fix.cleanUp()
+ subnet_fix.cleanUp()
+ router_fix.cleanUp()
+ # Create admin tenant and networks
+ user_fix = self._mock_user_create('123456', 'fake_admin_user')
+ tenant_fix = self._mock_tenant_create('123456', 'fake_admin_tenant')
+ net_fix = self._mock_network_create(iso_creds, '123456',
+ 'fake_admin_net')
+ subnet_fix = self._mock_subnet_create(iso_creds, '123456',
+ 'fake_admin_subnet')
+ router_fix = self._mock_router_create('123456', 'fake_admin_router')
+ self.useFixture(mockpatch.PatchObject(
+ json_iden_client.IdentityClientJSON,
+ 'list_roles',
+ return_value=({'status': 200},
+ [{'id': '123456', 'name': 'admin'}])))
+ with patch.object(json_iden_client.IdentityClientJSON,
+ 'assign_user_role'):
+ iso_creds.get_admin_creds()
+ self.patch('tempest.services.identity.json.identity_client.'
+ 'IdentityClientJSON.delete_user')
+ self.patch('tempest.services.identity.json.identity_client.'
+ 'IdentityClientJSON.delete_tenant')
+ net = patch.object(iso_creds.network_admin_client,
+ 'delete_network')
+ net_mock = net.start()
+ subnet = patch.object(iso_creds.network_admin_client,
+ 'delete_subnet')
+ subnet_mock = subnet.start()
+ router = patch.object(iso_creds.network_admin_client,
+ 'delete_router')
+ router_mock = router.start()
+ remove_router_interface_mock = self.patch(
+ 'tempest.services.network.json.network_client.NetworkClientJSON.'
+ 'remove_router_interface_with_subnet_id')
+ port_list_mock = patch.object(iso_creds.network_admin_client,
+ 'list_ports', return_value=(
+ {'status': 200}, {'ports': []}))
+ port_list_mock.start()
+ iso_creds.clear_isolated_creds()
+ # Verify remove router interface calls
+ calls = remove_router_interface_mock.mock_calls
+ self.assertEqual(len(calls), 3)
+ args = map(lambda x: x[1], calls)
+ self.assertIn(('1234', '1234'), args)
+ self.assertIn(('12345', '12345'), args)
+ self.assertIn(('123456', '123456'), args)
+ # Verify network delete calls
+ calls = net_mock.mock_calls
+ self.assertEqual(len(calls), 3)
+ args = map(lambda x: x[1][0], calls)
+ self.assertIn('1234', args)
+ self.assertIn('12345', args)
+ self.assertIn('123456', args)
+ # Verify subnet delete calls
+ calls = subnet_mock.mock_calls
+ self.assertEqual(len(calls), 3)
+ args = map(lambda x: x[1][0], calls)
+ self.assertIn('1234', args)
+ self.assertIn('12345', args)
+ self.assertIn('123456', args)
+ # Verify router delete calls
+ calls = router_mock.mock_calls
+ self.assertEqual(len(calls), 3)
+ args = map(lambda x: x[1][0], calls)
+ self.assertIn('1234', args)
+ self.assertIn('12345', args)
+ self.assertIn('123456', args)
+
+ @patch('tempest.common.rest_client.RestClient')
+ def test_network_alt_creation(self, MockRestClient):
+ iso_creds = isolated_creds.IsolatedCreds('test class',
+ password='fake_password')
+ self._mock_user_create('1234', 'fake_alt_user')
+ self._mock_tenant_create('1234', 'fake_alt_tenant')
+ self._mock_network_create(iso_creds, '1234', 'fake_alt_net')
+ self._mock_subnet_create(iso_creds, '1234', 'fake_alt_subnet')
+ self._mock_router_create('1234', 'fake_alt_router')
+ router_interface_mock = self.patch(
+ 'tempest.services.network.json.network_client.NetworkClientJSON.'
+ 'add_router_interface_with_subnet_id')
+ iso_creds.get_alt_creds()
+        router_interface_mock.assert_called_once_with('1234', '1234')
+ network = iso_creds.get_alt_network()
+ subnet = iso_creds.get_alt_subnet()
+ router = iso_creds.get_alt_router()
+ self.assertEqual(network['id'], '1234')
+ self.assertEqual(network['name'], 'fake_alt_net')
+ self.assertEqual(subnet['id'], '1234')
+ self.assertEqual(subnet['name'], 'fake_alt_subnet')
+ self.assertEqual(router['id'], '1234')
+ self.assertEqual(router['name'], 'fake_alt_router')
+
+ @patch('tempest.common.rest_client.RestClient')
+ def test_network_admin_creation(self, MockRestClient):
+ iso_creds = isolated_creds.IsolatedCreds('test class',
+ password='fake_password')
+ self._mock_user_create('1234', 'fake_admin_user')
+ self._mock_tenant_create('1234', 'fake_admin_tenant')
+ self._mock_network_create(iso_creds, '1234', 'fake_admin_net')
+ self._mock_subnet_create(iso_creds, '1234', 'fake_admin_subnet')
+ self._mock_router_create('1234', 'fake_admin_router')
+ router_interface_mock = self.patch(
+ 'tempest.services.network.json.network_client.NetworkClientJSON.'
+ 'add_router_interface_with_subnet_id')
+ self.useFixture(mockpatch.PatchObject(
+ json_iden_client.IdentityClientJSON,
+ 'list_roles',
+ return_value=({'status': 200},
+ [{'id': '123456', 'name': 'admin'}])))
+ with patch.object(json_iden_client.IdentityClientJSON,
+ 'assign_user_role'):
+ iso_creds.get_admin_creds()
+        router_interface_mock.assert_called_once_with('1234', '1234')
+ network = iso_creds.get_admin_network()
+ subnet = iso_creds.get_admin_subnet()
+ router = iso_creds.get_admin_router()
+ self.assertEqual(network['id'], '1234')
+ self.assertEqual(network['name'], 'fake_admin_net')
+ self.assertEqual(subnet['id'], '1234')
+ self.assertEqual(subnet['name'], 'fake_admin_subnet')
+ self.assertEqual(router['id'], '1234')
+ self.assertEqual(router['name'], 'fake_admin_router')
+
+ @patch('tempest.common.rest_client.RestClient')
+ def test_no_network_resources(self, MockRestClient):
+ net_dict = {
+ 'network': False,
+ 'router': False,
+ 'subnet': False,
+ 'dhcp': False,
+ }
+ iso_creds = isolated_creds.IsolatedCreds('test class',
+ password='fake_password',
+ network_resources=net_dict)
+ self._mock_user_create('1234', 'fake_prim_user')
+ self._mock_tenant_create('1234', 'fake_prim_tenant')
+ net = patch.object(iso_creds.network_admin_client,
+ 'delete_network')
+ net_mock = net.start()
+ subnet = patch.object(iso_creds.network_admin_client,
+ 'delete_subnet')
+ subnet_mock = subnet.start()
+ router = patch.object(iso_creds.network_admin_client,
+ 'delete_router')
+ router_mock = router.start()
+
+ iso_creds.get_primary_creds()
+ self.assertEqual(net_mock.mock_calls, [])
+ self.assertEqual(subnet_mock.mock_calls, [])
+ self.assertEqual(router_mock.mock_calls, [])
+ network = iso_creds.get_primary_network()
+ subnet = iso_creds.get_primary_subnet()
+ router = iso_creds.get_primary_router()
+ self.assertIsNone(network)
+ self.assertIsNone(subnet)
+ self.assertIsNone(router)
+
+ @patch('tempest.common.rest_client.RestClient')
+ def test_router_without_network(self, MockRestClient):
+ net_dict = {
+ 'network': False,
+ 'router': True,
+ 'subnet': False,
+ 'dhcp': False,
+ }
+ iso_creds = isolated_creds.IsolatedCreds('test class',
+ password='fake_password',
+ network_resources=net_dict)
+ self._mock_user_create('1234', 'fake_prim_user')
+ self._mock_tenant_create('1234', 'fake_prim_tenant')
+ self.assertRaises(exceptions.InvalidConfiguration,
+ iso_creds.get_primary_creds)
+
+ @patch('tempest.common.rest_client.RestClient')
+ def test_subnet_without_network(self, MockRestClient):
+ net_dict = {
+ 'network': False,
+ 'router': False,
+ 'subnet': True,
+ 'dhcp': False,
+ }
+ iso_creds = isolated_creds.IsolatedCreds('test class',
+ password='fake_password',
+ network_resources=net_dict)
+ self._mock_user_create('1234', 'fake_prim_user')
+ self._mock_tenant_create('1234', 'fake_prim_tenant')
+ self.assertRaises(exceptions.InvalidConfiguration,
+ iso_creds.get_primary_creds)
+
+ @patch('tempest.common.rest_client.RestClient')
+ def test_dhcp_without_subnet(self, MockRestClient):
+ net_dict = {
+ 'network': False,
+ 'router': False,
+ 'subnet': False,
+ 'dhcp': True,
+ }
+ iso_creds = isolated_creds.IsolatedCreds('test class',
+ password='fake_password',
+ network_resources=net_dict)
+ self._mock_user_create('1234', 'fake_prim_user')
+ self._mock_tenant_create('1234', 'fake_prim_tenant')
+ self.assertRaises(exceptions.InvalidConfiguration,
+ iso_creds.get_primary_creds)
diff --git a/tempest/tests/test_waiters.py b/tempest/tests/test_waiters.py
new file mode 100644
index 0000000..1f9825e
--- /dev/null
+++ b/tempest/tests/test_waiters.py
@@ -0,0 +1,49 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import time
+
+import mock
+
+from tempest.common import waiters
+from tempest import exceptions
+from tempest.tests import base
+
+
+class TestImageWaiters(base.TestCase):
+ def setUp(self):
+ super(TestImageWaiters, self).setUp()
+ self.client = mock.MagicMock()
+ self.client.build_timeout = 1
+ self.client.build_interval = 1
+
+ def test_wait_for_image_status(self):
+ self.client.get_image.return_value = (None, {'status': 'active'})
+ start_time = int(time.time())
+ waiters.wait_for_image_status(self.client, 'fake_image_id', 'active')
+ end_time = int(time.time())
+        # Ensure the waiter returns promptly once the image is already active
+ self.assertTrue((end_time - start_time) < 10)
+
+ def test_wait_for_image_status_timeout(self):
+ self.client.get_image.return_value = (None, {'status': 'saving'})
+ self.assertRaises(exceptions.TimeoutException,
+ waiters.wait_for_image_status,
+ self.client, 'fake_image_id', 'active')
+
+ def test_wait_for_image_status_error_on_image_create(self):
+ self.client.get_image.return_value = (None, {'status': 'ERROR'})
+ self.assertRaises(exceptions.AddImageException,
+ waiters.wait_for_image_status,
+ self.client, 'fake_image_id', 'active')
diff --git a/tempest/tests/test_wrappers.py b/tempest/tests/test_wrappers.py
index f6ed445..bba4012 100644
--- a/tempest/tests/test_wrappers.py
+++ b/tempest/tests/test_wrappers.py
@@ -14,6 +14,7 @@
import os
import shutil
+import StringIO
import subprocess
import tempfile
@@ -33,6 +34,7 @@
# Setup Test files
self.testr_conf_file = os.path.join(self.directory, '.testr.conf')
self.setup_cfg_file = os.path.join(self.directory, 'setup.cfg')
+ self.subunit_trace = os.path.join(self.directory, 'subunit-trace.py')
self.passing_file = os.path.join(self.test_dir, 'test_passing.py')
self.failing_file = os.path.join(self.test_dir, 'test_failing.py')
self.init_file = os.path.join(self.test_dir, '__init__.py')
@@ -43,55 +45,48 @@
shutil.copy('setup.py', self.setup_py)
shutil.copy('tempest/tests/files/setup.cfg', self.setup_cfg_file)
shutil.copy('tempest/tests/files/__init__.py', self.init_file)
+ shutil.copy('tools/subunit-trace.py', self.subunit_trace)
+ # copy over the pretty_tox scripts
+ shutil.copy('tools/pretty_tox.sh',
+ os.path.join(self.directory, 'pretty_tox.sh'))
+ shutil.copy('tools/pretty_tox_serial.sh',
+ os.path.join(self.directory, 'pretty_tox_serial.sh'))
+
+ self.stdout = StringIO.StringIO()
+ self.stderr = StringIO.StringIO()
+ # Change directory, run wrapper and check result
+ self.addCleanup(os.chdir, os.path.abspath(os.curdir))
+ os.chdir(self.directory)
+
+ def assertRunExit(self, cmd, expected):
+ p = subprocess.Popen(
+ "bash %s" % cmd, shell=True,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        # Calling wait() with pipes attached can deadlock if a pipe buffer
+        # fills up, however the amount of data coming back on these pipes
+        # is small enough that it shouldn't be a problem.
+ p.wait()
+
+ self.assertEqual(
+ p.returncode, expected,
+ "Stdout: %s; Stderr: %s" % (p.stdout, p.stderr))
def test_pretty_tox(self):
- # Copy wrapper script and requirements:
- pretty_tox = os.path.join(self.directory, 'pretty_tox.sh')
- shutil.copy('tools/pretty_tox.sh', pretty_tox)
- # Change directory, run wrapper and check result
- self.addCleanup(os.chdir, os.path.abspath(os.curdir))
- os.chdir(self.directory)
# Git init is required for the pbr testr command. pbr requires a git
# version or an sdist to work. so make the test directory a git repo
# too.
- subprocess.call(['git', 'init'])
- exit_code = subprocess.call('bash pretty_tox.sh tests.passing',
- shell=True, stdout=DEVNULL, stderr=DEVNULL)
- self.assertEqual(exit_code, 0)
+ subprocess.call(['git', 'init'], stderr=DEVNULL)
+ self.assertRunExit('pretty_tox.sh tests.passing', 0)
def test_pretty_tox_fails(self):
- # Copy wrapper script and requirements:
- pretty_tox = os.path.join(self.directory, 'pretty_tox.sh')
- shutil.copy('tools/pretty_tox.sh', pretty_tox)
- # Change directory, run wrapper and check result
- self.addCleanup(os.chdir, os.path.abspath(os.curdir))
- os.chdir(self.directory)
# Git init is required for the pbr testr command. pbr requires a git
# version or an sdist to work. so make the test directory a git repo
# too.
- subprocess.call(['git', 'init'])
- exit_code = subprocess.call('bash pretty_tox.sh', shell=True,
- stdout=DEVNULL, stderr=DEVNULL)
- self.assertEqual(exit_code, 1)
+ subprocess.call(['git', 'init'], stderr=DEVNULL)
+ self.assertRunExit('pretty_tox.sh', 1)
def test_pretty_tox_serial(self):
- # Copy wrapper script and requirements:
- pretty_tox = os.path.join(self.directory, 'pretty_tox_serial.sh')
- shutil.copy('tools/pretty_tox_serial.sh', pretty_tox)
- # Change directory, run wrapper and check result
- self.addCleanup(os.chdir, os.path.abspath(os.curdir))
- os.chdir(self.directory)
- exit_code = subprocess.call('bash pretty_tox_serial.sh tests.passing',
- shell=True, stdout=DEVNULL, stderr=DEVNULL)
- self.assertEqual(exit_code, 0)
+ self.assertRunExit('pretty_tox_serial.sh tests.passing', 0)
def test_pretty_tox_serial_fails(self):
- # Copy wrapper script and requirements:
- pretty_tox = os.path.join(self.directory, 'pretty_tox_serial.sh')
- shutil.copy('tools/pretty_tox_serial.sh', pretty_tox)
- # Change directory, run wrapper and check result
- self.addCleanup(os.chdir, os.path.abspath(os.curdir))
- os.chdir(self.directory)
- exit_code = subprocess.call('bash pretty_tox_serial.sh', shell=True,
- stdout=DEVNULL, stderr=DEVNULL)
- self.assertEqual(exit_code, 1)
+ self.assertRunExit('pretty_tox_serial.sh', 1)
diff --git a/tempest/thirdparty/boto/test.py b/tempest/thirdparty/boto/test.py
index 10d421e..4c39f78 100644
--- a/tempest/thirdparty/boto/test.py
+++ b/tempest/thirdparty/boto/test.py
@@ -108,6 +108,9 @@
CODE_RE = '.*' # regexp makes sense in group match
def match(self, exc):
+ """:returns: Retruns with an error string if not matches,
+ returns with None when matches.
+ """
if not isinstance(exc, exception.BotoServerError):
return "%r not an BotoServerError instance" % exc
LOG.info("Status: %s , error_code: %s", exc.status, exc.error_code)
@@ -119,6 +122,7 @@
return ("Error code (%s) does not match" +
"the expected re pattern \"%s\"") %\
(exc.error_code, self.CODE_RE)
+ return None
class ClientError(BotoExceptionMatcher):
@@ -313,7 +317,7 @@
except ValueError:
return "_GONE"
except exception.EC2ResponseError as exc:
- if colusure_matcher.match(exc):
+ if colusure_matcher.match(exc) is None:
return "_GONE"
else:
raise
@@ -449,7 +453,7 @@
return "_GONE"
except exception.EC2ResponseError as exc:
if cls.ec2_error_code.\
- client.InvalidInstanceID.NotFound.match(exc):
+ client.InvalidInstanceID.NotFound.match(exc) is None:
return "_GONE"
# NOTE(afazekas): incorrect code,
# but the resource must be destoreyd
diff --git a/tempest/thirdparty/boto/test_ec2_instance_run.py b/tempest/thirdparty/boto/test_ec2_instance_run.py
index bbfbb79..33b8d6e 100644
--- a/tempest/thirdparty/boto/test_ec2_instance_run.py
+++ b/tempest/thirdparty/boto/test_ec2_instance_run.py
@@ -40,7 +40,7 @@
": requires ami/aki/ari manifest")))
cls.s3_client = cls.os.s3_client
cls.ec2_client = cls.os.ec2api_client
- cls.zone = cls.ec2_client.get_good_zone()
+ cls.zone = CONF.boto.aws_zone
cls.materials_path = CONF.boto.s3_materials_path
ami_manifest = CONF.boto.ami_manifest
aki_manifest = CONF.boto.aki_manifest
@@ -218,11 +218,8 @@
else:
self.assertNotEqual(instance.state, "running")
- # NOTE(afazekas): doctored test case,
- # with normal validation it would fail
- @test.skip_because(bug="1182679")
@test.attr(type='smoke')
- def test_integration_1(self):
+ def test_compute_with_volumes(self):
# EC2 1. integration test (not strict)
image_ami = self.ec2_client.get_image(self.images["ami"]["image_id"])
sec_group_name = data_utils.rand_name("securitygroup-")
@@ -250,14 +247,20 @@
instance_type=self.instance_type,
key_name=self.keypair_name,
security_groups=(sec_group_name,))
+
+ LOG.debug("Instance booted - state: %s",
+ reservation.instances[0].state)
+
self.addResourceCleanUp(self.destroy_reservation,
reservation)
volume = self.ec2_client.create_volume(1, self.zone)
+ LOG.debug("Volume created - status: %s", volume.status)
+
self.addResourceCleanUp(self.destroy_volume_wait, volume)
instance = reservation.instances[0]
- LOG.info("state: %s", instance.state)
if instance.state != "running":
self.assertInstanceStateWait(instance, "running")
+ LOG.debug("Instance now running - state: %s", instance.state)
address = self.ec2_client.allocate_address()
rcuk_a = self.addResourceCleanUp(address.delete)
@@ -285,10 +288,21 @@
volume.attach(instance.id, "/dev/vdh")
def _volume_state():
+ """Return volume state realizing that 'in-use' is overloaded."""
volume.update(validate=True)
- return volume.status
+ status = volume.status
+ attached = volume.attach_data.status
+ LOG.debug("Volume %s is in status: %s, attach_status: %s",
+ volume.id, status, attached)
+            # Nova reports 'in-use' even while the volume is still attaching
+            # because it exposes a single volume status, whereas EC2 has two
+            # (volume status and attach status). Ensure that if we aren't
+            # attached yet we return something other than 'in-use'.
+ if status == 'in-use' and attached != 'attached':
+ return 'attaching'
+ else:
+ return status
- self.assertVolumeStatusWait(_volume_state, "in-use")
wait.re_search_wait(_volume_state, "in-use")
# NOTE(afazekas): Different Hypervisor backends names
@@ -297,6 +311,7 @@
def _part_state():
current = ssh.get_partitions().split('\n')
+ LOG.debug("Partition map for instance: %s", current)
if current > part_lines:
return 'INCREASE'
if current < part_lines:
@@ -312,7 +327,6 @@
self.assertVolumeStatusWait(_volume_state, "available")
wait.re_search_wait(_volume_state, "available")
- LOG.info("Volume %s state: %s", volume.id, volume.status)
wait.state_wait(_part_state, 'DECREASE')
@@ -324,7 +338,7 @@
self.assertAddressReleasedWait(address)
self.cancelResourceCleanUp(rcuk_a)
- LOG.info("state: %s", instance.state)
+ LOG.debug("Instance %s state: %s", instance.id, instance.state)
if instance.state != "stopped":
self.assertInstanceStateWait(instance, "stopped")
# TODO(afazekas): move steps from teardown to the test case
diff --git a/tempest/thirdparty/boto/test_ec2_volumes.py b/tempest/thirdparty/boto/test_ec2_volumes.py
index 6a771e5..12dea18 100644
--- a/tempest/thirdparty/boto/test_ec2_volumes.py
+++ b/tempest/thirdparty/boto/test_ec2_volumes.py
@@ -38,7 +38,7 @@
raise cls.skipException(skip_msg)
cls.client = cls.os.ec2api_client
- cls.zone = cls.client.get_good_zone()
+ cls.zone = CONF.boto.aws_zone
@test.attr(type='smoke')
def test_create_get_delete(self):
diff --git a/tools/check_logs.py b/tools/check_logs.py
index edf95a1..bc4eaca 100755
--- a/tools/check_logs.py
+++ b/tools/check_logs.py
@@ -30,9 +30,30 @@
dump_all_errors = True
-# As logs are made clean, add to this set
+# As logs are made clean, remove them from this set
-must_be_clean = set(['c-sch', 'g-reg', 'ceilometer-alarm-notifier',
- 'ceilometer-collector', 'horizon', 'n-crt', 'n-obj',
- 'q-vpn'])
+allowed_dirty = set([
+ 'c-api',
+ 'ceilometer-acentral',
+ 'ceilometer-acompute',
+ 'ceilometer-alarm-evaluator',
+ 'ceilometer-anotification',
+ 'ceilometer-api',
+ 'ceilometer-collector',
+ 'c-vol',
+ 'g-api',
+ 'h-api',
+ 'h-eng',
+ 'ir-cond',
+ 'n-api',
+ 'n-cpu',
+ 'n-net',
+ 'q-agt',
+ 'q-dhcp',
+ 'q-lbaas',
+ 'q-meta',
+ 'q-metering',
+ 'q-svc',
+ 'q-vpn',
+ 's-proxy'])
def process_files(file_specs, url_specs, whitelists):
@@ -69,12 +90,12 @@
break
if not whitelisted or dump_all_errors:
if print_log_name:
- print("Log File: %s" % name)
+ print("\nLog File Has Errors: %s" % name)
print_log_name = False
if not whitelisted:
had_errors = True
print("*** Not Whitelisted ***"),
- print(line)
+ print(line.rstrip())
return had_errors
@@ -135,8 +156,8 @@
return 0
failed = False
for log in logs_with_errors:
- if log in must_be_clean:
- print("FAILED: %s" % log)
+ if log not in allowed_dirty:
+ print("Log: %s not allowed to have ERRORS or TRACES" % log)
failed = True
if failed:
return 1
diff --git a/tools/config/check_uptodate.sh b/tools/config/check_uptodate.sh
index 45c8629..528bd5b 100755
--- a/tools/config/check_uptodate.sh
+++ b/tools/config/check_uptodate.sh
@@ -1,10 +1,25 @@
-#!/bin/sh
-TEMPDIR=`mktemp -d`
-CFGFILE=tempest.conf.sample
-tools/config/generate_sample.sh -b ./ -p tempest -o $TEMPDIR
-if ! diff $TEMPDIR/$CFGFILE etc/$CFGFILE
+#!/usr/bin/env bash
+
+PROJECT_NAME=${PROJECT_NAME:-tempest}
+CFGFILE_NAME=${PROJECT_NAME}.conf.sample
+
+if [ -e etc/${PROJECT_NAME}/${CFGFILE_NAME} ]; then
+ CFGFILE=etc/${PROJECT_NAME}/${CFGFILE_NAME}
+elif [ -e etc/${CFGFILE_NAME} ]; then
+ CFGFILE=etc/${CFGFILE_NAME}
+else
+ echo "${0##*/}: can not find config file"
+ exit 1
+fi
+
+TEMPDIR=`mktemp -d /tmp/${PROJECT_NAME}.XXXXXX`
+trap "rm -rf $TEMPDIR" EXIT
+
+tools/config/generate_sample.sh -b ./ -p ${PROJECT_NAME} -o ${TEMPDIR}
+
+if ! diff -u ${TEMPDIR}/${CFGFILE_NAME} ${CFGFILE}
then
- echo "E: tempest.conf.sample is not up to date, please run:"
- echo "tools/generate_sample.sh"
- exit 42
+ echo "${0##*/}: ${PROJECT_NAME}.conf.sample is not up to date."
+ echo "${0##*/}: Please run ${0%%${0##*/}}generate_sample.sh."
+ exit 1
fi
diff --git a/tools/config/generate_sample.sh b/tools/config/generate_sample.sh
index 607fecb..20ddfbb 100755
--- a/tools/config/generate_sample.sh
+++ b/tools/config/generate_sample.sh
@@ -4,8 +4,8 @@
echo "Try \`${0##*/} --help' for more information." >&2
}
-PARSED_OPTIONS=$(getopt -n "${0##*/}" -o hb:p:o: \
- --long help,base-dir:,package-name:,output-dir: -- "$@")
+PARSED_OPTIONS=$(getopt -n "${0##*/}" -o hb:p:m:l:o: \
+ --long help,base-dir:,package-name:,output-dir:,module:,library: -- "$@")
if [ $? != 0 ] ; then print_hint ; exit 1 ; fi
@@ -21,6 +21,8 @@
echo "-b, --base-dir=DIR project base directory"
echo "-p, --package-name=NAME project package name"
echo "-o, --output-dir=DIR file output directory"
+ echo "-m, --module=MOD extra python module to interrogate for options"
+ echo "-l, --library=LIB extra library that registers options for discovery"
exit 0
;;
-b|--base-dir)
@@ -38,6 +40,16 @@
OUTPUTDIR=`echo $1 | sed -e 's/\/*$//g'`
shift
;;
+ -m|--module)
+ shift
+ MODULES="$MODULES -m $1"
+ shift
+ ;;
+ -l|--library)
+ shift
+ LIBRARIES="$LIBRARIES -l $1"
+ shift
+ ;;
--)
break
;;
@@ -53,7 +65,7 @@
BASEDIR=$(cd "$BASEDIR" && pwd)
fi
-PACKAGENAME=${PACKAGENAME:-${BASEDIR##*/}}
+PACKAGENAME=${PACKAGENAME:-$(python setup.py --name)}
TARGETDIR=$BASEDIR/$PACKAGENAME
if ! [ -d $TARGETDIR ]
then
@@ -77,12 +89,20 @@
FILES=$(find $TARGETDIR -type f -name "*.py" ! -path "*/tests/*" \
-exec grep -l "Opt(" {} + | sed -e "s/^$BASEDIRESC\///g" | sort -u)
-EXTRA_MODULES_FILE="`dirname $0`/oslo.config.generator.rc"
-if test -r "$EXTRA_MODULES_FILE"
+RC_FILE="`dirname $0`/oslo.config.generator.rc"
+if test -r "$RC_FILE"
then
- source "$EXTRA_MODULES_FILE"
+ source "$RC_FILE"
fi
+for mod in ${TEMPEST_CONFIG_GENERATOR_EXTRA_MODULES}; do
+ MODULES="$MODULES -m $mod"
+done
+
+for lib in ${TEMPEST_CONFIG_GENERATOR_EXTRA_LIBRARIES}; do
+ LIBRARIES="$LIBRARIES -l $lib"
+done
+
export EVENTLET_NO_GREENDNS=yes
OS_VARS=$(set | sed -n '/^OS_/s/=[^=]*$//gp' | xargs)
@@ -90,7 +110,7 @@
DEFAULT_MODULEPATH=tempest.openstack.common.config.generator
MODULEPATH=${MODULEPATH:-$DEFAULT_MODULEPATH}
OUTPUTFILE=$OUTPUTDIR/$PACKAGENAME.conf.sample
-python -m $MODULEPATH $FILES > $OUTPUTFILE
+python -m $MODULEPATH $MODULES $LIBRARIES $FILES > $OUTPUTFILE
# Hook to allow projects to append custom config file snippets
CONCAT_FILES=$(ls $BASEDIR/tools/config/*.conf.sample 2>/dev/null)
diff --git a/tools/install_venv_common.py b/tools/install_venv_common.py
index 46822e3..743b59d 100644
--- a/tools/install_venv_common.py
+++ b/tools/install_venv_common.py
@@ -101,7 +101,6 @@
print('done.')
else:
print("venv already exists...")
- pass
def pip_install(self, *args):
self.run_command(['tools/with_venv.sh',
diff --git a/tools/pretty_tox.sh b/tools/pretty_tox.sh
index 07c35a0..f3c88f3 100755
--- a/tools/pretty_tox.sh
+++ b/tools/pretty_tox.sh
@@ -3,4 +3,4 @@
set -o pipefail
TESTRARGS=$1
-python setup.py testr --slowest --testr-args="--subunit $TESTRARGS" | subunit2pyunit
+python setup.py testr --slowest --testr-args="--subunit $TESTRARGS" | $(dirname $0)/subunit-trace.py
diff --git a/tools/pretty_tox_serial.sh b/tools/pretty_tox_serial.sh
index 42ce760..1634b8e 100755
--- a/tools/pretty_tox_serial.sh
+++ b/tools/pretty_tox_serial.sh
@@ -7,7 +7,7 @@
if [ ! -d .testrepository ]; then
testr init
fi
-testr run --subunit $TESTRARGS | subunit2pyunit
+testr run --subunit $TESTRARGS | $(dirname $0)/subunit-trace.py
retval=$?
testr slowest
exit $retval
diff --git a/tools/subunit-trace.py b/tools/subunit-trace.py
new file mode 100755
index 0000000..7bb88a4
--- /dev/null
+++ b/tools/subunit-trace.py
@@ -0,0 +1,268 @@
+#!/usr/bin/env python
+
+# Copyright 2014 Hewlett-Packard Development Company, L.P.
+# Copyright 2014 Samsung Electronics
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Trace a subunit stream in reasonable detail and high accuracy."""
+
+import functools
+import re
+import sys
+
+import mimeparse
+import subunit
+import testtools
+
+DAY_SECONDS = 60 * 60 * 24
+FAILS = []
+RESULTS = {}
+
+
+class Starts(testtools.StreamResult):
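+    """Stream result that prints a '[start]' line as each test begins."""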
+
+ def __init__(self, output):
+ super(Starts, self).__init__()
+ self._output = output
+
+ def startTestRun(self):
+ self._neednewline = False
+ self._emitted = set()
+
+ def status(self, test_id=None, test_status=None, test_tags=None,
+ runnable=True, file_name=None, file_bytes=None, eof=False,
+ mime_type=None, route_code=None, timestamp=None):
+ super(Starts, self).status(
+ test_id, test_status,
+ test_tags=test_tags, runnable=runnable, file_name=file_name,
+ file_bytes=file_bytes, eof=eof, mime_type=mime_type,
+ route_code=route_code, timestamp=timestamp)
+ if not test_id:
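+            # Events without a test id are stream-level output (e.g. raw
+            # stdout routed in as a file attachment); echo any payload
+            # straight through to our output.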
+ if not file_bytes:
+ return
+ if not mime_type or mime_type == 'test/plain;charset=utf8':
+ mime_type = 'text/plain; charset=utf-8'
+ primary, sub, parameters = mimeparse.parse_mime_type(mime_type)
+ content_type = testtools.content_type.ContentType(
+ primary, sub, parameters)
+ content = testtools.content.Content(
+ content_type, lambda: [file_bytes])
+ text = content.as_text()
+ if text and text[-1] not in '\r\n':
+ self._neednewline = True
+ self._output.write(text)
+ elif test_status == 'inprogress' and test_id not in self._emitted:
+ if self._neednewline:
+ self._neednewline = False
+ self._output.write('\n')
+ worker = ''
+ for tag in test_tags or ():
+ if tag.startswith('worker-'):
+ worker = '(' + tag[7:] + ') '
+ if timestamp:
+ timestr = timestamp.isoformat()
+ else:
+ timestr = ''
+ self._output.write('%s: %s%s [start]\n' %
+ (timestr, worker, test_id))
+ self._emitted.add(test_id)
+
+
+def cleanup_test_name(name, strip_tags=True, strip_scenarios=False):
+ """Clean up the test name for display.
+
+    By default we strip out the tags in the test name because they don't
+    help us in matching the test that was run to its result.
+
+    Stripping the testscenarios information (not to be confused with
+    tempest scenarios) is also possible, but that information is often
+    needed to identify generated negative tests.
+ """
+ if strip_tags:
+ tags_start = name.find('[')
+ tags_end = name.find(']')
+ if tags_start > 0 and tags_end > tags_start:
+ newname = name[:tags_start]
+ newname += name[tags_end + 1:]
+ name = newname
+
+ if strip_scenarios:
+ tags_start = name.find('(')
+ tags_end = name.find(')')
+ if tags_start > 0 and tags_end > tags_start:
+ newname = name[:tags_start]
+ newname += name[tags_end + 1:]
+ name = newname
+
+ return name
+
+
+def get_duration(timestamps):
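+    # Format the elapsed time between start and end as '<seconds>.<microseconds>s';
+    # return an empty string if either timestamp is missing.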
+ start, end = timestamps
+ if not start or not end:
+ duration = ''
+ else:
+ delta = end - start
+ duration = '%d.%06ds' % (
+ delta.days * DAY_SECONDS + delta.seconds, delta.microseconds)
+ return duration
+
+
+def find_worker(test):
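+    # The parallel runner tags each test with 'worker-N'; return that worker
+    # number, or the string 'NaN' when no such tag is present.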
+ for tag in test['tags']:
+ if tag.startswith('worker-'):
+ return int(tag[7:])
+ return 'NaN'
+
+
+# Print out stdout/stderr if it exists, always
+def print_attachments(stream, test, all_channels=False):
+ """Print out subunit attachments.
+
+    Print out subunit attachments that contain content. This runs in
+    two modes: one for successes, where we print out just stdout and
+    stderr, and an override that dumps all of the attachments.
+ """
+ channels = ('stdout', 'stderr')
+ for name, detail in test['details'].items():
+ # NOTE(sdague): the subunit names are a little crazy, and actually
+ # are in the form pythonlogging:'' (with the colon and quotes)
+ name = name.split(':')[0]
+ if detail.content_type.type == 'test':
+ detail.content_type.type = 'text'
+ if (all_channels or name in channels) and detail.as_text():
+ title = "Captured %s:" % name
+ stream.write("\n%s\n%s\n" % (title, ('~' * len(title))))
+ # indent attachment lines 4 spaces to make them visually
+ # offset
+ for line in detail.as_text().split('\n'):
+ stream.write(" %s\n" % line)
+
+
+def show_outcome(stream, test):
+ global RESULTS
+ status = test['status']
+ # TODO(sdague): ask lifeless why on this?
+ if status == 'exists':
+ return
+
+ worker = find_worker(test)
+ name = cleanup_test_name(test['id'])
+ duration = get_duration(test['timestamps'])
+
+ if worker not in RESULTS:
+ RESULTS[worker] = []
+ RESULTS[worker].append(test)
+
+ # don't count the end of the return code as a fail
+ if name == 'process-returncode':
+ return
+
+ if status == 'success':
+ stream.write('{%s} %s [%s] ... ok\n' % (
+ worker, name, duration))
+ print_attachments(stream, test)
+ elif status == 'fail':
+ FAILS.append(test)
+ stream.write('{%s} %s [%s] ... FAILED\n' % (
+ worker, name, duration))
+ print_attachments(stream, test, all_channels=True)
+ elif status == 'skip':
+ stream.write('{%s} %s ... SKIPPED: %s\n' % (
+ worker, name, test['details']['reason'].as_text()))
+ else:
+ stream.write('{%s} %s [%s] ... %s\n' % (
+ worker, name, duration, test['status']))
+ print_attachments(stream, test, all_channels=True)
+
+ stream.flush()
+
+
+def print_fails(stream):
+ """Print summary failure report.
+
+ Currently unused, however there remains debate on inline vs. at end
+ reporting, so leave the utility function for later use.
+ """
+ if not FAILS:
+ return
+ stream.write("\n==============================\n")
+ stream.write("Failed %s tests - output below:" % len(FAILS))
+ stream.write("\n==============================\n")
+ for f in FAILS:
+ stream.write("\n%s\n" % f['id'])
+ stream.write("%s\n" % ('-' * len(f['id'])))
+ print_attachments(stream, f, all_channels=True)
+ stream.write('\n')
+
+
+def count_tests(key, value):
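+    # Count, across every worker's result list, the tests whose `key` field
+    # matches the regex `value`.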
+ count = 0
+ for k, v in RESULTS.items():
+ for item in v:
+ if key in item:
+ if re.search(value, item[key]):
+ count += 1
+ return count
+
+
+def worker_stats(worker):
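+    # Report how many tests this worker ran and the wall-clock span from its
+    # first test's start to its last test's end.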
+ tests = RESULTS[worker]
+ num_tests = len(tests)
+ delta = tests[-1]['timestamps'][1] - tests[0]['timestamps'][0]
+ return num_tests, delta
+
+
+def print_summary(stream):
+ stream.write("\n======\nTotals\n======\n")
+ stream.write("Run: %s\n" % count_tests('status', '.*'))
+ stream.write(" - Passed: %s\n" % count_tests('status', 'success'))
+ stream.write(" - Skipped: %s\n" % count_tests('status', 'skip'))
+ stream.write(" - Failed: %s\n" % count_tests('status', 'fail'))
+
+ # we could have no results, especially as we filter out the process-codes
+ if RESULTS:
+ stream.write("\n==============\nWorker Balance\n==============\n")
+
+ for w in range(max(RESULTS.keys()) + 1):
+ if w not in RESULTS:
+ stream.write(
+ " - WARNING: missing Worker %s! "
+ "Race in testr accounting.\n" % w)
+ else:
+ num, time = worker_stats(w)
+ stream.write(" - Worker %s (%s tests) => %ss\n" %
+ (w, num, time))
+
+
+def main():
+ stream = subunit.ByteStreamToStreamResult(
+ sys.stdin, non_subunit_name='stdout')
+ starts = Starts(sys.stdout)
+ outcomes = testtools.StreamToDict(
+ functools.partial(show_outcome, sys.stdout))
+ summary = testtools.StreamSummary()
+ result = testtools.CopyStreamResult([starts, outcomes, summary])
+ result.startTestRun()
+ try:
+ stream.run(result)
+ finally:
+ result.stopTestRun()
+ print_summary(sys.stdout)
+ return (0 if summary.wasSuccessful() else 1)
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/tools/tempest_auto_config.py b/tools/tempest_auto_config.py
deleted file mode 100644
index 5b8d05b..0000000
--- a/tools/tempest_auto_config.py
+++ /dev/null
@@ -1,395 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# This script aims to configure an initial OpenStack environment with all the
-# necessary configurations for tempest's run using nothing but OpenStack's
-# native API.
-# That includes, creating users, tenants, registering images (cirros),
-# configuring neutron and so on.
-#
-# ASSUMPTION: this script is run by an admin user as it is meant to configure
-# the OpenStack environment prior to actual use.
-
-# Config
-import ConfigParser
-import os
-import tarfile
-import urllib2
-
-# Default client libs
-import glanceclient as glance_client
-import keystoneclient.v2_0.client as keystone_client
-
-# Import OpenStack exceptions
-import glanceclient.exc as glance_exception
-import keystoneclient.exceptions as keystone_exception
-
-
-TEMPEST_TEMP_DIR = os.getenv("TEMPEST_TEMP_DIR", "/tmp").rstrip('/')
-TEMPEST_ROOT_DIR = os.getenv("TEMPEST_ROOT_DIR", os.getenv("HOME")).rstrip('/')
-
-# Environment variables override defaults
-TEMPEST_CONFIG_DIR = os.getenv("TEMPEST_CONFIG_DIR",
- "%s%s" % (TEMPEST_ROOT_DIR, "/etc")).rstrip('/')
-TEMPEST_CONFIG_FILE = os.getenv("TEMPEST_CONFIG_FILE",
- "%s%s" % (TEMPEST_CONFIG_DIR, "/tempest.conf"))
-TEMPEST_CONFIG_SAMPLE = os.getenv("TEMPEST_CONFIG_SAMPLE",
- "%s%s" % (TEMPEST_CONFIG_DIR,
- "/tempest.conf.sample"))
-# Image references
-IMAGE_DOWNLOAD_CHUNK_SIZE = 8 * 1024
-IMAGE_UEC_SOURCE_URL = os.getenv("IMAGE_UEC_SOURCE_URL",
- "http://download.cirros-cloud.net/0.3.1/"
- "cirros-0.3.1-x86_64-uec.tar.gz")
-TEMPEST_IMAGE_ID = os.getenv('IMAGE_ID')
-TEMPEST_IMAGE_ID_ALT = os.getenv('IMAGE_ID_ALT')
-IMAGE_STATUS_ACTIVE = 'active'
-
-
-class ClientManager(object):
- """
- Manager that provides access to the official python clients for
- calling various OpenStack APIs.
- """
- def __init__(self):
- self.identity_client = None
- self.image_client = None
- self.network_client = None
- self.compute_client = None
- self.volume_client = None
-
- def get_identity_client(self, **kwargs):
- """
- Returns the openstack identity python client
- :param username: a string representing the username
- :param password: a string representing the user's password
- :param tenant_name: a string representing the tenant name of the user
- :param auth_url: a string representing the auth url of the identity
- :param insecure: True if we wish to disable ssl certificate validation,
- False otherwise
- :returns an instance of openstack identity python client
- """
- if not self.identity_client:
- self.identity_client = keystone_client.Client(**kwargs)
-
- return self.identity_client
-
- def get_image_client(self, version="1", *args, **kwargs):
- """
- This method returns OpenStack glance python client
- :param version: a string representing the version of the glance client
- to use.
- :param string endpoint: A user-supplied endpoint URL for the glance
- service.
- :param string token: Token for authentication.
- :param integer timeout: Allows customization of the timeout for client
- http requests. (optional)
- :return: a Client object representing the glance client
- """
- if not self.image_client:
- self.image_client = glance_client.Client(version, *args, **kwargs)
-
- return self.image_client
-
-
-def get_tempest_config(path_to_config):
- """
- Gets the tempest configuration file as a ConfigParser object
- :param path_to_config: path to the config file
- :return: a ConfigParser object representing the tempest configuration file
- """
- # get the sample config file from the sample
- config = ConfigParser.ConfigParser()
- config.readfp(open(path_to_config))
-
- return config
-
-
-def update_config_admin_credentials(config, config_section):
- """
- Updates the tempest config with the admin credentials
- :param config: a ConfigParser object representing the tempest config file
- :param config_section: the section name where the admin credentials are
- """
- # Check if credentials are present, default uses the config credentials
- OS_USERNAME = os.getenv('OS_USERNAME',
- config.get(config_section, "admin_username"))
- OS_PASSWORD = os.getenv('OS_PASSWORD',
- config.get(config_section, "admin_password"))
- OS_TENANT_NAME = os.getenv('OS_TENANT_NAME',
- config.get(config_section, "admin_tenant_name"))
- OS_AUTH_URL = os.getenv('OS_AUTH_URL', config.get(config_section, "uri"))
-
- if not (OS_AUTH_URL and
- OS_USERNAME and
- OS_PASSWORD and
- OS_TENANT_NAME):
- raise Exception("Admin environment variables not found.")
-
- # TODO(tkammer): Add support for uri_v3
- config_identity_params = {'uri': OS_AUTH_URL,
- 'admin_username': OS_USERNAME,
- 'admin_password': OS_PASSWORD,
- 'admin_tenant_name': OS_TENANT_NAME}
-
- update_config_section_with_params(config,
- config_section,
- config_identity_params)
-
-
-def update_config_section_with_params(config, config_section, params):
- """
- Updates a given config object with given params
- :param config: a ConfigParser object representing the tempest config file
- :param config_section: the section we would like to update
- :param params: the parameters we wish to update for that section
- """
- for option, value in params.items():
- config.set(config_section, option, value)
-
-
-def get_identity_client_kwargs(config, config_section):
- """
- Get the required arguments for the identity python client
- :param config: a ConfigParser object representing the tempest config file
- :param config_section: the section name in the configuration where the
- arguments can be found
- :return: a dictionary representing the needed arguments for the identity
- client
- """
- username = config.get(config_section, 'admin_username')
- password = config.get(config_section, 'admin_password')
- tenant_name = config.get(config_section, 'admin_tenant_name')
- auth_url = config.get(config_section, 'uri')
- dscv = config.get(config_section, 'disable_ssl_certificate_validation')
- kwargs = {'username': username,
- 'password': password,
- 'tenant_name': tenant_name,
- 'auth_url': auth_url,
- 'insecure': dscv}
-
- return kwargs
-
-
-def create_user_with_tenant(identity_client, username, password, tenant_name):
- """
- Creates a user using a given identity client
- :param identity_client: openstack identity python client
- :param username: a string representing the username
- :param password: a string representing the user's password
- :param tenant_name: a string representing the tenant name of the user
- """
- # Try to create the necessary tenant
- tenant_id = None
- try:
- tenant_description = "Tenant for Tempest %s user" % username
- tenant = identity_client.tenants.create(tenant_name,
- tenant_description)
- tenant_id = tenant.id
- except keystone_exception.Conflict:
-
- # if already exist, use existing tenant
- tenant_list = identity_client.tenants.list()
- for tenant in tenant_list:
- if tenant.name == tenant_name:
- tenant_id = tenant.id
-
- # Try to create the user
- try:
- email = "%s@test.com" % username
- identity_client.users.create(name=username,
- password=password,
- email=email,
- tenant_id=tenant_id)
- except keystone_exception.Conflict:
-
- # if already exist, use existing user
- pass
-
-
-def create_users_and_tenants(identity_client,
- config,
- config_section):
- """
- Creates the two non admin users and tenants for tempest
- :param identity_client: openstack identity python client
- :param config: a ConfigParser object representing the tempest config file
- :param config_section: the section name of identity in the config
- """
- # Get the necessary params from the config file
- tenant_name = config.get(config_section, 'tenant_name')
- username = config.get(config_section, 'username')
- password = config.get(config_section, 'password')
-
- alt_tenant_name = config.get(config_section, 'alt_tenant_name')
- alt_username = config.get(config_section, 'alt_username')
- alt_password = config.get(config_section, 'alt_password')
-
- # Create the necessary users for the test runs
- create_user_with_tenant(identity_client, username, password, tenant_name)
- create_user_with_tenant(identity_client, alt_username, alt_password,
- alt_tenant_name)
-
-
-def get_image_client_kwargs(identity_client, config, config_section):
- """
- Get the required arguments for the image python client
- :param identity_client: openstack identity python client
- :param config: a ConfigParser object representing the tempest config file
- :param config_section: the section name of identity in the config
- :return: a dictionary representing the needed arguments for the image
- client
- """
-
- token = identity_client.auth_token
- endpoint = identity_client.\
- service_catalog.url_for(service_type='image', endpoint_type='publicURL'
- )
- dscv = config.get(config_section, 'disable_ssl_certificate_validation')
- kwargs = {'endpoint': endpoint,
- 'token': token,
- 'insecure': dscv}
-
- return kwargs
-
-
-def images_exist(image_client):
- """
- Checks whether the images ID's located in the environment variable are
- indeed registered
- :param image_client: the openstack python client representing the image
- client
- """
- exist = True
- if not TEMPEST_IMAGE_ID or not TEMPEST_IMAGE_ID_ALT:
- exist = False
- else:
- try:
- image_client.images.get(TEMPEST_IMAGE_ID)
- image_client.images.get(TEMPEST_IMAGE_ID_ALT)
- except glance_exception.HTTPNotFound:
- exist = False
-
- return exist
-
-
-def download_and_register_uec_images(image_client, download_url,
- download_folder):
- """
- Downloads and registered the UEC AKI/AMI/ARI images
- :param image_client:
- :param download_url: the url of the uec tar file
- :param download_folder: the destination folder we wish to save the file to
- """
- basename = os.path.basename(download_url)
- path = os.path.join(download_folder, basename)
-
- request = urllib2.urlopen(download_url)
-
- # First, download the file
- with open(path, "wb") as fp:
- while True:
- chunk = request.read(IMAGE_DOWNLOAD_CHUNK_SIZE)
- if not chunk:
- break
-
- fp.write(chunk)
-
- # Then extract and register images
- tar = tarfile.open(path, "r")
- for name in tar.getnames():
- file_obj = tar.extractfile(name)
- format = "aki"
-
- if file_obj.name.endswith(".img"):
- format = "ami"
-
- if file_obj.name.endswith("initrd"):
- format = "ari"
-
- # Register images in image client
- image_client.images.create(name=file_obj.name, disk_format=format,
- container_format=format, data=file_obj,
- is_public="true")
-
- tar.close()
-
-
-def create_images(image_client, config, config_section,
- download_url=IMAGE_UEC_SOURCE_URL,
- download_folder=TEMPEST_TEMP_DIR):
- """
- Creates images for tempest's use and registers the environment variables
- IMAGE_ID and IMAGE_ID_ALT with registered images
- :param image_client: OpenStack python image client
- :param config: a ConfigParser object representing the tempest config file
- :param config_section: the section name where the IMAGE ids are set
- :param download_url: the URL from which we should download the UEC tar
- :param download_folder: the place where we want to save the download file
- """
- if not images_exist(image_client):
- # Falls down to the default uec images
- download_and_register_uec_images(image_client, download_url,
- download_folder)
- image_ids = []
- for image in image_client.images.list():
- image_ids.append(image.id)
-
- os.environ["IMAGE_ID"] = image_ids[0]
- os.environ["IMAGE_ID_ALT"] = image_ids[1]
-
- params = {'image_ref': os.getenv("IMAGE_ID"),
- 'image_ref_alt': os.getenv("IMAGE_ID_ALT")}
-
- update_config_section_with_params(config, config_section, params)
-
-
-def main():
- """
- Main module to control the script
- """
- # Check if config file exists or fall to the default sample otherwise
- path_to_config = TEMPEST_CONFIG_SAMPLE
-
- if os.path.isfile(TEMPEST_CONFIG_FILE):
- path_to_config = TEMPEST_CONFIG_FILE
-
- config = get_tempest_config(path_to_config)
- update_config_admin_credentials(config, 'identity')
-
- client_manager = ClientManager()
-
- # Set the identity related info for tempest
- identity_client_kwargs = get_identity_client_kwargs(config,
- 'identity')
- identity_client = client_manager.get_identity_client(
- **identity_client_kwargs)
-
- # Create the necessary users and tenants for tempest run
- create_users_and_tenants(identity_client, config, 'identity')
-
- # Set the image related info for tempest
- image_client_kwargs = get_image_client_kwargs(identity_client,
- config,
- 'identity')
- image_client = client_manager.get_image_client(**image_client_kwargs)
-
- # Create the necessary users and tenants for tempest run
- create_images(image_client, config, 'compute')
-
- # TODO(tkammer): add network implementation
-
-if __name__ == "__main__":
- main()
diff --git a/tools/verify_tempest_config.py b/tools/verify_tempest_config.py
deleted file mode 100755
index 4be812c..0000000
--- a/tools/verify_tempest_config.py
+++ /dev/null
@@ -1,158 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import json
-import sys
-
-import httplib2
-
-from tempest import clients
-from tempest import config
-
-
-CONF = config.CONF
-RAW_HTTP = httplib2.Http()
-
-
-def verify_glance_api_versions(os):
- # Check glance api versions
- __, versions = os.image_client.get_versions()
- if CONF.image_feature_enabled.api_v1 != ('v1.1' in versions or 'v1.0' in
- versions):
- print('Config option image api_v1 should be change to: %s' % (
- not CONF.image_feature_enabled.api_v1))
- if CONF.image_feature_enabled.api_v2 != ('v2.0' in versions):
- print('Config option image api_v2 should be change to: %s' % (
- not CONF.image_feature_enabled.api_v2))
-
-
-def verify_nova_api_versions(os):
- # Check nova api versions - only get base URL without PATH
- os.servers_client.skip_path = True
- # The nova base endpoint url includes the version but to get the versions
- # list the unversioned endpoint is needed
- v2_endpoint = os.servers_client.base_url
- v2_endpoint_parts = v2_endpoint.split('/')
- endpoint = v2_endpoint_parts[0] + '//' + v2_endpoint_parts[2]
- __, body = RAW_HTTP.request(endpoint, 'GET')
- body = json.loads(body)
- # Restore full base_url
- os.servers_client.skip_path = False
- versions = map(lambda x: x['id'], body['versions'])
- if CONF.compute_feature_enabled.api_v3 != ('v3.0' in versions):
- print('Config option compute api_v3 should be change to: %s' % (
- not CONF.compute_feature_enabled.api_v3))
-
-
-def get_extension_client(os, service):
- extensions_client = {
- 'nova': os.extensions_client,
- 'nova_v3': os.extensions_v3_client,
- 'cinder': os.volumes_extension_client,
- 'neutron': os.network_client,
- }
- if service not in extensions_client:
- print('No tempest extensions client for %s' % service)
- exit(1)
- return extensions_client[service]
-
-
-def get_enabled_extensions(service):
- extensions_options = {
- 'nova': CONF.compute_feature_enabled.api_extensions,
- 'nova_v3': CONF.compute_feature_enabled.api_v3_extensions,
- 'cinder': CONF.volume_feature_enabled.api_extensions,
- 'neutron': CONF.network_feature_enabled.api_extensions,
- }
- if service not in extensions_options:
- print('No supported extensions list option for %s' % service)
- exit(1)
- return extensions_options[service]
-
-
-def verify_extensions(os, service, results):
- extensions_client = get_extension_client(os, service)
- __, resp = extensions_client.list_extensions()
- if isinstance(resp, dict):
- # Neutron's extension 'name' field has is not a single word (it has
- # spaces in the string) Since that can't be used for list option the
- # api_extension option in the network-feature-enabled group uses alias
- # instead of name.
- if service == 'neutron':
- extensions = map(lambda x: x['alias'], resp['extensions'])
- else:
- extensions = map(lambda x: x['name'], resp['extensions'])
-
- else:
- extensions = map(lambda x: x['name'], resp)
- if not results.get(service):
- results[service] = {}
- extensions_opt = get_enabled_extensions(service)
- if extensions_opt[0] == 'all':
- results[service]['extensions'] = 'all'
- return results
- # Verify that all configured extensions are actually enabled
- for extension in extensions_opt:
- results[service][extension] = extension in extensions
- # Verify that there aren't additional extensions enabled that aren't
- # specified in the config list
- for extension in extensions:
- if extension not in extensions_opt:
- results[service][extension] = False
- return results
-
-
-def display_results(results):
- for service in results:
- # If all extensions are specified as being enabled there is no way to
- # verify this so we just assume this to be true
- if results[service].get('extensions'):
- continue
- extension_list = get_enabled_extensions(service)
- for extension in results[service]:
- if not results[service][extension]:
- if extension in extension_list:
- print("%s extension: %s should not be included in the list"
- " of enabled extensions" % (service, extension))
- else:
- print("%s extension: %s should be included in the list of "
- "enabled extensions" % (service, extension))
-
-
-def check_service_availability(service):
- if service == 'nova_v3':
- service = 'nova'
- return getattr(CONF.service_available, service)
-
-
-def main(argv):
- print('Running config verification...')
- os = clients.ComputeAdminManager(interface='json')
- results = {}
- for service in ['nova', 'nova_v3', 'cinder', 'neutron']:
- # TODO(mtreinish) make this a keystone endpoint check for available
- # services
- if not check_service_availability(service):
- print("%s is not available" % service)
- continue
- results = verify_extensions(os, service, results)
- verify_glance_api_versions(os)
- verify_nova_api_versions(os)
- display_results(results)
-
-
-if __name__ == "__main__":
- main(sys.argv)
diff --git a/tox.ini b/tox.ini
index 4a625f8..5e8d283 100644
--- a/tox.ini
+++ b/tox.ini
@@ -54,7 +54,7 @@
setenv = OS_TEST_TIMEOUT=1200
# The regex below is used to select heat api/scenario tests tagged as slow.
commands =
- bash tools/pretty_tox_serial.sh '(?=.*\[.*\bslow\b.*\])(^tempest\.(api|scenario)\.orchestration) {posargs}'
+ bash tools/pretty_tox.sh '(?=.*\[.*\bslow\b.*\])(^tempest\.(api|scenario)\.orchestration) {posargs}'
[testenv:large-ops]
sitepackages = True